source | c |
|---|---|
gemm.h | /*
+---------------------------------------------------------------------------+
| Juzhen: C++ library for linear algebra |
+---------------------------------------------------------------------------+
| |
| Copyright 2011 Hui Chen |
| |
| Licensed under the Apache License, Version 2.0 (the "License"); |
| you may not use this file except in compliance with the License. |
| You may obtain a copy of the License at |
| |
| http://www.apache.org/licenses/LICENSE-2.0 |
| |
| Unless required by applicable law or agreed to in writing, software |
| distributed under the License is distributed on an "AS IS" BASIS, |
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| See the License for the specific language governing permissions and |
| limitations under the License. |
| |
+---------------------------------------------------------------------------+
*/
#ifndef SRC_ADAPTOR_NATIVE_GEMM_H_
#define SRC_ADAPTOR_NATIVE_GEMM_H_
namespace juzhen {
// Computes C = A * B for column-major matrices: A is M x K (leading dimension lda),
// B is K x N (leading dimension ldb), and C is M x N (leading dimension ldc).
template<typename T>
void gemm(
const int M, const int N,
const int K, const T *A, const int lda, const T *B,
const int ldb, T *c, const int ldc) {
const T *ai, *bi;
T *ci;
#pragma omp parallel for private(ai, bi, ci)
for (int i = 0; i < M; i++) {
for (int j = 0; j < N; j++) {
ai = A + i;
bi = B + j * ldb;
ci = c + j * ldc + i;
(*ci) = 0;
for (int k = 0; k < K; k++) {
(*ci) += (*ai) * (*(bi++));
ai += lda;
}
}
}
}
}
#endif // SRC_ADAPTOR_NATIVE_GEMM_H_
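// A minimal usage sketch (illustrative only, kept as a comment; assumes the
// column-major layout implied by the indexing above, with lda = M, ldb = K, ldc = M):
//
//   float A[2 * 3] = {1, 4, 2, 5, 3, 6};    // 2x3, column-major: [[1,2,3],[4,5,6]]
//   float B[3 * 2] = {7, 8, 9, 10, 11, 12}; // 3x2, column-major: [[7,10],[8,11],[9,12]]
//   float C[2 * 2];
//   juzhen::gemm<float>(2, 2, 3, A, 2, B, 3, C, 2);
//   // C (column-major) = {50, 122, 68, 167}, i.e. [[50, 68], [122, 167]]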
|
activate.c | #include "lib.h"
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
void NEURALOPS_SYMBOL(rect_fwd)(
size_t batch_sz,
size_t dim,
const float *in_buf,
float *out_buf)
{
#pragma omp parallel for
for (size_t p = 0; p < batch_sz * dim; p += 1) {
float x = in_buf[p];
out_buf[p] = x * (x > 0.0f);
}
}
void NEURALOPS_SYMBOL(rect_bwd)(
size_t batch_sz,
size_t dim,
const float *out_buf,
const float *out_grad,
float *in_grad)
{
#pragma omp parallel for
for (size_t p = 0; p < batch_sz * dim; p += 1) {
float y = out_buf[p];
float dy = out_grad[p];
in_grad[p] = dy * (y > 0.0f);
}
}
void NEURALOPS_SYMBOL(logistic_fwd)(
size_t batch_sz,
size_t dim,
const float *in_buf,
float *out_buf)
{
#pragma omp parallel for
for (size_t p = 0; p < batch_sz * dim; p += 1) {
float x = in_buf[p];
out_buf[p] = 1.0f / (1.0f + expf(-x));
}
}
void NEURALOPS_SYMBOL(logistic_bwd)(
size_t batch_sz,
size_t dim,
const float *out_buf,
const float *out_grad,
float *in_grad)
{
#pragma omp parallel for
for (size_t p = 0; p < batch_sz * dim; p += 1) {
float y = out_buf[p];
float dy = out_grad[p];
in_grad[p] = y * (1.0f - y) * dy;
}
}
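/*
  A minimal usage sketch (illustrative only, kept as a comment so it does not
  affect the build; assumes NEURALOPS_SYMBOL from lib.h expands to a plain
  function name). rect_* implement ReLU (y = max(x, 0)); logistic_* implement
  the sigmoid (y = 1/(1+exp(-x)), dy/dx = y*(1-y)).

  float in[4]  = {-2.0f, -0.5f, 0.5f, 2.0f};
  float out[4], grad_in[4];
  float grad_out[4] = {1.0f, 1.0f, 1.0f, 1.0f};
  NEURALOPS_SYMBOL(rect_fwd)(1, 4, in, out);                  // out = {0, 0, 0.5, 2}
  NEURALOPS_SYMBOL(rect_bwd)(1, 4, out, grad_out, grad_in);   // grad_in = {0, 0, 1, 1}
  NEURALOPS_SYMBOL(logistic_fwd)(1, 4, in, out);              // out[i] = 1/(1+exp(-in[i]))
  NEURALOPS_SYMBOL(logistic_bwd)(1, 4, out, grad_out, grad_in); // grad_in[i] = out[i]*(1-out[i])
*/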
|
cpu.c | #include "additionally.h" // some definitions from: im2col.h, blas.h, list.h, utils.h, activations.h, tree.h, layer.h, network.h
// softmax_layer.h, reorg_layer.h, route_layer.h, region_layer.h, maxpool_layer.h, convolutional_layer.h
#define GEMMCONV
/*
// from: box.h
typedef struct {
float x, y, w, h;
} box;
*/
// 4 layers in 1: convolution, batch-normalization, BIAS and activation
void forward_convolutional_layer_cpu(layer l, network_state state)
{
int out_h = (l.h + 2 * l.pad - l.size) / l.stride + 1; // output_height=input_height for stride=1 and pad=1
int out_w = (l.w + 2 * l.pad - l.size) / l.stride + 1; // output_width=input_width for stride=1 and pad=1
int i, f, j;
// fill the output with zeros (ALPHA = 0)
for (i = 0; i < l.outputs; ++i) l.output[i] = 0;
// l.n - number of filters on this layer
// l.c - channels of input-array
// l.h - height of input-array
// l.w - width of input-array
// l.size - width and height of filters (the same size for all filters)
// 1. Convolution !!!
#ifndef GEMMCONV
int fil;
// filter index
#pragma omp parallel for // "omp parallel for" - automatic parallelization of loop by using OpenMP
for (fil = 0; fil < l.n; ++fil) {
int chan, y, x, f_y, f_x;
// channel index
for (chan = 0; chan < l.c; ++chan)
// input - y
for (y = 0; y < l.h; ++y)
// input - x
for (x = 0; x < l.w; ++x)
{
int const output_index = fil*l.w*l.h + y*l.w + x;
int const weights_pre_index = fil*l.c*l.size*l.size + chan*l.size*l.size;
int const input_pre_index = chan*l.w*l.h;
float sum = 0;
// filter - y
for (f_y = 0; f_y < l.size; ++f_y)
{
int input_y = y + f_y - l.pad;
// filter - x
for (f_x = 0; f_x < l.size; ++f_x)
{
int input_x = x + f_x - l.pad;
if (input_y < 0 || input_x < 0 || input_y >= l.h || input_x >= l.w) continue;
int input_index = input_pre_index + input_y*l.w + input_x;
int weights_index = weights_pre_index + f_y*l.size + f_x;
sum += state.input[input_index] * l.weights[weights_index];
}
}
// l.output[filters][width][height] +=
// state.input[channels][width][height] *
// l.weights[filters][channels][filter_width][filter_height];
l.output[output_index] += sum;
}
}
#else
int m = l.n;
int k = l.size*l.size*l.c;
int n = out_h*out_w;
float *a = l.weights;
float *b = state.workspace;
float *c = l.output;
// convolution as GEMM (as part of BLAS)
for (i = 0; i < l.batch; ++i) {
im2col_cpu(state.input, l.c, l.h, l.w, l.size, l.stride, l.pad, b); // im2col.c
int t;
#pragma omp parallel for
for (t = 0; t < m; ++t) {
gemm_nn(1, n, k, 1, a + t*k, k, b, n, c + t*n, n);
}
c += n*m;
state.input += l.c*l.h*l.w;
}
#endif
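/*
  Reference sketch of the gemm_nn semantics assumed by the call above
  (row-major C += ALPHA * A * B); the actual implementation is declared in
  additionally.h and may differ in details:

  void gemm_nn(int M, int N, int K, float ALPHA,
               float *A, int lda, float *B, int ldb, float *C, int ldc)
  {
      int i, j, k;
      for (i = 0; i < M; ++i)
          for (k = 0; k < K; ++k) {
              float a_part = ALPHA * A[i*lda + k];
              for (j = 0; j < N; ++j)
                  C[i*ldc + j] += a_part * B[k*ldb + j];
          }
  }
*/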
int const out_size = out_h*out_w;
// 2. Batch normalization
if (l.batch_normalize) {
for (f = 0; f < l.out_c; ++f) {
for (i = 0; i < out_size; ++i) {
int index = f*out_size + i;
l.output[index] = (l.output[index] - l.rolling_mean[f]) / (sqrtf(l.rolling_variance[f]) + .000001f);
}
}
// scale_bias
for (i = 0; i < l.out_c; ++i) {
for (j = 0; j < out_size; ++j) {
l.output[i*out_size + j] *= l.scales[i];
}
}
}
// 3. Add BIAS
//if (l.batch_normalize)
for (i = 0; i < l.n; ++i) {
for (j = 0; j < out_size; ++j) {
l.output[i*out_size + j] += l.biases[i];
}
}
// 4. Activation function (LEAKY or LINEAR)
if (l.activation == LEAKY) {
for (i = 0; i < l.n*out_size; ++i) {
l.output[i] = leaky_activate(l.output[i]);
}
}
}
// MAX pooling layer
void forward_maxpool_layer_cpu(const layer l, network_state state)
{
int b, i, j, k, m, n;
int w_offset = -l.pad;
int h_offset = -l.pad;
int h = l.out_h;
int w = l.out_w;
int c = l.c;
// batch index
for (b = 0; b < l.batch; ++b) {
// channel index
for (k = 0; k < c; ++k) {
// y - output
for (i = 0; i < h; ++i) {
// x - output
for (j = 0; j < w; ++j) {
int out_index = j + w*(i + h*(k + c*b));
float max = -FLT_MAX;
int max_i = -1;
// pooling y-index (window row)
for (n = 0; n < l.size; ++n) {
// pooling x-index (window column)
for (m = 0; m < l.size; ++m) {
int cur_h = h_offset + i*l.stride + n;
int cur_w = w_offset + j*l.stride + m;
int index = cur_w + l.w*(cur_h + l.h*(k + b*l.c));
int valid = (cur_h >= 0 && cur_h < l.h &&
cur_w >= 0 && cur_w < l.w);
float val = (valid != 0) ? state.input[index] : -FLT_MAX;
max_i = (val > max) ? index : max_i; // get max index
max = (val > max) ? val : max; // get max value
}
}
l.output[out_index] = max; // store max value
l.indexes[out_index] = max_i; // store max index
}
}
}
}
}
// Route layer - just copy 1 or more layers into the current layer
void forward_route_layer_cpu(const layer l, network_state state)
{
int i, j;
int offset = 0;
// number of merged layers
for (i = 0; i < l.n; ++i) {
int index = l.input_layers[i]; // source layer index
float *input = state.net.layers[index].output; // source layer output ptr
int input_size = l.input_sizes[i]; // source layer size
// batch index
for (j = 0; j < l.batch; ++j) {
memcpy(l.output + offset + j*l.outputs, input + j*input_size, input_size * sizeof(float));
}
offset += input_size;
}
}
// Reorg layer - just changes the dimension sizes of the previous layer (some dimensions are increased by decreasing others)
void forward_reorg_layer_cpu(const layer l, network_state state)
{
float *out = l.output;
float *x = state.input;
int out_w = l.out_w;
int out_h = l.out_h;
int out_c = l.out_c;
int batch = l.batch;
int stride = l.stride;
int b, i, j, k;
int in_c = out_c / (stride*stride);
//printf("\n out_c = %d, out_w = %d, out_h = %d, stride = %d, forward = %d \n", out_c, out_w, out_h, stride, forward);
//printf(" in_c = %d, in_w = %d, in_h = %d \n", in_c, out_w*stride, out_h*stride);
// batch
for (b = 0; b < batch; ++b) {
// channel
for (k = 0; k < out_c; ++k) {
// y
for (j = 0; j < out_h; ++j) {
// x
for (i = 0; i < out_w; ++i) {
int in_index = i + out_w*(j + out_h*(k + out_c*b));
int c2 = k % in_c;
int offset = k / in_c;
int w2 = i*stride + offset % stride;
int h2 = j*stride + offset / stride;
int out_index = w2 + out_w*stride*(h2 + out_h*stride*(c2 + in_c*b));
out[in_index] = x[out_index];
}
}
}
}
}
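/*
  Worked example (illustrative only): with stride = 2, out_w = out_h = 1 and
  out_c = 4 (so in_c = 1), output channel k at cell (0,0) reads input pixel
  (y = k/2, x = k%2) of the single input channel, i.e. each 2x2 spatial block
  of the input is folded into channel depth.
*/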
// ---- upsample layer ----
// upsample_layer.c
void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
int i, j, k, b;
for (b = 0; b < batch; ++b) {
for (k = 0; k < c; ++k) {
for (j = 0; j < h*stride; ++j) {
for (i = 0; i < w*stride; ++i) {
int in_index = b*w*h*c + k*w*h + (j / stride)*w + i / stride;
int out_index = b*w*h*c*stride*stride + k*w*h*stride*stride + j*w*stride + i;
if (forward) out[out_index] = scale*in[in_index];
else in[in_index] += scale*out[out_index];
}
}
}
}
}
// upsample_layer.c
void forward_upsample_layer_cpu(const layer l, network_state net)
{
fill_cpu(l.outputs*l.batch, 0, l.output, 1);
if (l.reverse) {
upsample_cpu(l.output, l.out_w, l.out_h, l.c, l.batch, l.stride, 0, l.scale, net.input);
}
else {
upsample_cpu(net.input, l.w, l.h, l.c, l.batch, l.stride, 1, l.scale, l.output);
}
}
// blas.c (shortcut_layer)
void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
int stride = w1 / w2;
int sample = w2 / w1;
assert(stride == h1 / h2);
assert(sample == h2 / h1);
if (stride < 1) stride = 1;
if (sample < 1) sample = 1;
int minw = (w1 < w2) ? w1 : w2;
int minh = (h1 < h2) ? h1 : h2;
int minc = (c1 < c2) ? c1 : c2;
int i, j, k, b;
for (b = 0; b < batch; ++b) {
for (k = 0; k < minc; ++k) {
for (j = 0; j < minh; ++j) {
for (i = 0; i < minw; ++i) {
int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
out[out_index] += add[add_index];
}
}
}
}
}
// blas.c
void copy_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
int i;
for (i = 0; i < N; ++i) Y[i*INCY] = X[i*INCX];
}
// shortcut_layer.c
void forward_shortcut_layer_cpu(const layer l, network_state state)
{
copy_cpu(l.outputs*l.batch, state.input, 1, l.output, 1);
shortcut_cpu(l.batch, l.w, l.h, l.c, state.net.layers[l.index].output, l.out_w, l.out_h, l.out_c, l.output);
activate_array(l.output, l.outputs*l.batch, l.activation);
}
// ---- yolo layer ----
void forward_yolo_layer_cpu(const layer l, network_state state)
{
int i, j, b, t, n;
memcpy(l.output, state.input, l.outputs*l.batch * sizeof(float));
#ifndef GPU
for (b = 0; b < l.batch; ++b) {
for (n = 0; n < l.n; ++n) {
int index = entry_index(l, b, n*l.w*l.h, 0);
activate_array(l.output + index, 2 * l.w*l.h, LOGISTIC);
index = entry_index(l, b, n*l.w*l.h, 4);
activate_array(l.output + index, (1 + l.classes)*l.w*l.h, LOGISTIC);
}
}
#endif
//memset(l.delta, 0, l.outputs * l.batch * sizeof(float));
}
// ---- region layer ----
static void softmax_cpu(float *input, int n, float temp, float *output)
{
int i;
float sum = 0;
float largest = -FLT_MAX;
for (i = 0; i < n; ++i) {
if (input[i] > largest) largest = input[i];
}
for (i = 0; i < n; ++i) {
float e = expf(input[i] / temp - largest / temp);
sum += e;
output[i] = e;
}
for (i = 0; i < n; ++i) {
output[i] /= sum;
}
}
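/*
  Numerical note (illustrative only): subtracting the maximum keeps expf() in
  range without changing the result, since exp((x_i - m)/T) / sum_j exp((x_j - m)/T)
  == exp(x_i/T) / sum_j exp(x_j/T). Example with temp = 1:
  input {1, 2, 3} -> e^{-2, -1, 0} = {0.135, 0.368, 1.000} -> {0.090, 0.245, 0.665}.
*/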
static void softmax_tree(float *input, int batch, int inputs, float temp, tree *hierarchy, float *output)
{
int b;
for (b = 0; b < batch; ++b) {
int i;
int count = 0;
for (i = 0; i < hierarchy->groups; ++i) {
int group_size = hierarchy->group_size[i];
softmax_cpu(input + b*inputs + count, group_size, temp, output + b*inputs + count);
count += group_size;
}
}
}
// ---
// Region layer - just rearranges array items, then applies logistic_activate and softmax
void forward_region_layer_cpu(const layer l, network_state state)
{
int i, b;
int size = l.coords + l.classes + 1; // 4 Coords(x,y,w,h) + Classes + 1 Probability-t0
memcpy(l.output, state.input, l.outputs*l.batch * sizeof(float));
//flatten(l.output, l.w*l.h, size*l.n, l.batch, 1);
// convert the many-channel layout to one interleaved channel (depth=1)
// (each grid cell will then hold a number of float variables equal to the initial number of channels)
{
float *x = l.output;
int layer_size = l.w*l.h; // W x H - size of layer
int layers = size*l.n; // number of channels (where l.n = number of anchors)
int batch = l.batch;
float *swap = calloc(layer_size*layers*batch, sizeof(float));
int i, c, b;
// batch index
for (b = 0; b < batch; ++b) {
// channel index
for (c = 0; c < layers; ++c) {
// layer grid index
for (i = 0; i < layer_size; ++i) {
int i1 = b*layers*layer_size + c*layer_size + i;
int i2 = b*layers*layer_size + i*layers + c;
swap[i2] = x[i1];
}
}
}
memcpy(x, swap, layer_size*layers*batch * sizeof(float));
free(swap);
}
// logistic activation only for: t0 (where t0 = Probability * IoU(box, object))
for (b = 0; b < l.batch; ++b) {
// for each item (x, y, anchor-index)
for (i = 0; i < l.h*l.w*l.n; ++i) {
int index = size*i + b*l.outputs;
float x = l.output[index + 4];
l.output[index + 4] = 1.0F / (1.0F + expf(-x)); // logistic_activate_cpu(l.output[index + 4]);
}
}
if (l.softmax_tree) { // Yolo 9000
for (b = 0; b < l.batch; ++b) {
for (i = 0; i < l.h*l.w*l.n; ++i) {
int index = size*i + b*l.outputs;
softmax_tree(l.output + index + 5, 1, 0, 1, l.softmax_tree, l.output + index + 5);
}
}
}
else if (l.softmax) { // Yolo v2
// softmax activation only for Classes probability
for (b = 0; b < l.batch; ++b) {
// for each item (x, y, anchor-index)
for (i = 0; i < l.h*l.w*l.n; ++i) {
int index = size*i + b*l.outputs;
softmax_cpu(l.output + index + 5, l.classes, 1, l.output + index + 5);
}
}
}
}
void yolov2_forward_network_cpu(network net, network_state state)
{
state.workspace = net.workspace;
int i;
for (i = 0; i < net.n; ++i) {
state.index = i;
layer l = net.layers[i];
if (l.type == CONVOLUTIONAL) {
forward_convolutional_layer_cpu(l, state);
//printf("\n CONVOLUTIONAL \t\t l.size = %d \n", l.size);
}
else if (l.type == MAXPOOL) {
forward_maxpool_layer_cpu(l, state);
//printf("\n MAXPOOL \t\t l.size = %d \n", l.size);
}
else if (l.type == ROUTE) {
forward_route_layer_cpu(l, state);
//printf("\n ROUTE \t\t\t l.n = %d \n", l.n);
}
else if (l.type == REORG) {
forward_reorg_layer_cpu(l, state);
//printf("\n REORG \n");
}
else if (l.type == UPSAMPLE) {
forward_upsample_layer_cpu(l, state);
//printf("\n UPSAMPLE \n");
}
else if (l.type == SHORTCUT) {
forward_shortcut_layer_cpu(l, state);
//printf("\n SHORTCUT \n");
}
else if (l.type == YOLO) {
forward_yolo_layer_cpu(l, state);
//printf("\n YOLO \n");
}
else if (l.type == REGION) {
forward_region_layer_cpu(l, state);
//printf("\n REGION \n");
}
else {
printf("\n layer: %d \n", l.type);
}
state.input = l.output;
}
}
// detect on CPU
float *network_predict_cpu(network net, float *input)
{
network_state state;
state.net = net;
state.index = 0;
state.input = input;
state.truth = 0;
state.train = 0;
state.delta = 0;
yolov2_forward_network_cpu(net, state); // network on CPU
//float *out = get_network_output(net);
int i;
for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break;
return net.layers[i].output;
}
// --------------------
// x - last conv-layer output
// biases - anchors from cfg-file
// n - number of anchors from cfg-file
box get_region_box_cpu(float *x, float *biases, int n, int index, int i, int j, int w, int h)
{
box b;
b.x = (i + logistic_activate(x[index + 0])) / w; // (col + 1./(1. + exp(-x))) / width_last_layer
b.y = (j + logistic_activate(x[index + 1])) / h; // (row + 1./(1. + exp(-x))) / height_last_layer
b.w = expf(x[index + 2]) * biases[2 * n] / w; // exp(x) * anchor_w / width_last_layer
b.h = expf(x[index + 3]) * biases[2 * n + 1] / h; // exp(x) * anchor_h / height_last_layer
return b;
}
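/*
  Worked example (illustrative only): for a 13x13 grid (w = h = 13), cell
  col = 6, row = 6, anchor (biases[2n], biases[2n+1]) = (3.0, 3.0) and raw
  predictions x[index+0..3] = {0, 0, 0, 0}:
    b.x = (6 + 0.5) / 13 = 0.5     b.y = 0.5
    b.w = exp(0) * 3.0 / 13 = 0.231    b.h = 0.231
  i.e. a box centered in the image, roughly 3 grid cells wide and tall.
*/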
// get prediction boxes
void get_region_boxes_cpu(layer l, int w, int h, float thresh, float **probs, box *boxes, int only_objectness, int *map)
{
int i, j, n;
float *predictions = l.output;
// grid index
for (i = 0; i < l.w*l.h; ++i) {
int row = i / l.w;
int col = i % l.w;
// anchor index
for (n = 0; n < l.n; ++n) {
int index = i*l.n + n; // index for each grid-cell & anchor
int p_index = index * (l.classes + 5) + 4;
float scale = predictions[p_index]; // scale = t0 = Probability * IoU(box, object)
if (l.classfix == -1 && scale < .5) scale = 0; // if(t0 < 0.5) t0 = 0;
int box_index = index * (l.classes + 5);
boxes[index] = get_region_box_cpu(predictions, l.biases, n, box_index, col, row, l.w, l.h);
boxes[index].x *= w;
boxes[index].y *= h;
boxes[index].w *= w;
boxes[index].h *= h;
int class_index = index * (l.classes + 5) + 5;
// Yolo 9000 or Yolo v2
if (l.softmax_tree) {
// Yolo 9000
hierarchy_predictions(predictions + class_index, l.classes, l.softmax_tree, 0);
int found = 0;
if (map) {
for (j = 0; j < 200; ++j) {
float prob = scale*predictions[class_index + map[j]];
probs[index][j] = (prob > thresh) ? prob : 0;
}
}
else {
for (j = l.classes - 1; j >= 0; --j) {
if (!found && predictions[class_index + j] > .5) {
found = 1;
}
else {
predictions[class_index + j] = 0;
}
float prob = predictions[class_index + j];
probs[index][j] = (scale > thresh) ? prob : 0;
}
}
}
else
{
// Yolo v2
for (j = 0; j < l.classes; ++j) {
float prob = scale*predictions[class_index + j]; // prob = IoU(box, object) = t0 * class-probability
probs[index][j] = (prob > thresh) ? prob : 0; // if (IoU < threshold) IoU = 0;
}
}
if (only_objectness) {
probs[index][0] = scale;
}
}
}
}
// ------ Calibration --------
// detect on CPU
float *network_calibrate_cpu(network net, float *input)
{
network_state state;
state.net = net;
state.index = 0;
state.input = input;
state.truth = 0;
state.train = 0;
state.delta = 0;
//yolov2_forward_network_cpu(net, state); // network on CPU
// input calibration - for quantization
static int max_num = 100;
static int counter = 0;
static float *input_mult_array = NULL;
if (net.do_input_calibration > 0) { // calibration for quantization
max_num = net.do_input_calibration;
if (input_mult_array == NULL) {
input_mult_array = (float *)calloc(net.n * max_num, sizeof(float));
}
++counter;
// save calibration coefficients
if (counter > max_num) {
printf("\n\n Saving coefficients to the input_calibration.txt file... \n\n");
FILE* fw = fopen("input_calibration.txt", "wb");
char buff[1024];
//printf("\n float input_mult[] = { ");
char *str1 = "input_calibration = ";
printf("%s", str1);
fwrite(str1, sizeof(char), strlen(str1), fw);
int i;
for (i = 0; i < net.n; ++i)
if (net.layers[i].type == CONVOLUTIONAL) {
printf("%g, ", input_mult_array[0 + i*max_num]);
sprintf(buff, "%g, ", input_mult_array[0 + i*max_num]);
fwrite(buff, sizeof(char), strlen(buff), fw);
}
char *str2 = "16";
printf("%s \n ---------------------------", str2);
fwrite(str2, sizeof(char), strlen(str2), fw);
fclose(fw);
getchar();
exit(0);
}
}
state.workspace = net.workspace;
int i;
for (i = 0; i < net.n; ++i) {
state.index = i;
layer l = net.layers[i];
if (l.type == CONVOLUTIONAL) {
if (net.do_input_calibration) { // calibration for quantization
//float multiplier = entropy_calibration(state.input, l.inputs, 1.0 / 8192, 2048);
float multiplier = entropy_calibration(state.input, l.inputs, 1.0 / 16, 4096);
//float multiplier = entropy_calibration(state.input, l.inputs, 1.0 / 4, 2*4096);
printf(" multiplier = %f, l.inputs = %d \n\n", multiplier, l.inputs);
input_mult_array[counter + i*max_num] = multiplier;
if (counter >= max_num) {
int j;
float res_mult = 0;
for (j = 0; j < max_num; ++j)
res_mult += input_mult_array[j + i*max_num];
res_mult = res_mult / max_num;
input_mult_array[0 + i*max_num] = res_mult;
printf(" res_mult = %f, max_num = %d \n", res_mult, max_num);
}
}
forward_convolutional_layer_cpu(l, state);
//printf("\n CONVOLUTIONAL \t\t l.size = %d \n", l.size);
}
else if (l.type == MAXPOOL) {
forward_maxpool_layer_cpu(l, state);
//printf("\n MAXPOOL \t\t l.size = %d \n", l.size);
}
else if (l.type == ROUTE) {
forward_route_layer_cpu(l, state);
//printf("\n ROUTE \t\t\t l.n = %d \n", l.n);
}
else if (l.type == REORG) {
forward_reorg_layer_cpu(l, state);
//printf("\n REORG \n");
}
else if (l.type == REGION) {
forward_region_layer_cpu(l, state);
//printf("\n REGION \n");
}
else {
printf("\n layer: %d \n", l.type);
}
state.input = l.output;
}
//int i;
for (i = net.n - 1; i > 0; --i) if (net.layers[i].type != COST) break;
return net.layers[i].output;
}
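/*
  Typical inference flow with the functions above (illustrative sketch only;
  assumes a 'network net' loaded and configured elsewhere, and probs/boxes
  buffers allocated by the caller):

  float *prediction = network_predict_cpu(net, input_image_data);
  layer region = net.layers[net.n - 1];   // assuming the last layer is the REGION layer
  get_region_boxes_cpu(region, 1, 1, thresh, probs, boxes, 0, NULL);
  // followed by non-maximum suppression and drawing, implemented elsewhere
*/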
|
update_ops_matrix_dense_multi.c |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "constant.h"
#include "update_ops.h"
#include "utility.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
//void multi_qubit_dense_matrix_gate_old_single(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim);
//void multi_qubit_dense_matrix_gate_old_parallel(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim);
void create_shift_mask_list_from_list_buf(const UINT* array, UINT count, UINT* dst_array, ITYPE* dst_mask);
void multi_qubit_dense_matrix_gate(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) {
if (target_qubit_index_count == 1) {
single_qubit_dense_matrix_gate(target_qubit_index_list[0], matrix, state, dim);
}
else if (target_qubit_index_count == 2) {
double_qubit_dense_matrix_gate_c(target_qubit_index_list[0], target_qubit_index_list[1], matrix, state, dim);
}
else {
//multi_qubit_dense_matrix_gate_old_single(target_qubit_index_list, target_qubit_index_count, matrix, state, dim);
//multi_qubit_dense_matrix_gate_old_parallel(target_qubit_index_list, target_qubit_index_count, matrix, state, dim);
//multi_qubit_dense_matrix_gate_single(target_qubit_index_list, target_qubit_index_count, matrix, state, dim);
//multi_qubit_dense_matrix_gate_parallel(target_qubit_index_list, target_qubit_index_count, matrix, state, dim);
//return;
#ifdef _OPENMP
UINT threshold = 10;
if (dim < (((ITYPE)1) << threshold)) {
multi_qubit_dense_matrix_gate_single(target_qubit_index_list, target_qubit_index_count, matrix, state, dim);
}
else {
multi_qubit_dense_matrix_gate_parallel(target_qubit_index_list, target_qubit_index_count, matrix, state, dim);
}
#else
multi_qubit_dense_matrix_gate_single(target_qubit_index_list, target_qubit_index_count, matrix, state, dim);
#endif
}
}
void create_shift_mask_list_from_list_buf(const UINT* array, UINT count, UINT* dst_array, ITYPE* dst_mask) {
memcpy(dst_array, array, sizeof(UINT)*count);
sort_ui(dst_array, count);
for (UINT i = 0; i < count; ++i) {
dst_mask[i] = (1UL << dst_array[i]) - 1;
}
}
void multi_qubit_dense_matrix_gate_single(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) {
UINT sort_array[64];
ITYPE mask_array[64];
create_shift_mask_list_from_list_buf(target_qubit_index_list, target_qubit_index_count, sort_array, mask_array);
// matrix dim, mask, buffer
const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count);
// loop variables
const ITYPE loop_dim = dim >> target_qubit_index_count;
CTYPE* buffer = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim));
ITYPE state_index;
for (state_index = 0; state_index < loop_dim; ++state_index) {
// create base index: insert a 0 bit at each sorted target-qubit position (low bits kept, higher bits shifted up)
ITYPE basis_0 = state_index;
for (UINT cursor = 0; cursor < target_qubit_index_count; ++cursor) {
basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1);
}
// compute matrix-vector multiply
for (ITYPE y = 0; y < matrix_dim; ++y) {
buffer[y] = 0;
for (ITYPE x = 0; x < matrix_dim; ++x) {
buffer[y] += matrix[y*matrix_dim + x] * state[basis_0 ^ matrix_mask_list[x]];
}
}
// set result
for (ITYPE y = 0; y < matrix_dim; ++y) {
state[basis_0 ^ matrix_mask_list[y]] = buffer[y];
}
}
free(buffer);
free((ITYPE*)matrix_mask_list);
}
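/*
  Worked example (illustrative only): for target qubits {0, 2} the sorted masks
  are {0b0, 0b11}; a loop index state_index = s is expanded by inserting a 0 bit
  at positions 0 and 2, e.g. s = 1 (0b1) -> 0b10 -> 0b010 = basis_0.
  matrix_mask_list then enumerates the 2^2 target-bit patterns
  {0b000, 0b001, 0b100, 0b101}, so the gate mixes the amplitudes at state
  indices {2, 3, 6, 7}.
*/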
#ifdef _OPENMP
void multi_qubit_dense_matrix_gate_parallel(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) {
UINT sort_array[64];
ITYPE mask_array[64];
create_shift_mask_list_from_list_buf(target_qubit_index_list, target_qubit_index_count, sort_array, mask_array);
// matrix dim, mask, buffer
const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count);
// loop variables
const ITYPE loop_dim = dim >> target_qubit_index_count;
const UINT thread_count = omp_get_max_threads();
CTYPE* buffer_list = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim*thread_count));
const ITYPE block_size = loop_dim / thread_count;
const ITYPE residual = loop_dim % thread_count;
#pragma omp parallel
{
UINT thread_id = omp_get_thread_num();
ITYPE start_index = block_size * thread_id + (residual > thread_id ? thread_id : residual);
ITYPE end_index = block_size * (thread_id + 1) + (residual > (thread_id + 1) ? (thread_id + 1) : residual);
CTYPE* buffer = buffer_list + thread_id * matrix_dim;
ITYPE state_index;
for (state_index = start_index; state_index < end_index; ++state_index) {
// create base index
ITYPE basis_0 = state_index;
for (UINT cursor = 0; cursor < target_qubit_index_count; ++cursor) {
basis_0 = (basis_0 & mask_array[cursor]) + ((basis_0 & (~mask_array[cursor])) << 1);
}
// compute matrix-vector multiply
for (ITYPE y = 0; y < matrix_dim; ++y) {
buffer[y] = 0;
for (ITYPE x = 0; x < matrix_dim; ++x) {
buffer[y] += matrix[y*matrix_dim + x] * state[basis_0 ^ matrix_mask_list[x]];
}
}
// set result
for (ITYPE y = 0; y < matrix_dim; ++y) {
state[basis_0 ^ matrix_mask_list[y]] = buffer[y];
}
}
}
free(buffer_list);
free((ITYPE*)matrix_mask_list);
}
#endif
/*
void multi_qubit_dense_matrix_gate_old_single(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) {
// matrix dim, mask, buffer
const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count);
// insert index
const UINT* sorted_insert_index_list = create_sorted_ui_list(target_qubit_index_list, target_qubit_index_count);
// loop variables
const ITYPE loop_dim = dim >> target_qubit_index_count;
CTYPE* buffer = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim));
ITYPE state_index;
for (state_index = 0; state_index < loop_dim; ++state_index) {
// create base index
ITYPE basis_0 = state_index;
for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) {
UINT insert_index = sorted_insert_index_list[cursor];
basis_0 = insert_zero_to_basis_index(basis_0, 1ULL << insert_index, insert_index);
}
// compute matrix-vector multiply
for (ITYPE y = 0; y < matrix_dim; ++y) {
buffer[y] = 0;
for (ITYPE x = 0; x < matrix_dim; ++x) {
buffer[y] += matrix[y*matrix_dim + x] * state[basis_0 ^ matrix_mask_list[x]];
}
}
// set result
for (ITYPE y = 0; y < matrix_dim; ++y) {
state[basis_0 ^ matrix_mask_list[y]] = buffer[y];
}
}
free(buffer);
free((UINT*)sorted_insert_index_list);
free((ITYPE*)matrix_mask_list);
}
#ifdef _OPENMP
void multi_qubit_dense_matrix_gate_old_parallel(const UINT* target_qubit_index_list, UINT target_qubit_index_count, const CTYPE* matrix, CTYPE* state, ITYPE dim) {
// matrix dim, mask, buffer
const ITYPE matrix_dim = 1ULL << target_qubit_index_count;
const ITYPE* matrix_mask_list = create_matrix_mask_list(target_qubit_index_list, target_qubit_index_count);
// insert index
const UINT* sorted_insert_index_list = create_sorted_ui_list(target_qubit_index_list, target_qubit_index_count);
// loop variables
const ITYPE loop_dim = dim >> target_qubit_index_count;
const UINT thread_count = omp_get_max_threads();
CTYPE* buffer_list = (CTYPE*)malloc((size_t)(sizeof(CTYPE)*matrix_dim*thread_count));
const ITYPE block_size = loop_dim / thread_count;
const ITYPE residual = loop_dim % thread_count;
#pragma omp parallel
{
UINT thread_id = omp_get_thread_num();
ITYPE start_index = block_size * thread_id + (residual > thread_id ? thread_id : residual);
ITYPE end_index = block_size * (thread_id + 1) + (residual > (thread_id + 1) ? (thread_id + 1) : residual);
CTYPE* buffer = buffer_list + thread_id * matrix_dim;
ITYPE state_index;
for (state_index = start_index; state_index < end_index; ++state_index) {
// create base index
ITYPE basis_0 = state_index;
for (UINT cursor = 0; cursor < target_qubit_index_count; cursor++) {
UINT insert_index = sorted_insert_index_list[cursor];
basis_0 = insert_zero_to_basis_index(basis_0, 1ULL << insert_index, insert_index);
}
// compute matrix-vector multiply
for (ITYPE y = 0; y < matrix_dim; ++y) {
buffer[y] = 0;
for (ITYPE x = 0; x < matrix_dim; ++x) {
buffer[y] += matrix[y*matrix_dim + x] * state[basis_0 ^ matrix_mask_list[x]];
}
}
// set result
for (ITYPE y = 0; y < matrix_dim; ++y) {
state[basis_0 ^ matrix_mask_list[y]] = buffer[y];
}
}
}
free(buffer_list);
free((UINT*)sorted_insert_index_list);
free((ITYPE*)matrix_mask_list);
}
#endif
*/ |
build.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <stdbool.h>
#include <sys/time.h>
#include <unistd.h>
#include "mkl.h"
#include <immintrin.h>
#include "omp.h"
void build(char* build_file, char* data_folder, char* dest_folder, char* eval_mode, int rank);
void print_float_arr(float *arr, long long int num_elements);
void print_int_arr(int *arr, int num_elements);
int* get_nonzero_summation_term_idx(char* build_file, char* data_folder, char* eval_mode, int rank);
double print_log(double log_time, double elapsed_time, int num_finished_jobs, int num_total_jobs, double log_frequency, int rank);
void scopy_sequential(long long int n, float *src, float *dst);
void scopy_par(long long int n, float *src, float *dst);
double get_sec();
int main(int argc, char** argv) {
char *build_file = argv[1];
char *data_folder = argv[2];
char *dest_folder = argv[3];
int rank = atoi(argv[4]);
int recursion_layer = atoi(argv[5]);
char *eval_mode = argv[6];
build(build_file,data_folder,dest_folder,eval_mode,rank);
// printf("recursion_layer %d build rank %d DONE\n",recursion_layer,rank);
return 0;
}
int* get_nonzero_summation_term_idx(char* build_file, char* data_folder, char* eval_mode, int rank) {
int total_active_qubit, num_subcircuits, num_summation_terms, num_cuts;
FILE* build_fptr = fopen(build_file, "r");
fscanf(build_fptr,"total_active_qubit=%d num_subcircuits=%d num_summation_terms=%d num_cuts=%d\n",\
&total_active_qubit,&num_subcircuits,&num_summation_terms,&num_cuts);
int summation_term_ctr;
int num_nonzero_summation_terms = 0;
int *non_zero_summation_term_idx = calloc(num_summation_terms+1,sizeof(int));
for (summation_term_ctr=0;summation_term_ctr<num_summation_terms;summation_term_ctr++) {
bool summation_term_is_zero = false;
int subcircuit_ctr;
for (subcircuit_ctr=0;subcircuit_ctr<num_subcircuits;subcircuit_ctr++) {
int subcircuit_idx, subcircuit_kron_index;
fscanf(build_fptr,"%d,%d ",&subcircuit_idx,&subcircuit_kron_index);
char *build_data_file = malloc(256*sizeof(char));
sprintf(build_data_file, "%s/kron_%d_%d.txt", data_folder, subcircuit_idx, subcircuit_kron_index);
if(access(build_data_file, F_OK) == -1) {
// file doesn't exist
summation_term_is_zero = true;
}
free(build_data_file);
}
if (!summation_term_is_zero || strcmp(eval_mode,"runtime")==0) {
// Add if summation term is not zero or in runtime mode
non_zero_summation_term_idx[num_nonzero_summation_terms+1] = summation_term_ctr;
num_nonzero_summation_terms++;
}
}
fclose(build_fptr);
non_zero_summation_term_idx[0] = num_nonzero_summation_terms;
// printf("num_subcircuits %d non_zero_num_summation_terms %d/%d\n",\
// num_subcircuits,num_nonzero_summation_terms,num_summation_terms);
return non_zero_summation_term_idx;
}
void build(char* build_file, char* data_folder, char* dest_folder, char* eval_mode, int rank) {
int *non_zero_summation_term_idx = get_nonzero_summation_term_idx(build_file,data_folder,eval_mode,rank);
int total_active_qubit, num_subcircuits, num_summation_terms, num_cuts;
FILE* build_fptr = fopen(build_file, "r");
fscanf(build_fptr,"total_active_qubit=%d num_subcircuits=%d num_summation_terms=%d num_cuts=%d\n",\
&total_active_qubit,&num_subcircuits,&num_summation_terms,&num_cuts);
long long int reconstruction_len = (long long int) pow(2,total_active_qubit);
float *reconstructed_prob = (float*) calloc(reconstruction_len,sizeof(float));
// cblas_sger parameters
MKL_INT incx, incy;
CBLAS_LAYOUT layout = CblasRowMajor;
float alpha = 1;
incx = 1;
incy = 1;
int summation_term_ctr;
int non_zero_summation_term_ctr = 1;
int num_non_zero_summation_terms_remaining = non_zero_summation_term_idx[0];
double total_build_time = 0;
double log_time = 0;
for (summation_term_ctr=0;summation_term_ctr<num_summation_terms;summation_term_ctr++) {
double build_begin = get_sec();
if (num_non_zero_summation_terms_remaining==0) {
// printf("Rank %d : no more remaining non_zero summation terms\n",rank);
break;
}
else if (summation_term_ctr==non_zero_summation_term_idx[non_zero_summation_term_ctr]) {
// printf("\nRank %d : summation term %d is nonzero\n",rank,summation_term_ctr);
float *summation_term = (float*) calloc(reconstruction_len,sizeof(float));
int subcircuit_ctr;
long long int summation_term_accumulated_len=1;
for (subcircuit_ctr=0;subcircuit_ctr<num_subcircuits;subcircuit_ctr++) {
// Read subcircuit
int subcircuit_idx, subcircuit_kron_index;
fscanf(build_fptr,"%d,%d ",&subcircuit_idx,&subcircuit_kron_index);
// printf("Subcircuit %d, kron term %d\n",subcircuit_idx,subcircuit_kron_index);
if (strcmp(eval_mode,"runtime")==0) {
subcircuit_kron_index = 0;
}
char *build_data_file = malloc(256*sizeof(char));
sprintf(build_data_file, "%s/kron_%d_%d.txt", data_folder, subcircuit_idx, subcircuit_kron_index);
// printf("Reading file %s\n",build_data_file);
FILE* build_data_fptr = fopen(build_data_file, "r");
int num_active;
fscanf(build_data_fptr,"num_active %d\n",&num_active);
// printf("num_active %d\n",num_active);
long long int subcircuit_active_len = (long long int) pow(2,num_active);
long long int state_ctr;
if (subcircuit_ctr==0) {
for (state_ctr=0;state_ctr<subcircuit_active_len;state_ctr++) {
// printf("Read state %d\n",state_ctr);
if (strcmp(eval_mode,"runtime")==0) {
summation_term[state_ctr] = (double)1.0/subcircuit_active_len;
}
else{
fscanf(build_data_fptr,"%f ",&summation_term[state_ctr]);
}
}
summation_term_accumulated_len *= subcircuit_active_len;
// printf("subcircuit kron term %d:\n",subcircuit_ctr);
// print_float_arr(summation_term,summation_term_accumulated_len);
}
else {
float *subcircuit_kron_term = (float*) calloc(subcircuit_active_len,sizeof(float));
for (state_ctr=0;state_ctr<subcircuit_active_len;state_ctr++) {
// printf("Read state %d\n",state_ctr);
if (strcmp(eval_mode,"runtime")==0) {
subcircuit_kron_term[state_ctr] = (double)1.0/subcircuit_active_len;
}
else{
fscanf(build_data_fptr,"%f ",&subcircuit_kron_term[state_ctr]);
}
}
// printf("subcircuit kron term %d:\n",subcircuit_ctr);
// print_float_arr(subcircuit_kron_term,subcircuit_active_len);
// Outer product via cblas_sger into a zeroed buffer: flattened in row-major order,
// this is the Kronecker product of summation_term and subcircuit_kron_term
float *dummy_summation_term = (float*) calloc(summation_term_accumulated_len*subcircuit_active_len,sizeof(float));
cblas_sger(layout, summation_term_accumulated_len, subcircuit_active_len, alpha, summation_term, incx, subcircuit_kron_term, incy, dummy_summation_term, subcircuit_active_len);
summation_term_accumulated_len *= subcircuit_active_len;
cblas_scopy(summation_term_accumulated_len, dummy_summation_term, 1, summation_term, 1);
// scopy_par(summation_term_accumulated_len, dummy_summation_term, summation_term);
free(dummy_summation_term);
free(subcircuit_kron_term);
}
fclose(build_data_fptr);
free(build_data_file);
// printf("---> ");
// print_float_arr(summation_term,summation_term_accumulated_len);
}
vsAdd(reconstruction_len, reconstructed_prob, summation_term, reconstructed_prob);
free(summation_term);
non_zero_summation_term_ctr++;
num_non_zero_summation_terms_remaining--;
}
else {
// printf("Rank %d : summation term %d is zero\n",rank,summation_term_ctr);
char line[256];
fgets(line, sizeof(line), build_fptr);
}
log_time += get_sec() - build_begin;
total_build_time += get_sec() - build_begin;
log_time = print_log(log_time,total_build_time,summation_term_ctr+1,num_summation_terms,30,rank);
if (total_build_time>600 && strcmp(eval_mode,"runtime")==0) {
break;
}
}
if (strcmp(eval_mode,"runtime")==0 && summation_term_ctr<num_summation_terms) {
double scaled_total_build_time = total_build_time/(summation_term_ctr+1)*num_summation_terms;
printf("Computed %d/%d, runtime scaling: %.3e-->%.3e\n",\
summation_term_ctr+1,num_summation_terms,total_build_time,scaled_total_build_time);
total_build_time = scaled_total_build_time;
}
cblas_sscal(reconstruction_len, pow(0.5,num_cuts), reconstructed_prob, 1);
// print_float_arr(reconstructed_prob,reconstruction_len);
char *build_result_file = malloc(256*sizeof(char));
sprintf(build_result_file, "%s/reconstructed_prob_%d.txt", dest_folder, rank);
FILE* build_data_fptr = fopen(build_result_file, "w");
long long int state_ctr;
for (state_ctr=0;state_ctr<reconstruction_len;state_ctr++) {
fprintf(build_data_fptr,"%e ",reconstructed_prob[state_ctr]);
}
fclose(build_data_fptr);
free(build_result_file);
fclose(build_fptr);
free(non_zero_summation_term_idx);
free(reconstructed_prob);
// printf("Rank %d build DONE\n", rank);
char *summary_file = malloc(256*sizeof(char));
sprintf(summary_file, "%s/rank_%d_summary.txt", dest_folder, rank);
FILE *summary_fptr = fopen(summary_file, "a");
fprintf(summary_fptr,"\nTotal build time = %e\n",total_build_time);
fprintf(summary_fptr,"DONE");
free(summary_file);
fclose(summary_fptr);
return;
}
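/*
  Note on the reconstruction step above (illustrative only): with alpha = 1 and a
  zero-initialized dummy_summation_term, cblas_sger computes the outer product
  summation_term * subcircuit_kron_term^T; read row-major, that is exactly the
  Kronecker product of the two vectors, e.g.
  sger({a, b}, {c, d}) -> {a*c, a*d, b*c, b*d} = kron({a, b}, {c, d}).
  Repeating this across subcircuits builds each full 2^total_active_qubit summation term.
*/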
void scopy_sequential(long long int n, float *src, float *dst) {
long long int n32 = n & -32;
long long int i;
float *src_curr_pos = src, *dst_curr_pos = dst;
for (i = 0; i < n32; i += 32){
_mm256_storeu_ps(dst_curr_pos, _mm256_loadu_ps(src_curr_pos));
_mm256_storeu_ps(dst_curr_pos+8, _mm256_loadu_ps(src_curr_pos+8));
_mm256_storeu_ps(dst_curr_pos+16, _mm256_loadu_ps(src_curr_pos+16));
_mm256_storeu_ps(dst_curr_pos+24, _mm256_loadu_ps(src_curr_pos+24));
src_curr_pos += 32; dst_curr_pos += 32;
}
if (n32 == n) return;
src_curr_pos = src + n32;
dst_curr_pos = dst + n32;
for (i = n32; i < n; i++){
*dst_curr_pos = *src_curr_pos;
dst_curr_pos++;
src_curr_pos++;
}
}
void scopy_par(long long int n, float *src, float *dst) {
char *omp_num_threads_env = getenv("OMP_NUM_THREADS");
int TOTAL_THREADS = (omp_num_threads_env != NULL) ? atoi(omp_num_threads_env) : 1; // avoid atoi(NULL) when the variable is unset
if (TOTAL_THREADS<=1){
scopy_sequential(n,src,dst);
return;
}
int tid;
int max_cpu_num=(int)sysconf(_SC_NPROCESSORS_ONLN);
if (TOTAL_THREADS>max_cpu_num) TOTAL_THREADS=max_cpu_num;
#pragma omp parallel for schedule(static)
for (tid = 0; tid < TOTAL_THREADS; tid++){
// thread 0 copies its chunk plus the remainder (EDGE_LEN); every other thread copies DIM_LEN
long int NUM_DIV_NUM_THREADS = n / TOTAL_THREADS * TOTAL_THREADS;
long int DIM_LEN = n / TOTAL_THREADS;
long int EDGE_LEN = (NUM_DIV_NUM_THREADS == n) ? n / TOTAL_THREADS : n - NUM_DIV_NUM_THREADS + DIM_LEN;
if (tid == 0)
scopy_sequential(EDGE_LEN,src,dst);
else
scopy_sequential(DIM_LEN,src + EDGE_LEN + (tid - 1) * DIM_LEN, dst + EDGE_LEN + (tid - 1) * DIM_LEN);
}
return;
}
void print_int_arr(int *arr, int num_elements) {
int ctr;
if (num_elements<=10) {
for (ctr=0;ctr<num_elements;ctr++) {
printf("%d ",arr[ctr]);
}
}
else {
for (ctr=0;ctr<5;ctr++) {
printf("%d ",arr[ctr]);
}
printf(" ... ");
for (ctr=num_elements-5;ctr<num_elements;ctr++) {
printf("%d ",arr[ctr]);
}
}
printf(" = %d elements\n",num_elements);
}
void print_float_arr(float *arr, long long int num_elements) {
long long int ctr;
if (num_elements<=10) {
for (ctr=0;ctr<num_elements;ctr++) {
printf("%e ",arr[ctr]);
}
}
else {
for (ctr=0;ctr<5;ctr++) {
printf("%e ",arr[ctr]);
}
printf(" ... ");
for (ctr=num_elements-5;ctr<num_elements;ctr++) {
printf("%e ",arr[ctr]);
}
}
printf(" = %lld elements\n",num_elements);
}
double print_log(double log_time, double elapsed_time, int num_finished_jobs, int num_total_jobs, double log_frequency, int rank) {
if (log_time>log_frequency) {
double eta = elapsed_time/num_finished_jobs*num_total_jobs - elapsed_time;
printf("Rank %d finished building %d/%d, elapsed = %e, ETA = %e\n",rank,num_finished_jobs,num_total_jobs,elapsed_time,eta);
return 0;
}
else {
return log_time;
}
}
double get_sec() {
struct timeval time;
gettimeofday(&time, NULL);
return (time.tv_sec + 1e-6 * time.tv_usec);
}
|
sageInterface.h | #ifndef ROSE_SAGE_INTERFACE
#define ROSE_SAGE_INTERFACE
#include "sage3basic.hhh"
#include <stdint.h>
#include <utility>
#include "rosePublicConfig.h" // for ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
#include "OmpAttribute.h"
#if 0 // FMZ(07/07/2010): the argument "nextErrorCode" should be call-by-reference
SgFile* determineFileType ( std::vector<std::string> argv, int nextErrorCode, SgProject* project );
#else
SgFile* determineFileType ( std::vector<std::string> argv, int& nextErrorCode, SgProject* project );
#endif
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
#include "rewrite.h"
#endif
// DQ (7/20/2008): Added support for unparsing arbitrary strings in the unparser.
#include "astUnparseAttribute.h"
#include <set>
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
#include "LivenessAnalysis.h"
#include "abstract_handle.h"
#include "ClassHierarchyGraph.h"
#endif
// DQ (8/19/2004): Moved from ROSE/src/midend/astRewriteMechanism/rewrite.h
//! A global function for getting the string associated with an enum (which is defined in global scope)
ROSE_DLL_API std::string getVariantName (VariantT v);
// DQ (12/9/2004): Qing, Rich and Dan have decided to start this namespace within ROSE
// This namespace is specific to interface functions that operate on the Sage III AST.
// The name was chosen so as not to conflict with other classes within ROSE.
// This will become the future home of many interface functions which operate on
// the AST and which are generally useful to users. As a namespace multiple files can be used
// to represent the complete interface and different developers may contribute interface
// functions easily.
// Constructor handling: (We have sageBuilder.h now for this purpose, Liao 2/1/2008)
// We could add simpler layers of support for construction of IR nodes by
// hiding many details in "makeSg***()" functions. Such functions would
// return pointers to the associated Sg*** objects and would be able to hide
// many IR specific details, including:
// memory handling
// optional parameter settings not often required
// use of Sg_File_Info objects (and setting them as transformations)
//
// namespace AST_Interface (this name is taken already by some of Qing's work :-)
//! An alias for Sg_File_Info::generateDefaultFileInfoForTransformationNode()
#define TRANS_FILE Sg_File_Info::generateDefaultFileInfoForTransformationNode()
/** Functions that are useful when operating on the AST.
*
* The Sage III IR design attempts to be minimalist. Thus additional functionality is intended to be presented using separate
* higher level interfaces which work with the IR. This namespace collects functions that operate on the IR and support
* numerous types of operations that are common to general analysis and transformation of the AST. */
namespace SageInterface
{
// Liao 6/22/2016: keep records of loop init-stmt normalization so it can later be undone to support autoPar.
struct Transformation_Record
{
// a lookup table to check if a for loop has been normalized for its c99-style init-stmt
std::map <SgForStatement* , bool > forLoopInitNormalizationTable;
// Detailed record about the original declaration (1st in the pair) and the normalization generated new declaration (2nd in the pair)
std::map <SgForStatement* , std::pair<SgVariableDeclaration*, SgVariableDeclaration*> > forLoopInitNormalizationRecord;
} ;
ROSE_DLL_API extern Transformation_Record trans_records;
// DQ (4/3/2014): Added general AST support separate from the AST.
// Container and API for analysis information that is outside of the AST and as a result
// prevents frequent modification of the IR.
class DeclarationSets
{
// DQ (4/3/2014): This stores all associated declarations as a map of sets.
// the key to the map is the first nondefining declaration and the elements of the set are
// all of the associated declarations (including the defining declaration).
private:
//! Map of first-nondefining declaration to all other associated declarations.
std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > declarationMap;
public:
void addDeclaration(SgDeclarationStatement* decl);
const std::set<SgDeclarationStatement*>* getDeclarations(SgDeclarationStatement* decl);
std::map<SgDeclarationStatement*,std::set<SgDeclarationStatement*>* > & getDeclarationMap();
bool isLocatedInDefiningScope(SgDeclarationStatement* decl);
};
// DQ (4/3/2014): This constructs a data structure that holds analysis information about
// the AST that is separate from the AST. This is intended to be a general mechanism
// to support analysis information without constantly modifying the IR.
DeclarationSets* buildDeclarationSets(SgNode*);
//! An internal counter for generating unique SgName
ROSE_DLL_API extern int gensym_counter;
// tps : 28 Oct 2008 - support for finding the main interpretation
SgAsmInterpretation* getMainInterpretation(SgAsmGenericFile* file);
//! Get the unsigned value of a disassembled constant.
uint64_t getAsmConstant(SgAsmValueExpression* e);
//! Get the signed value of a disassembled constant.
int64_t getAsmSignedConstant(SgAsmValueExpression *e);
//! Function to add "C" style comment to statement.
void addMessageStatement( SgStatement* stmt, std::string message );
//! A persistent attribute to represent a unique name for an expression
class UniqueNameAttribute : public AstAttribute
{
private:
std::string name;
public:
UniqueNameAttribute(std::string n="") {name =n; };
void set_name (std::string n) {name = n;};
std::string get_name () {return name;};
};
// DQ (3/2/2009): Added support for collecting and merging the referenced symbols in the outlined
// function into the list used to edit the outlined code subtree to fixup references (from symbols
// in the original file to the symbols in the newer separate file).
// typedef rose_hash::unordered_map<SgNode*, SgNode*, hash_nodeptr> ReplacementMapType;
// void supplementReplacementSymbolMap ( const ReplacementMapTraversal::ReplacementMapType & inputReplacementMap );
// CH (4/9/2010): Use boost::hash instead
//#ifdef _MSC_VER
#if 0
inline size_t hash_value(SgNode* t) {return (size_t)t;}
#endif
#if 0
// DQ (8/3/2015): We expect that this is not used and is generating a warnings so we
// can best fix it by removing it.
struct hash_nodeptr
{
// CH (4/9/2010): Use boost::hash instead
//#ifndef _MSC_VER
#if 0
//rose_hash::hash<char*> hasher;
#endif
public:
size_t operator()(SgNode* node) const
{
// CH (4/9/2010): Use boost::hash instead
//#ifdef _MSC_VER
#if 0
return (size_t) hash_value(node);
#else
return (size_t) node;
#endif
}
};
#ifndef SWIG
// DQ (3/10/2013): This appears to be a problem for the SWIG interface (undefined reference at link-time).
void supplementReplacementSymbolMap ( rose_hash::unordered_map<SgNode*, SgNode*, hash_nodeptr> & inputReplacementMap );
#endif
#endif
//------------------------------------------------------------------------
//@{
/*! @name Symbol tables
\brief utility functions for symbol tables
*/
// Liao 1/22/2008, used to get symbols for generating variable reference nodes
//! Find a variable symbol in current and ancestor scopes for a given name
ROSE_DLL_API SgVariableSymbol *lookupVariableSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL);
// DQ (8/21/2013): Modified to make newest function parameters be default arguments.
// DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments.
//! Find a symbol in current and ancestor scopes for a given variable name, starting from top of ScopeStack if currentscope is not given or NULL.
// SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope=NULL);
// SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList);
ROSE_DLL_API SgSymbol *lookupSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
// DQ (11/24/2007): Functions moved from the Fortran support so that they could be called from within astPostProcessing.
//!look up the first matched function symbol in parent scopes given only a function name, starting from top of ScopeStack if currentscope is not given or NULL
ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName, SgScopeStatement *currentScope=NULL);
// Liao, 1/24/2008, find exact match for a function
//!look up function symbol in parent scopes given both name and function type, starting from top of ScopeStack if currentscope is not given or NULL
ROSE_DLL_API SgFunctionSymbol *lookupFunctionSymbolInParentScopes (const SgName & functionName,
const SgType* t,
SgScopeStatement *currentScope=NULL);
// DQ (8/21/2013): Modified to make newest function parameters be default arguments.
// DQ (8/16/2013): For now we want to remove the use of default parameters and add the support for template parameters and template arguments.
// DQ (5/7/2011): Added support for SgClassSymbol (used in name qualification support).
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
ROSE_DLL_API SgTypedefSymbol* lookupTypedefSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
#if 0
// DQ (8/13/2013): This function does not make since any more, now that we have made the symbol
// table handling more precise and we have to provide template parameters for any template lookup.
// We also have to know if we want to lookup template classes, template functions, or template
// member functions (since each have specific requirements).
SgTemplateSymbol* lookupTemplateSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
#endif
#if 0
// DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes.
// Where these are called we might not know enough information about the template parameters or function
// types, for example.
SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL, SgTemplateArgumentPtrList* templateArgumentList = NULL);
SgTemplateFunctionSymbol* lookupTemplateFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL);
SgTemplateMemberFunctionSymbol* lookupTemplateMemberFunctionSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL, SgTemplateParameterPtrList* templateParameterList = NULL);
#endif
// DQ (8/21/2013): Modified to make some of the newest function parameters be default arguments.
// DQ (8/13/2013): I am not sure if we want this functions in place of lookupTemplateSymbolInParentScopes.
ROSE_DLL_API SgTemplateClassSymbol* lookupTemplateClassSymbolInParentScopes (const SgName & name, SgTemplateParameterPtrList* templateParameterList, SgTemplateArgumentPtrList* templateArgumentList, SgScopeStatement *cscope = NULL);
ROSE_DLL_API SgEnumSymbol* lookupEnumSymbolInParentScopes (const SgName & name, SgScopeStatement *currentScope = NULL);
ROSE_DLL_API SgNamespaceSymbol* lookupNamespaceSymbolInParentScopes(const SgName & name, SgScopeStatement *currentScope = NULL);
// DQ (7/17/2011): Added function from cxx branch that I need here for the Java support.
// SgClassSymbol* lookupClassSymbolInParentScopes (const SgName & name, SgScopeStatement *cscope);
/*! \brief set_name of symbol in symbol table.
This function extracts the symbol from the relevant symbol table,
changes the name (at the declaration) and reinserts it into the
symbol table.
\internal I think this is what this function does, I need to double check.
*/
// DQ (12/9/2004): Moved this function (by Alin Jula) from being a member of SgInitializedName
// to this location where it can be a part of the interface for the Sage III AST.
ROSE_DLL_API int set_name (SgInitializedName * initializedNameNode, SgName new_name);
/*! \brief Output function type symbols in global function type symbol table.
*/
void outputGlobalFunctionTypeSymbolTable ();
// DQ (6/27/2005):
/*! \brief Output the local symbol tables.
\implementation Each symbol table is output with the file info describing where it is located in the source code.
*/
ROSE_DLL_API void outputLocalSymbolTables (SgNode * node);
class OutputLocalSymbolTables:public AstSimpleProcessing
{
public:
void visit (SgNode * node);
};
/*! \brief Regenerate the symbol table.
\implementation current symbol table must be NULL pointer before calling this
function (for safety, but is this a good idea?)
*/
// DQ (9/28/2005):
void rebuildSymbolTable (SgScopeStatement * scope);
/*! \brief Clear those variable symbols with unknown type (together with initialized names) which are also not referenced by any variable references or declarations under root. If root is NULL, all symbols with unknown type will be deleted.
*/
void clearUnusedVariableSymbols (SgNode* root = NULL);
// DQ (3/1/2009):
//! All the symbol table references in the copied AST need to be reset after rebuilding the copied scope's symbol table.
void fixupReferencesToSymbols( const SgScopeStatement* this_scope, SgScopeStatement* copy_scope, SgCopyHelp & help );
//@}
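// A minimal usage sketch for the lookup helpers above (illustrative only;
// assumes a valid SgScopeStatement* 'scope' obtained elsewhere, e.g. from an
// AST traversal):
//
//   SgVariableSymbol* sym =
//       SageInterface::lookupVariableSymbolInParentScopes(SgName("counter"), scope);
//   if (sym != NULL)
//       std::cout << SageInterface::get_name(sym) << std::endl;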
//------------------------------------------------------------------------
//@{
/*! @name Stringify
\brief Generate a useful string (name) to describe a SgNode
*/
/*! \brief Generate a useful name to describe the SgNode
\internal default names are used for SgNode objects that can not be associated with a name.
*/
// DQ (9/21/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgNode * node);
/*! \brief Generate a useful name to describe the declaration
\internal default names are used for declarations that can not be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgStatement * stmt);
/*! \brief Generate a useful name to describe the expression
\internal default names are used for expressions that can not be associated with a name.
*/
std::string get_name (const SgExpression * expr);
/*! \brief Generate a useful name to describe the declaration
\internal default names are used for declarations that can not be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgDeclarationStatement * declaration);
/*! \brief Generate a useful name to describe the scope
\internal default names are used for scope that cannot be associated with a name.
*/
// DQ (6/13/2005): General function for extracting the name of declarations (when they have names)
std::string get_name (const SgScopeStatement * scope);
/*! \brief Generate a useful name to describe the SgSymbol
\internal default names are used for SgSymbol objects that cannot be associated with a name.
*/
// DQ (2/11/2007): Added this function to make debugging support more complete (useful for symbol table debugging support).
std::string get_name (const SgSymbol * symbol);
/*! \brief Generate a useful name to describe the SgType
\internal default names are used for SgType objects that cannot be associated with a name.
*/
std::string get_name (const SgType * type);
/*! \brief Generate a useful name to describe the SgSupport IR node
*/
std::string get_name (const SgSupport * node);
/*! \brief Generate a useful name to describe the SgLocatedNodeSupport IR node
*/
std::string get_name (const SgLocatedNodeSupport * node);
/*! \brief Generate a useful name to describe the SgC_PreprocessorDirectiveStatement IR node
*/
std::string get_name ( const SgC_PreprocessorDirectiveStatement* directive );
/*! \brief Generate a useful name to describe the SgToken IR node
*/
std::string get_name ( const SgToken* token );
// DQ (3/20/2016): Added to refactor some of the DSL infrastructure support.
/*! \brief Generate a useful name to support construction of identifiers from declarations.
This function permits names to be generated that will be unique across translation units
(a specific requirement different from the context of the get_name() functions above).
\internal This supports only a restricted set of declarations presently.
*/
std::string generateUniqueNameForUseAsIdentifier ( SgDeclarationStatement* declaration );
std::string generateUniqueNameForUseAsIdentifier_support ( SgDeclarationStatement* declaration );
/*! \brief Global map of name collisions to support generateUniqueNameForUseAsIdentifier() function.
*/
extern std::map<std::string,int> local_name_collision_map;
extern std::map<std::string,SgNode*> local_name_to_node_map;
extern std::map<SgNode*,std::string> local_node_to_name_map;
/*! \brief Traversal to set the global maps of names to nodes and nodes to names (and the name-collision map) used to support the generateUniqueNameForUseAsIdentifier() function.
*/
void computeUniqueNameForUseAsIdentifier( SgNode* astNode );
/*! \brief Reset map variables used to support generateUniqueNameForUseAsIdentifier() function.
*/
void reset_name_collision_map();
//@}
//------------------------------------------------------------------------
//@{
/*! @name Class utilities
\brief
*/
/*! \brief Get the default destructor from the class declaration
*/
// DQ (6/21/2005): Get the default destructor from the class declaration
SgMemberFunctionDeclaration *getDefaultDestructor (SgClassDeclaration *
classDeclaration);
/*! \brief Get the default constructor from the class declaration
*/
// DQ (6/22/2005): Get the default constructor from the class declaration
ROSE_DLL_API SgMemberFunctionDeclaration *getDefaultConstructor (SgClassDeclaration *
classDeclaration);
/*! \brief Return true if template definition is in the class, false if outside of class.
*/
// DQ (8/27/2005):
bool templateDefinitionIsInClass (SgTemplateInstantiationMemberFunctionDecl
* memberFunctionDeclaration);
/*! \brief Generate a non-defining (forward) declaration from a defining function declaration.
\internal should put into sageBuilder ?
*/
// DQ (9/17/2005):
SgTemplateInstantiationMemberFunctionDecl*
buildForwardFunctionDeclaration
(SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation);
//! Check if a SgNode is a declaration for a structure
bool isStructDeclaration(SgNode * node);
//! Check if a SgNode is a declaration for a union
bool isUnionDeclaration(SgNode * node);
#if 0
// DQ (8/28/2005): This is already a member function of the SgFunctionDeclaration
// (so that it can handle template functions and member functions)
/*! \brief Return true if the member function is a template member function,
   or false if it is a non-template member function in a templated class.
*/
// DQ (8/27/2005):
bool isTemplateMemberFunction (SgTemplateInstantiationMemberFunctionDecl *
memberFunctionDeclaration);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name Misc.
\brief Miscellaneous utilities whose classification is still undecided
*/
//! Save AST into a pdf file. Start from a node to find its enclosing file node. The entire file's AST will be saved into a pdf.
void saveToPDF(SgNode* node, std::string filename);
void saveToPDF(SgNode* node); // enable calling from gdb
// DQ (2/12/2012): Added some diagnostic support.
//! Diagnostic function for tracing back through the parent list to understand at runtime where in the AST a failure happened.
void whereAmI(SgNode* node);
//! Extract a SgPragmaDeclaration's leading keyword. For example "#pragma omp parallel" has a keyword of "omp".
std::string extractPragmaKeyword(const SgPragmaDeclaration *);
//! Check if a node is SgOmp*Statement
ROSE_DLL_API bool isOmpStatement(SgNode* );
/*! \brief Return true if function is overloaded.
*/
// DQ (8/27/2005):
bool isOverloaded (SgFunctionDeclaration * functionDeclaration);
// DQ (2/14/2012): Added support function used for variable declarations in conditionals.
//! Support function used for variable declarations in conditionals
void initializeIfStmt(SgIfStmt *ifstmt, SgStatement* conditional, SgStatement * true_body, SgStatement * false_body);
//! Support function used for variable declarations in conditionals
void initializeSwitchStatement(SgSwitchStatement* switchStatement,SgStatement *item_selector,SgStatement *body);
//! Support function used for variable declarations in conditionals
void initializeWhileStatement(SgWhileStmt* whileStatement, SgStatement * condition, SgStatement *body, SgStatement *else_body);
//! Generate unique names for expressions and attach the names as persistent attributes ("UniqueNameAttribute")
void annotateExpressionsWithUniqueNames (SgProject* project);
//! Check if a SgNode is a main() function declaration
ROSE_DLL_API bool isMain (const SgNode* node);
// DQ (6/22/2005):
/*! \brief Generate a unique name from C and C++ constructs. The name may contain spaces.
This supports the AST merge, but is generally useful as a mechanism more general than
name mangling, which is more closely tied to the generation of names to support link-time function name
resolution. It is more general than common name mangling in that it resolves more relevant differences
between C and C++ declarations. (e.g. the type within the declaration: "struct { int:8; } foo;").
\implementation current work does not support expressions.
*/
std::string generateUniqueName ( const SgNode * node, bool ignoreDifferenceBetweenDefiningAndNondefiningDeclarations);
/** Generate a name like __temp#__ that is unique in the current scope and any parent and children scopes. # is a unique integer counter.
* @param baseName the word to be included in the variable names. */
std::string generateUniqueVariableName(SgScopeStatement* scope, std::string baseName = "temp");
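// Usage sketch (illustrative only; assumes the declarations above are in scope and "body" is a
// hypothetical SgScopeStatement* obtained elsewhere):
//   std::string tmp = generateUniqueVariableName(body);           // e.g. "__temp0__"
//   std::string idx = generateUniqueVariableName(body, "index");  // e.g. "__index1__"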
// DQ (8/10/2010): Added const to first parameter.
// DQ (3/10/2007):
//! Generate a unique string from the source file position information
std::string declarationPositionString (const SgDeclarationStatement * declaration);
// DQ (1/20/2007):
//! Added mechanism to generate project name from list of file names
ROSE_DLL_API std::string generateProjectName (const SgProject * project, bool supressSuffix = false );
//! Given a SgExpression that represents a named function (or bound member
//! function), return the mentioned function
SgFunctionDeclaration* getDeclarationOfNamedFunction(SgExpression* func);
//! Get the mask expression from the header of a SgForAllStatement
SgExpression* forallMaskExpression(SgForAllStatement* stmt);
//! Find all SgPntrArrRefExp under astNode, then add SgVarRefExp (if any) of SgPntrArrRefExp's dim_info into NodeList_t
void addVarRefExpFromArrayDimInfo(SgNode * astNode, Rose_STL_Container<SgNode *>& NodeList_t);
// DQ (10/6/2006): Added support for faster mangled name generation (caching avoids recomputation).
/*! \brief Support for faster mangled name generation (caching avoids recomputation).
*/
#ifndef SWIG
// DQ (3/10/2013): This appears to be a problem for the SWIG interface (undefined reference at link-time).
void clearMangledNameCache (SgGlobal * globalScope);
void resetMangledNameCache (SgGlobal * globalScope);
#endif
std::string getMangledNameFromCache (SgNode * astNode);
std::string addMangledNameToCache (SgNode * astNode, const std::string & mangledName);
SgDeclarationStatement * getNonInstantiatonDeclarationForClass (SgTemplateInstantiationMemberFunctionDecl * memberFunctionInstantiation);
//! A better version of SgVariableDeclaration::set_baseTypeDefininingDeclaration(), handling all side effects automatically.
//! Used when a struct declaration is embedded into a variable declaration.
void setBaseTypeDefiningDeclaration(SgVariableDeclaration* var_decl, SgDeclarationStatement *base_decl);
// DQ (10/14/2006): This function tests the AST to see if for a non-defining declaration, the
// bool declarationPreceedsDefinition ( SgClassDeclaration* classNonDefiningDeclaration, SgClassDeclaration* classDefiningDeclaration );
//! Check if a defining declaration comes before or after the non-defining declaration.
bool declarationPreceedsDefinition (SgDeclarationStatement *nonDefiningDeclaration, SgDeclarationStatement *definingDeclaration);
// DQ (10/19/2006): Function calls have interesting context dependent rules to determine if
// they are output with a global qualifier or not. Where this is true we have to avoid global
// qualifiers, since the function's scope has not been defined. This is an example of where
// qualification of function names in function calls are context dependent; an interesting
// example of where the C++ language is not friendly to source-to-source processing :-).
bool functionCallExpressionPreceedsDeclarationWhichAssociatesScope (SgFunctionCallExp * functionCall);
/*! \brief Compute the intersection set for two ASTs.
This is part of a test done by the copy function to compute those IR nodes in the copy that still reference the original AST.
*/
ROSE_DLL_API std::vector < SgNode * >astIntersection (SgNode * original, SgNode * copy, SgCopyHelp * help = NULL);
//! Deep copy an arbitrary subtree
ROSE_DLL_API SgNode* deepCopyNode (const SgNode* subtree);
//! A template function for deep copying a subtree. It is also used to create deepcopy functions with specialized parameter and return types. e.g SgExpression* copyExpression(SgExpression* e);
template <typename NodeType>
NodeType* deepCopy (const NodeType* subtree) {
return dynamic_cast<NodeType*>(deepCopyNode(subtree));
}
//! Deep copy an expression
ROSE_DLL_API SgExpression* copyExpression(SgExpression* e);
//!Deep copy a statement
ROSE_DLL_API SgStatement* copyStatement(SgStatement* s);
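// Usage sketch (illustrative only; "stmt", "expr", and "forStmt" are hypothetical nodes obtained elsewhere):
//   SgStatement*    clonedStmt = copyStatement(stmt);               // deep copy of a statement subtree
//   SgExpression*   clonedExpr = copyExpression(expr);              // deep copy of an expression subtree
//   SgForStatement* clonedFor  = deepCopy<SgForStatement>(forStmt); // typed deep copy via the template above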
// from VarSym.cc in src/midend/astOutlining/src/ASTtools
//! Get the variable symbol for the first initialized name of a declaration stmt.
ROSE_DLL_API SgVariableSymbol* getFirstVarSym (SgVariableDeclaration* decl);
//! Get the first initialized name of a declaration statement
ROSE_DLL_API SgInitializedName* getFirstInitializedName (SgVariableDeclaration* decl);
//! A special-purpose statement removal function, originally from inlinerSupport.h. It needs Jeremiah's attention to be refined; please don't use it for now.
ROSE_DLL_API void myRemoveStatement(SgStatement* stmt);
ROSE_DLL_API bool isConstantTrue(SgExpression* e);
ROSE_DLL_API bool isConstantFalse(SgExpression* e);
ROSE_DLL_API bool isCallToParticularFunction(SgFunctionDeclaration* decl, SgExpression* e);
ROSE_DLL_API bool isCallToParticularFunction(const std::string& qualifiedName, size_t arity, SgExpression* e);
//! Check if a declaration has a "static' modifier
bool ROSE_DLL_API isStatic(SgDeclarationStatement* stmt);
//! Set a declaration as static
ROSE_DLL_API void setStatic(SgDeclarationStatement* stmt);
//! Check if a declaration has an "extern" modifier
ROSE_DLL_API bool isExtern(SgDeclarationStatement* stmt);
//! Set a declaration as extern
ROSE_DLL_API void setExtern(SgDeclarationStatement* stmt);
//! Interface for creating a statement whose computation writes its answer into
//! a given variable.
class StatementGenerator {
public:
virtual ~StatementGenerator() {};
virtual SgStatement* generate(SgExpression* where_to_write_answer) = 0;
};
//! Check if a SgNode _s is an assignment statement (any of =, +=, -=, &=, /=, ^=, etc.)
//!
//! Optionally return the left-hand and right-hand expressions and whether the left-hand variable is also being read
bool isAssignmentStatement(SgNode* _s, SgExpression** lhs=NULL, SgExpression** rhs=NULL, bool* readlhs=NULL);
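// Usage sketch (illustrative only; "node" is a hypothetical SgNode* visited during a traversal):
//   SgExpression* lhs = NULL;
//   SgExpression* rhs = NULL;
//   bool readlhs = false;
//   if (isAssignmentStatement(node, &lhs, &rhs, &readlhs))
//   {
//     // lhs/rhs are the two sides of the assignment; readlhs is true for compound forms like +=.
//   }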
//! Variable references can be introduced by SgVarRef, SgPntrArrRefExp, SgInitializedName, SgMemberFunctionRef etc. For Dot and Arrow Expressions, their lhs is used to obtain SgInitializedName (coarse grain) by default. Otherwise, fine-grain rhs is used.
ROSE_DLL_API SgInitializedName* convertRefToInitializedName(SgNode* current, bool coarseGrain=true);
//! Build an abstract handle from an AST node, reuse previously built handle when possible
ROSE_DLL_API AbstractHandle::abstract_handle* buildAbstractHandle(SgNode*);
//! Obtain a matching SgNode from an abstract handle string
ROSE_DLL_API SgNode* getSgNodeFromAbstractHandleString(const std::string& input_string);
//! Dump information about a SgNode for debugging
ROSE_DLL_API void dumpInfo(SgNode* node, std::string desc="");
//! Reorder a list of declaration statements based on their appearance order in source files
ROSE_DLL_API std::vector<SgDeclarationStatement*>
sortSgNodeListBasedOnAppearanceOrderInSource(const std::vector<SgDeclarationStatement*>& nodevec);
// DQ (4/13/2013): We need these to support the unparing of operators defined by operator syntax or member function names.
//! Is an overloaded operator a prefix operator (e.g. address operator X * operator&(), dereference operator X & operator*(), unary plus operator X & operator+(), etc.)
// bool isPrefixOperator( const SgMemberFunctionRefExp* memberFunctionRefExp );
bool isPrefixOperator( SgExpression* exp );
//! Check for proper names of possible prefix operators (used in isPrefixOperator()).
bool isPrefixOperatorName( const SgName & functionName );
//! Is an overloaded operator a postfix operator (e.g. X operator++(int) or X operator--(int)).
bool isPostfixOperator( SgExpression* exp );
//! Is an overloaded operator an index operator (also referred to as call or subscript operators). (e.g. X & operator()() or X & operator[]()).
bool isIndexOperator( SgExpression* exp );
// DQ (1/10/2014): Adding more general support for token based unparsing.
//! Used to support token unparsing (when outputting the trailing token sequence).
SgStatement* lastStatementOfScopeWithTokenInfo (SgScopeStatement* scope, std::map<SgNode*,TokenStreamSequenceToNodeMapping*> & tokenStreamSequenceMap);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST properties
\brief version, language properties of current AST.
*/
// std::string version(); // utility_functions.h, version number
/*! \brief These traverse the memory pool of SgFile IR nodes and determine what languages are in use.
*/
ROSE_DLL_API bool is_C_language ();
ROSE_DLL_API bool is_OpenMP_language ();
ROSE_DLL_API bool is_UPC_language ();
//! Check if dynamic threads compilation is used for UPC programs
ROSE_DLL_API bool is_UPC_dynamic_threads();
ROSE_DLL_API bool is_C99_language ();
ROSE_DLL_API bool is_Cxx_language ();
ROSE_DLL_API bool is_Java_language ();
ROSE_DLL_API bool is_Fortran_language ();
ROSE_DLL_API bool is_CAF_language ();
ROSE_DLL_API bool is_PHP_language();
ROSE_DLL_API bool is_Python_language();
ROSE_DLL_API bool is_Cuda_language();
ROSE_DLL_API bool is_OpenCL_language();
ROSE_DLL_API bool is_X10_language();
ROSE_DLL_API bool is_binary_executable();
ROSE_DLL_API bool is_mixed_C_and_Cxx_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_C_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_Cxx_language ();
ROSE_DLL_API bool is_mixed_Fortran_and_C_and_Cxx_language ();
//@}
//------------------------------------------------------------------------
//@{
/*! @name Scope
\brief
*/
// DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique
// labels for scopes in a function (as required for name mangling).
/*! \brief Assigns unique numbers to each SgScopeStatement of a function.
This is used to provide unique names for variables and types defined in
different nested scopes of a function (used in mangled name generation).
*/
void resetScopeNumbers (SgFunctionDefinition * functionDeclaration);
// DQ (10/5/2006): Added support for faster (non-quadratic) computation of unique
// labels for scopes in a function (as required for name mangling).
/*! \brief Clears the cache of scope,integer pairs for the input function.
This is used to clear the cache of computed unique labels for scopes in a function.
This function should be called after any transformation on a function that might affect
the allocation of scopes and cause the existing unique numbers to be incorrect.
This is part of support to provide unique names for variables and types defined in
different nested scopes of a function (used in mangled name generation).
*/
void clearScopeNumbers (SgFunctionDefinition * functionDefinition);
//!Find the enclosing namespace of a declaration
SgNamespaceDefinitionStatement * enclosingNamespaceScope (SgDeclarationStatement * declaration);
// SgNamespaceDefinitionStatement * getEnclosingNamespaceScope (SgNode * node);
bool isPrototypeInScope (SgScopeStatement * scope,
SgFunctionDeclaration * functionDeclaration,
SgDeclarationStatement * startingAtDeclaration);
//! Check if node1 is a strict ancestor of node2. (A node is not considered its own ancestor.)
bool ROSE_DLL_API isAncestor(SgNode* node1, SgNode* node2);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Preprocessing Information
\brief #if-#else-#end, comments, #include, etc
*/
//! Dumps a located node's preprocessing information.
void dumpPreprocInfo (SgLocatedNode* locatedNode);
//! Insert #include "filename" or #include <filename> (system header) onto the global scope of a source file, add to be the last #include .. by default among existing headers, Or as the first header. Recommended for use.
PreprocessingInfo * insertHeader(SgSourceFile * source_file, const std::string & header_file_name, bool isSystemHeader, bool asLastHeader);
//! Insert a new header right before stmt, if there are existing headers attached to stmt, insert it as the last or first header as specified by asLastHeader
void insertHeader (SgStatement* stmt, PreprocessingInfo* newheader, bool asLastHeader);
//! Insert #include "filename" or #include <filename> (system header) onto the global scope of a source file
PreprocessingInfo * insertHeader(SgSourceFile * source_file, const std::string & header_file_name, bool isSystemHeader = false, PreprocessingInfo::RelativePositionType position = PreprocessingInfo::before);
//! Insert #include "filename" or #include <filename> (system header) into the global scope containing the current scope, right after other #include XXX.
ROSE_DLL_API PreprocessingInfo* insertHeader(const std::string& filename, PreprocessingInfo::RelativePositionType position=PreprocessingInfo::after, bool isSystemHeader=false, SgScopeStatement* scope=NULL);
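// Usage sketch (illustrative only; "srcFile" and "funcScope" are hypothetical nodes obtained elsewhere):
//   insertHeader(srcFile, "my_header.h", false, true);                  // user header, added as the last #include
//   insertHeader("stdio.h", PreprocessingInfo::after, true, funcScope); // system header, placed via the enclosing global scope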
//! Identical to movePreprocessingInfo(), except for the stale name and confusing order of parameters. It will be deprecated soon.
ROSE_DLL_API void moveUpPreprocessingInfo (SgStatement* stmt_dst, SgStatement* stmt_src, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef, PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false);
//! Move preprocessing information from stmt_src to stmt_dst. If a source-relative position is specified, only the preprocessing information at that position is moved to the specified target position; otherwise all preprocessing information is moved with its position information intact. The preprocessing information is appended to the existing preprocessing information list of the target node by default; prepending is used if usePrepend is set to true. Optionally, the relative position can be adjusted after the move using dst_position.
ROSE_DLL_API void movePreprocessingInfo (SgStatement* stmt_src, SgStatement* stmt_dst, PreprocessingInfo::RelativePositionType src_position=PreprocessingInfo::undef,
PreprocessingInfo::RelativePositionType dst_position=PreprocessingInfo::undef, bool usePrepend= false);
//! Cut preprocessing information from a source node and save it into a buffer. Used in combination with pastePreprocessingInfo(). The cut-paste operation is similar to moveUpPreprocessingInfo() but is more flexible in that the destination node can be unknown during the cut operation.
ROSE_DLL_API void cutPreprocessingInfo (SgLocatedNode* src_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& save_buf);
//! Paste preprocessing information from a buffer to a destination node. Used in combination with cutPreprocessingInfo().
ROSE_DLL_API void pastePreprocessingInfo (SgLocatedNode* dst_node, PreprocessingInfo::RelativePositionType pos, AttachedPreprocessingInfoType& saved_buf);
//! Attach an arbitrary string to a located node. A workaround to insert irregular statements or vendor-specific attributes.
ROSE_DLL_API PreprocessingInfo* attachArbitraryText(SgLocatedNode* target,
const std::string & text,
PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before);
//! Check if a pragma declaration node has macro calls attached; if so, replace the macro calls within the pragma string with their expanded strings. This only works if -rose:wave is turned on.
ROSE_DLL_API void replaceMacroCallsWithExpandedStrings(SgPragmaDeclaration* target);
//! Build and attach comment onto the global scope of a source file
PreprocessingInfo* attachComment(
SgSourceFile * source_file,
const std::string & content,
PreprocessingInfo::DirectiveType directive_type = PreprocessingInfo::C_StyleComment,
PreprocessingInfo::RelativePositionType position = PreprocessingInfo::before
);
//! Build and attach comment, comment style is inferred from the language type of the target node if not provided
ROSE_DLL_API PreprocessingInfo* attachComment(SgLocatedNode* target, const std::string & content,
PreprocessingInfo::RelativePositionType position=PreprocessingInfo::before,
PreprocessingInfo::DirectiveType dtype= PreprocessingInfo::CpreprocessorUnknownDeclaration);
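// Usage sketch (illustrative only; "stmt" is a hypothetical SgLocatedNode*, e.g. a statement of interest):
//   attachComment(stmt, "generated by my tool");                            // comment placed before stmt
//   attachComment(stmt, "end of generated code", PreprocessingInfo::after); // comment placed after stmt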
// DQ (11/25/2009): Added matching support for adding comments to SgAsm nodes.
// Build and attach comment
// void attachComment(SgAsmStatement* target, const std::string & content );
// DQ (7/20/2008): I am not clear where I should put this function, candidates include: SgLocatedNode or SgInterface
//! Add a string to be unparsed to support code generation for back-end specific tools or compilers.
ROSE_DLL_API void addTextForUnparser ( SgNode* astNode, std::string s, AstUnparseAttribute::RelativePositionType inputlocation );
/**
 * Add a preprocessor guard around a given node.
* It surrounds the node with "#if guard" and "#endif"
*/
void guardNode(SgLocatedNode * target, std::string guard);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Source File Position
\brief set Sg_File_Info for a SgNode
*/
// ************************************************************************
// Newer versions of now deprecated functions
// ************************************************************************
// DQ (5/1/2012): This function queries the SageBuilder::SourcePositionClassification mode (stored in the SageBuilder
// interface) and uses the specified mode to initialize the source position data (Sg_File_Info objects). This
// function is the only function that should be called directly (though in a namespace we can't define permissions).
//! Set the source code position for the current (input) node.
ROSE_DLL_API void setSourcePosition(SgNode* node);
// A better name might be "setSourcePositionForSubTree"
//! Set the source code position for the subtree (including the root).
ROSE_DLL_API void setSourcePositionAtRootAndAllChildren(SgNode *root);
//! DQ (5/1/2012): New function with improved name.
void setSourcePositionAsTransformation(SgNode *node);
// DQ (5/1/2012): Newly renamed function (previous name preserved for backward compatibility).
void setSourcePositionPointersToNull(SgNode *node);
// ************************************************************************
// ************************************************************************
// Older deprecated functions
// ************************************************************************
// Liao, 1/8/2007, set file info. for a whole subtree as transformation generated
//! Set current node's source position as transformation generated
ROSE_DLL_API void setOneSourcePositionForTransformation(SgNode *node);
//! Set current node's source position as NULL
ROSE_DLL_API void setOneSourcePositionNull(SgNode *node);
//! Recursively set source position info(Sg_File_Info) as transformation generated
ROSE_DLL_API void setSourcePositionForTransformation (SgNode * root);
//! Set source position info(Sg_File_Info) as transformation generated for all SgNodes in memory pool
// ROSE_DLL_API void setSourcePositionForTransformation_memoryPool();
//! Check if a node is from a system header file
ROSE_DLL_API bool insideSystemHeader (SgLocatedNode* node);
//! Set the source position of SgLocatedNode to Sg_File_Info::generateDefaultFileInfo(). These nodes WILL be unparsed. Not for transformation usage.
// ROSE_DLL_API void setSourcePosition (SgLocatedNode * locatedNode);
// ************************************************************************
//@}
//------------------------------------------------------------------------
//@{
/*! @name Data types
\brief
*/
// from src/midend/astInlining/typeTraits.h
// src/midend/astUtil/astInterface/AstInterface.h
//! Get the right bool type according to C or C++ language input
SgType* getBoolType(SgNode* n);
//! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long.
////!
////! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool to be treated as integer types
ROSE_DLL_API bool isStrictIntegerType(SgType* t);
//!Get the data type of the first initialized name of a declaration statement
ROSE_DLL_API SgType* getFirstVarType(SgVariableDeclaration* decl);
//! Is a type default constructible? This may not quite work properly.
ROSE_DLL_API bool isDefaultConstructible(SgType* type);
//! Is a type copy constructible? This may not quite work properly.
ROSE_DLL_API bool isCopyConstructible(SgType* type);
//! Is a type assignable? This may not quite work properly.
ROSE_DLL_API bool isAssignable(SgType* type);
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
//! Check if a class type is a pure virtual class. True means that there is at least
//! one pure virtual function that has not been overridden.
//! In the case of an incomplete class type (forward declaration), this function returns false.
ROSE_DLL_API bool isPureVirtualClass(SgType* type, const ClassHierarchyWrapper& classHierarchy);
#endif
//! Does a type have a trivial (built-in) destructor?
ROSE_DLL_API bool hasTrivialDestructor(SgType* t);
//! Is this type a non-constant reference type? (Handles typedefs correctly)
ROSE_DLL_API bool isNonconstReference(SgType* t);
//! Is this type a const or non-const reference type? (Handles typedefs correctly)
ROSE_DLL_API bool isReferenceType(SgType* t);
//! Is this type a pointer type? (Handles typedefs correctly)
ROSE_DLL_API bool isPointerType(SgType* t);
//! Is this a pointer to a non-const type? Note that this function will return true for const pointers pointing to
//! non-const types. For example, (int* const y) points to a modifiable int, so this function returns true. Meanwhile,
//! it returns false for (int const * x) and (int const * const x) because these types point to a const int.
//! Also, only the outer layer of nested pointers is unwrapped. So the function returns true for (const int ** y), but returns
//! false for (const int * const * x)
ROSE_DLL_API bool isPointerToNonConstType(SgType* type);
//! Is this a const type?
/* const char* p = "aa"; is not treated as having a const type. It is a pointer to const char.
 * Similarly, neither const int b[10]; nor const int & c = 10; is treated as having a const type.
* The standard says, "A compound type is not cv-qualified by the cv-qualifiers (if any) of
the types from which it is compounded. Any cv-qualifiers applied to an array type affect the array element type, not the array type".
*/
ROSE_DLL_API bool isConstType(SgType* t);
//! Remove const (if present) from a type. stripType() cannot do this because it removes all modifiers.
SgType* removeConst(SgType* t);
//! Is this a volatile type?
ROSE_DLL_API bool isVolatileType(SgType* t);
//! Is this a restrict type?
ROSE_DLL_API bool isRestrictType(SgType* t);
//! Is this a scalar type?
/*! We define the following SgTypes as scalar types: char, short, int, long, void, Wchar, Float, double, long long, string, bool, complex, imaginary
*/
ROSE_DLL_API bool isScalarType(SgType* t);
//! Check if a type is an integral type, only allowing signed/unsigned short, int, long, long long.
//!
//! There is another similar function named SgType::isIntegerType(), which allows additional types char, wchar, and bool.
ROSE_DLL_API bool isStrictIntegerType(SgType* t);
//! Check if a type is a struct type (a special SgClassType in ROSE)
ROSE_DLL_API bool isStructType(SgType* t);
//! Generate a mangled string for a given type based on Itanium C++ ABI
ROSE_DLL_API std::string mangleType(SgType* type);
//! Generate mangled scalar type names according to Itanium C++ ABI, the input type should pass isScalarType() in ROSE
ROSE_DLL_API std::string mangleScalarType(SgType* type);
//! Generate mangled modifier type names, including const and volatile, according to the Itanium C++ ABI, with an extension to handle UPC shared types.
ROSE_DLL_API std::string mangleModifierType(SgModifierType* type);
//! Calculate the number of elements of an array type: dim1 * dim2 * ... . The element count is assumed to be 1 for int a[]; THREADS is stripped off if it is a UPC array.
ROSE_DLL_API size_t getArrayElementCount(SgArrayType* t);
//! Get the number of dimensions of an array type
ROSE_DLL_API int getDimensionCount(SgType* t);
//! Get the element type of an array. It recursively finds the base type for multi-dimensional array types.
ROSE_DLL_API SgType* getArrayElementType(SgType* t);
//! Get the element type of an array, pointer or string, or NULL if not applicable. This function only checks one level of base type; no recursion.
ROSE_DLL_API SgType* getElementType(SgType* t);
/// \brief returns the array dimensions in an array as defined for arrtype
/// \param arrtype the type of a C/C++ array
/// \return an array that contains an expression indicating each dimension's size.
/// OWNERSHIP of the expressions is TRANSFERRED TO the CALLER (which
/// becomes responsible for freeing the expressions).
/// Note, the first entry of the array is a SgNullExpression, iff the
/// first array dimension was not specified.
/// \code
/// int x[] = { 1, 2, 3 };
/// \endcode
/// note, the expression does not have to be a constant
/// \code
/// int x[i*5];
/// \endcode
/// \post return-value.empty() == false
/// \post return-value[*] != NULL (no nullptr in the returned vector)
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype);
/// \brief returns the array dimensions in an array as defined for arrtype
/// \param arrtype the type of a C/C++ array
/// \param varref a reference to an array variable (the variable of type arrtype)
/// \return an array that contains an expression indicating each dimension's size.
/// OWNERSHIP of the expressions is TRANSFERRED TO the CALLER (which
/// becomes responsible for freeing the expressions).
/// If the first array dimension was not specified an expression
/// that indicates that size is generated.
/// \code
/// int x[][3] = { 1, 2, 3, 4, 5, 6 };
/// \endcode
/// the entry for the first dimension will be:
/// \code
/// // 3 ... size of 2nd dimension
/// sizeof(x) / (sizeof(int) * 3)
/// \endcode
/// \pre arrtype is the array-type of varref
/// \post return-value.empty() == false
/// \post return-value[*] != NULL (no nullptr in the returned vector)
/// \post !isSgNullExpression(return-value[*])
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype, const SgVarRefExp& varref);
/// \overload
/// \note see get_C_array_dimensions for SgVarRefExp for details.
/// \todo make initname const
std::vector<SgExpression*>
get_C_array_dimensions(const SgArrayType& arrtype, SgInitializedName& initname);
//! Check if an expression is an array access (SgPntrArrRefExp). If so, return its name expression and subscripts if requested. Users can use convertRefToInitializedName() to get the possible name. It does not check if the expression is a top level SgPntrArrRefExp.
ROSE_DLL_API bool isArrayReference(SgExpression* ref, SgExpression** arrayNameExp=NULL, std::vector<SgExpression*>** subscripts=NULL);
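// Usage sketch (illustrative only; "expr" is a hypothetical SgExpression* that may be an access like a[i][j]):
//   SgExpression* arrayName = NULL;
//   std::vector<SgExpression*>* subscripts = NULL;
//   if (isArrayReference(expr, &arrayName, &subscripts))
//   {
//     // arrayName refers to the array being indexed; subscripts lists one expression per dimension.
//   }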
//! Collect variable references in array types. The default NodeQuery::querySubTree() will miss variables referenced in array type's index list. e.g. double *buffer = new double[numItems] ;
ROSE_DLL_API int collectVariableReferencesInArrayTypes (SgLocatedNode* root, Rose_STL_Container<SgNode*> & currentVarRefList);
//! Has a UPC shared type of any kind (shared-to-shared, private-to-shared, shared-to-private, shared scalar/array)? An optional parameter, mod_type_out, stores the first SgModifierType with UPC access information.
/*!
* Note: we classify private-to-shared as 'has shared' type for convenience here. It is indeed a private type in strict sense.
AST graph for some examples:
- shared scalar: SgModifierType -->base type
- shared array: SgArrayType --> SgModiferType --> base type
- shared to shared: SgModifierType --> SgPointerType --> SgModifierType ->SgTypeInt
- shared to private: SgModifierType --> SgPointerType --> base type
- private to shared: SgPointerType --> SgModifierType --> base type
*/
ROSE_DLL_API bool hasUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL );
//! Check if a type is a UPC shared type, including shared array, shared pointers etc. Exclude private pointers to shared types. Optionally return the modifier type with the UPC shared property.
/*!
 * ROSE uses SgArrayType of SgModifierType to represent shared arrays, not SgModifierType pointing to SgArrayType. Also, a typedef may cause a chain of nodes before reaching the actual SgModifierType with the UPC shared property.
*/
ROSE_DLL_API bool isUpcSharedType(SgType* t, SgModifierType ** mod_type_out = NULL);
//! Check if a modifier type is a UPC shared type.
ROSE_DLL_API bool isUpcSharedModifierType (SgModifierType* mod_type);
//! Check if an array type is a UPC shared type. The ROSE AST represents a UPC shared array as a regular array of elements of UPC shared modifier type, not directly as a UPC shared modifier type of an array.
ROSE_DLL_API bool isUpcSharedArrayType (SgArrayType* array_type);
//! Check if a shared UPC type is strict memory consistency or not. Return false if it is relaxed. (So isUpcRelaxedSharedModifierType() is not necessary.)
ROSE_DLL_API bool isUpcStrictSharedModifierType(SgModifierType* mode_type);
//! Get the block size of a UPC shared modifier type
ROSE_DLL_API size_t getUpcSharedBlockSize(SgModifierType* mod_type);
//! Get the block size of a UPC shared type, including Modifier types and array of modifier types (shared arrays)
ROSE_DLL_API size_t getUpcSharedBlockSize(SgType* t);
//! Is UPC phase-less shared type? Phase-less means block size of the first SgModifierType with UPC information is 1 or 0/unspecified. Also return false if the type is not a UPC shared type.
ROSE_DLL_API bool isUpcPhaseLessSharedType (SgType* t);
//! Is a UPC private-to-shared pointer? SgPointerType comes first compared to SgModifierType with UPC information. Input type must be any of UPC shared types first.
ROSE_DLL_API bool isUpcPrivateToSharedType(SgType* t);
//! Is a UPC array with dimension of X*THREADS
ROSE_DLL_API bool isUpcArrayWithThreads(SgArrayType* t);
//! Look up a named type based on its name, searching bottom-up from a specified scope. Note that a name collision may be allowed in C (not C++) between a typedef and an enum/struct; only the first matching named type is returned in that case. A typedef is returned as-is, not the base type it actually refers to.
ROSE_DLL_API SgType* lookupNamedTypeInParentScopes(const std::string& type_name, SgScopeStatement* scope=NULL);
// DQ (7/22/2014): Added support for comparing expression types in actual arguments with those expected from the formal function parameter types.
//! Get the type of the associated argument expression from the function type.
ROSE_DLL_API SgType* getAssociatedTypeFromFunctionTypeList(SgExpression* actual_argument_expression);
//! Verify that 2 SgTemplateArgument are equivalent (same type, same expression, or same template declaration)
ROSE_DLL_API bool templateArgumentEquivalence(SgTemplateArgument * arg1, SgTemplateArgument * arg2);
//! Verify that 2 SgTemplateArgumentPtrList are equivalent.
ROSE_DLL_API bool templateArgumentListEquivalence(const SgTemplateArgumentPtrList & list1, const SgTemplateArgumentPtrList & list2);
//! Test for equivalence of types independent of access permissions (private or protected modes for members of classes).
ROSE_DLL_API bool isEquivalentType (const SgType* lhs, const SgType* rhs);
//! Test if two types are equivalent SgFunctionType nodes. This is necessary for template function types:
//! they may differ in one SgTemplateType pointer but be identical otherwise.
ROSE_DLL_API bool isEquivalentFunctionType (const SgFunctionType* lhs, const SgFunctionType* rhs);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Loop handling
\brief
*/
// by Jeremiah
//! Add a step statement to the end of a loop body
//! Add a new label to the end of the loop, with the step statement after
//! it; then change all continue statements in the old loop body into
//! jumps to the label
//!
//! For example:
//! while (a < 5) {if (a < -3) continue;} (adding "a++" to end) becomes
//! while (a < 5) {if (a < -3) goto label; label: a++;}
ROSE_DLL_API void addStepToLoopBody(SgScopeStatement* loopStmt, SgStatement* step);
ROSE_DLL_API void moveForStatementIncrementIntoBody(SgForStatement* f);
ROSE_DLL_API void convertForToWhile(SgForStatement* f);
ROSE_DLL_API void convertAllForsToWhiles(SgNode* top);
//! Change continue statements in a given block of code to gotos to a label
ROSE_DLL_API void changeContinuesToGotos(SgStatement* stmt, SgLabelStatement* label);
//!Return the loop index variable for a for loop
ROSE_DLL_API SgInitializedName* getLoopIndexVariable(SgNode* loop);
//! Check if a SgInitializedName is used as a loop index within an AST subtree
//! This function will use a bottom-up traversal starting from subtree_root to find all enclosing loops and check if ivar is used as an index for any of them.
ROSE_DLL_API bool isLoopIndexVariable(SgInitializedName* ivar, SgNode* subtree_root);
//! Check if a for loop uses C99 style initialization statement with multiple expressions like for (int i=0, j=0; ..) or for (i=0,j=0;...)
/*!
for (int i=0, j=0; ..) is stored as two variable declarations under SgForInitStatement's init_stmt member
for (i=0,j=0;...) is stored as a single expression statement, with comma expression (i=0,j=0).
*/
ROSE_DLL_API bool hasMultipleInitStatmentsOrExpressions (SgForStatement* for_loop);
//! Routines to get and set the body of a loop
ROSE_DLL_API SgStatement* getLoopBody(SgScopeStatement* loop);
ROSE_DLL_API void setLoopBody(SgScopeStatement* loop, SgStatement* body);
//! Routines to get the condition of a loop. It recognizes While-loop, For-loop, and Do-While-loop.
ROSE_DLL_API SgStatement* getLoopCondition(SgScopeStatement* loop);
//! Set the condition statement of a loop, including While-loop, For-loop, and Do-While-loop.
ROSE_DLL_API void setLoopCondition(SgScopeStatement* loop, SgStatement* cond);
//! Check if a for-loop has a canonical form, return loop index, bounds, step, and body if requested
//!
//! A canonical form is defined as : one initialization statement, a test expression, and an increment expression , loop index variable should be of an integer type. IsInclusiveUpperBound is true when <= or >= is used for loop condition
ROSE_DLL_API bool isCanonicalForLoop(SgNode* loop, SgInitializedName** ivar=NULL, SgExpression** lb=NULL, SgExpression** ub=NULL, SgExpression** step=NULL, SgStatement** body=NULL, bool *hasIncrementalIterationSpace = NULL, bool* isInclusiveUpperBound = NULL);
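// Usage sketch (illustrative only; "loop" is a hypothetical SgForStatement* obtained elsewhere):
//   SgInitializedName* ivar = NULL;
//   SgExpression *lb = NULL, *ub = NULL, *step = NULL;
//   SgStatement* body = NULL;
//   bool incremental = false, inclusiveUB = false;
//   if (isCanonicalForLoop(loop, &ivar, &lb, &ub, &step, &body, &incremental, &inclusiveUB))
//   {
//     // The loop has the form: for (ivar = lb; ivar </<= ub; ivar += step) body
//   }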
//! Check if a Fortran Do loop has a complete canonical form: Do I=1, 10, 1
ROSE_DLL_API bool isCanonicalDoLoop(SgFortranDo* loop,SgInitializedName** ivar/*=NULL*/, SgExpression** lb/*=NULL*/, SgExpression** ub/*=NULL*/, SgExpression** step/*=NULL*/, SgStatement** body/*=NULL*/, bool *hasIncrementalIterationSpace/*= NULL*/, bool* isInclusiveUpperBound/*=NULL*/);
//! Set the lower bound of a loop header for (i=lb; ...)
ROSE_DLL_API void setLoopLowerBound(SgNode* loop, SgExpression* lb);
//! Set the upper bound of a loop header, regardless of the condition expression type: for (i=lb; i op ub; ...)
ROSE_DLL_API void setLoopUpperBound(SgNode* loop, SgExpression* ub);
//! Set the stride (step) of a loop's increment expression, regardless of the expression type (i+=s; i=i+s, etc.)
ROSE_DLL_API void setLoopStride(SgNode* loop, SgExpression* stride);
//! Normalize a loop's init statement by promoting the single variable declaration statement outside of the for loop header's init statement, e.g. for (int i=0;) becomes int i_x; for (i_x=0;..), and rewrite the loop with the new index variable, if necessary
ROSE_DLL_API bool normalizeForLoopInitDeclaration(SgForStatement* loop);
//! Undo the normalization of for loop's C99 init declaration. Previous record of normalization is used to ease the reverse transformation.
ROSE_DLL_API bool unnormalizeForLoopInitDeclaration(SgForStatement* loop);
//! Normalize a for loop, return true if successful. Generated constants will be fold by default.
//!
//! Translations are :
//! For the init statement: for (int i=0;... ) becomes int i; for (i=0;..)
//! For test expression:
//! i<x is normalized to i<= (x-1) and
//! i>x is normalized to i>= (x+1)
//! For increment expression:
//! i++ is normalized to i+=1 and
//! i-- is normalized to i+=-1
//! i-=s is normalized to i+= -s
ROSE_DLL_API bool forLoopNormalization(SgForStatement* loop, bool foldConstant = true);
//! Normalize a for loop's test expression
//! i<x is normalized to i<= (x-1) and
//! i>x is normalized to i>= (x+1)
ROSE_DLL_API bool normalizeForLoopTest(SgForStatement* loop);
ROSE_DLL_API bool normalizeForLoopIncrement(SgForStatement* loop);
//!Normalize a Fortran Do loop. Make the default increment expression (1) explicit
ROSE_DLL_API bool doLoopNormalization(SgFortranDo* loop);
//! Unroll a target loop with a specified unrolling factor. It handles steps larger than 1 and adds a fringe loop if the iteration count is not evenly divisible by the unrolling factor.
ROSE_DLL_API bool loopUnrolling(SgForStatement* loop, size_t unrolling_factor);
//! Interchange/permute an n-level perfectly-nested loop rooted at 'loop' using a lexicographical order number within (0, depth!).
ROSE_DLL_API bool loopInterchange(SgForStatement* loop, size_t depth, size_t lexicoOrder);
//! Tile the n-level (starting from 1) loop of a perfectly nested loop nest using tiling size s
ROSE_DLL_API bool loopTiling(SgForStatement* loopNest, size_t targetLevel, size_t tileSize);
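// Usage sketches (each line is an independent, illustrative call; "loop" is a hypothetical canonical
// SgForStatement*, and the nesting requirements documented for each routine must hold):
//   loopUnrolling(loop, 4);        // unroll by a factor of 4 (a fringe loop is added if needed)
//   loopInterchange(loop, 2, 1);   // permute a 2-level perfectly nested loop
//   loopTiling(loop, 1, 32);       // tile level 1 of the nest with tile size 32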
//Winnie Loop Collapsing
SgExprListExp * loopCollapsing(SgForStatement* target_loop, size_t collapsing_factor);
bool getForLoopInformations(
SgForStatement * for_loop,
SgVariableSymbol * & iterator,
SgExpression * & lower_bound,
SgExpression * & upper_bound,
SgExpression * & stride
);
//@}
//------------------------------------------------------------------------
//@{
/*! @name Topdown search
\brief Top-down traversal from current node to find a node of a specified type
*/
//! Query a subtree to get all nodes of a given type, with an appropriate downcast.
template <typename NodeType>
std::vector<NodeType*> querySubTree(SgNode* top, VariantT variant = (VariantT)NodeType::static_variant)
{
Rose_STL_Container<SgNode*> nodes = NodeQuery::querySubTree(top,variant);
std::vector<NodeType*> result(nodes.size(), NULL);
int count = 0;
for (Rose_STL_Container<SgNode*>::const_iterator i = nodes.begin();
i != nodes.end(); ++i, ++count) {
NodeType* node = dynamic_cast<NodeType*>(*i);
ROSE_ASSERT (node);
result[count] = node;
}
return result;
}
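// Usage sketch (illustrative only; "project" is a hypothetical SgProject* returned by frontend()):
//   std::vector<SgForStatement*> loops = querySubTree<SgForStatement>(project);
//   for (size_t i = 0; i < loops.size(); ++i)
//   {
//     // process loops[i]
//   }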
/*! \brief Returns STL vector of SgFile IR node pointers.
Demonstrates use of restricted traversal over just SgFile IR nodes.
*/
std::vector < SgFile * >generateFileList ();
/** Get the current SgProject IR Node.
*
* The library should never have more than one project and it asserts such. If no project has been created yet then this
* function returns the null pointer. */
ROSE_DLL_API SgProject * getProject();
//! \return the project associated with a node
SgProject * getProject(const SgNode * node);
//! Query memory pools to grab SgNode of a specified type
template <typename NodeType>
static std::vector<NodeType*> getSgNodeListFromMemoryPool()
{
  // This function uses a memory pool traversal specific to the NodeType IR nodes
class MyTraversal : public ROSE_VisitTraversal
{
public:
std::vector<NodeType*> resultlist;
void visit ( SgNode* node)
{
NodeType* result = dynamic_cast<NodeType* > (node);
ROSE_ASSERT(result!= NULL);
if (result!= NULL)
{
resultlist.push_back(result);
}
};
virtual ~MyTraversal() {}
};
MyTraversal my_traversal;
NodeType::traverseMemoryPoolNodes(my_traversal);
return my_traversal.resultlist;
}
/*! \brief top-down traversal from current node to find the main() function declaration
*/
ROSE_DLL_API SgFunctionDeclaration* findMain(SgNode* currentNode);
//! Find the last declaration statement within a scope (if any). This is often useful to decide where to insert another variable declaration statement. Pragma declarations are not treated as a declaration by default in this context.
SgStatement* findLastDeclarationStatement(SgScopeStatement * scope, bool includePragma = false);
//midend/programTransformation/partialRedundancyElimination/pre.h
//! Find referenced symbols within an expression
std::vector<SgVariableSymbol*> getSymbolsUsedInExpression(SgExpression* expr);
//! Find break statements inside a particular statement, stopping at nested loops or switches
/*! Loops or switch statements define their own contexts for break
statements. The function will stop immediately if run on a loop or switch
statement. If fortranLabel is non-empty, breaks (EXITs) to that label within
nested loops are included in the returned list.
*/
std::vector<SgBreakStmt*> findBreakStmts(SgStatement* code, const std::string& fortranLabel = "");
//! Find all continue statements inside a particular statement, stopping at nested loops
/*! Nested loops define their own contexts for continue statements. The
function will stop immediately if run on a loop
statement. If fortranLabel is non-empty, continues (CYCLEs) to that label
within nested loops are included in the returned list.
*/
std::vector<SgContinueStmt*> findContinueStmts(SgStatement* code, const std::string& fortranLabel = "");
std::vector<SgGotoStatement*> findGotoStmts(SgStatement* scope, SgLabelStatement* l);
std::vector<SgStatement*> getSwitchCases(SgSwitchStatement* sw);
//! Collect all variable references in a subtree
void collectVarRefs(SgLocatedNode* root, std::vector<SgVarRefExp* >& result);
//! Topdown traverse a subtree from root to find the first declaration given its name, scope (optional, can be NULL), and defining or nondefining flag.
template <typename T>
T* findDeclarationStatement(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining)
{
bool found = false;
#if 0
printf ("In findDeclarationStatement(): root = %p \n",root);
printf ("In findDeclarationStatement(): name = %s \n",name.c_str());
printf ("In findDeclarationStatement(): scope = %p \n",scope);
printf ("In findDeclarationStatement(): isDefining = %s \n",isDefining ? "true" : "false");
#endif
// Do we really want a NULL pointer to be acceptable input to this function?
// Maybe we should have an assertion that it is non-null?
if (!root) return NULL;
T* decl = dynamic_cast<T*>(root);
#if 0
printf ("In findDeclarationStatement(): decl = %p \n",decl);
#endif
if (decl != NULL)
{
if (scope)
{
if ((decl->get_scope() == scope) && (decl->search_for_symbol_from_symbol_table()->get_name() == name))
{
found = true;
}
}
else // Liao 2/9/2010. We should allow NULL scope
{
#if 0
// DQ (12/6/2016): Include this into the debugging code to avoid a compiler warning about an unused variable.
SgSymbol* symbol = decl->search_for_symbol_from_symbol_table();
printf ("In findDeclarationStatement(): decl->search_for_symbol_from_symbol_table() = %p \n",symbol);
printf ("In findDeclarationStatement(): decl->search_for_symbol_from_symbol_table()->get_name() = %s \n",symbol->get_name().str());
#endif
if (decl->search_for_symbol_from_symbol_table()->get_name() == name)
{
found = true;
}
}
}
if (found)
{
if (isDefining)
{
#if 0
printf ("In findDeclarationStatement(): decl->get_firstNondefiningDeclaration() = %p \n",decl->get_firstNondefiningDeclaration());
printf ("In findDeclarationStatement(): decl->get_definingDeclaration() = %p \n",decl->get_definingDeclaration());
#endif
ROSE_ASSERT (decl->get_definingDeclaration() != NULL);
#if 0
printf ("In findDeclarationStatement(): returing decl->get_definingDeclaration() = %p \n",decl->get_definingDeclaration());
#endif
return dynamic_cast<T*> (decl->get_definingDeclaration());
}
else
{
#if 0
printf ("In findDeclarationStatement(): returing decl = %p \n",decl);
#endif
return decl;
}
}
std::vector<SgNode*> children = root->get_traversalSuccessorContainer();
#if 0
printf ("In findDeclarationStatement(): children.size() = %zu \n",children.size());
#endif
// DQ (4/10/2016): Note that if we are searching for a function member that has its defining
// declaration defined outside of the class then it will not be found in the child list.
for (std::vector<SgNode*>::const_iterator i = children.begin(); i != children.end(); ++i)
{
T* target = findDeclarationStatement<T> (*i,name,scope,isDefining);
if (target)
{
return target;
}
}
return NULL;
}
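// Usage sketch (illustrative only; "project" is a hypothetical SgProject* and "foo" a function name
// assumed to exist in the input code):
//   SgFunctionDeclaration* def =
//       findDeclarationStatement<SgFunctionDeclaration>(project, "foo", NULL, true); // defining declaration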
//! Topdown traverse a subtree from root to find the first function declaration matching the given name, scope (optional, can be NULL), and defining or nondefining flag. This is an instantiation of findDeclarationStatement<T>.
SgFunctionDeclaration* findFunctionDeclaration(SgNode* root, std::string name, SgScopeStatement* scope, bool isDefining);
#if 0 //TODO
// 1. preorder traversal from current SgNode till find next SgNode of type V_SgXXX
// until reach the end node
SgNode* getNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);
// 2. return all nodes of type VariantT following the source node
std::vector<SgNode*> getAllNextSgNode( const SgNode* astSourceNode, VariantT=V_SgNode, SgNode* astEndNode=NULL);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name Bottom up search
\brief Backwards traverse through the AST to find a node, findEnclosingXXX()
*/
// remember to put const to all arguments.
/** Find a node by type using upward traversal.
*
* Traverse backward through a specified node's ancestors, starting with the node's parent and progressing to more distant
* ancestors, to find the first node matching the specified or derived type. If @p includingSelf is true then the
* starting node, @p astNode, is returned if its type matches, otherwise the search starts at the parent of @p astNode.
*
* For the purposes of this function, the parent (P) of an SgDeclarationStatement node (N) is considered to be the first
* non-defining declaration of N if N has both a defining declaration and a first non-defining declaration and the defining
* declaration is different than the first non-defining declaration.
*
 * If no ancestor of the requisite type or subtypes is found then this function returns a null pointer.
*
* If @p astNode is the null pointer, then the return value is a null pointer. That is, if there is no node, then there cannot
* be an enclosing node of the specified type. */
template <typename NodeType>
NodeType* getEnclosingNode(const SgNode* astNode, const bool includingSelf = false)
{
#if 1
// DQ (10/20/2012): This is the older version of this implementation. Until I am sure that
// the newer version (below) is what we want to use I will resolve this conflict by keeping
// the previous version in place.
if (NULL == astNode)
{
return NULL;
}
if ( (includingSelf ) && (dynamic_cast<const NodeType*>(astNode)) )
{
return const_cast<NodeType*>(dynamic_cast<const NodeType*> (astNode));
}
// DQ (3/5/2012): Check for reference to self...
ROSE_ASSERT(astNode->get_parent() != astNode);
SgNode* parent = astNode->get_parent();
// DQ (3/5/2012): Check for loops that will cause infinite loops.
SgNode* previouslySeenParent = parent;
bool foundCycle = false;
while ( (foundCycle == false) && (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
{
ROSE_ASSERT(parent->get_parent() != parent);
#if 0
printf ("In getEnclosingNode(): parent = %p = %s \n",parent,parent->class_name().c_str());
#endif
parent = parent->get_parent();
// DQ (3/5/2012): Check for loops that will cause infinite loops.
// ROSE_ASSERT(parent != previouslySeenParent);
if (parent == previouslySeenParent)
{
foundCycle = true;
}
}
#if 0
printf ("previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif
parent = previouslySeenParent;
SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
if (declarationStatement != NULL)
{
#if 0
printf ("Found a SgDeclarationStatement \n");
#endif
SgDeclarationStatement* definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
printf (" --- declarationStatement = %p \n",declarationStatement);
printf (" --- definingDeclaration = %p \n",definingDeclaration);
if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
{
#if 0
printf ("Found a nondefining declaration so use the non-defining declaration instead \n");
#endif
// DQ (10/19/2012): Use the defining declaration instead.
// parent = firstNondefiningDeclaration;
parent = definingDeclaration;
}
}
#if 0
printf ("reset: previouslySeenParent = %p = %s \n",previouslySeenParent,previouslySeenParent->class_name().c_str());
#endif
// DQ (10/19/2012): This branch is just to document the cycle that was previously detected, it is for
// debugging only. Thus it only makes sense for it to be executed when "(foundCycle == true)". However,
// this will have to be revisited later since it appears clear that it is a problem for the binary analysis
// work when it is visited for this case. Since the cycle is detected, but there is no assertion on the
// cycle, we don't exit when a cycle is identified (which is the point of the code below).
// Note also that I have fixed the code (above and below) to only chase pointers through defining
// declarations (where they exist), this is important since non-defining declarations can be almost
// anywhere (and thus chasing them can make it appear that there are cycles where there are none
// (I think); test2012_234.C demonstrates an example of this.
// DQ (10/9/2012): Robb has suggested this change to fix the binary analysis work.
// if (foundCycle == true)
if (foundCycle == false)
{
while ( (parent != NULL) && (!dynamic_cast<const NodeType*>(parent)) )
{
ROSE_ASSERT(parent->get_parent() != parent);
#if 0
printf ("In getEnclosingNode() (2nd try): parent = %p = %s \n",parent,parent->class_name().c_str());
if (parent->get_file_info() != NULL)
parent->get_file_info()->display("In getEnclosingNode() (2nd try): debug");
#endif
SgDeclarationStatement* declarationStatement = isSgDeclarationStatement(parent);
if (declarationStatement != NULL)
{
#if 0
printf ("Found a SgDeclarationStatement \n");
#endif
SgDeclarationStatement* definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement* firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
#if 0
printf (" --- declarationStatement = %p = %s \n",declarationStatement,(declarationStatement != NULL) ? declarationStatement->class_name().c_str() : "null");
printf (" --- definingDeclaration = %p \n",definingDeclaration);
if (definingDeclaration != NULL && definingDeclaration->get_parent() != NULL)
printf (" --- definingDeclaration ->get_parent() = %p = %s \n",definingDeclaration->get_parent(),definingDeclaration->get_parent()->class_name().c_str());
printf (" --- firstNondefiningDeclaration = %p \n",firstNondefiningDeclaration);
if (firstNondefiningDeclaration != NULL && firstNondefiningDeclaration->get_parent() != NULL)
printf (" --- firstNondefiningDeclaration ->get_parent() = %p = %s \n",firstNondefiningDeclaration->get_parent(),firstNondefiningDeclaration->get_parent()->class_name().c_str());
#endif
if (definingDeclaration != NULL && declarationStatement != firstNondefiningDeclaration)
{
#if 0
printf ("Found a nondefining declaration so use the firstNondefining declaration instead \n");
#endif
// DQ (10/19/2012): Use the defining declaration instead.
// parent = firstNondefiningDeclaration;
parent = definingDeclaration;
}
}
parent = parent->get_parent();
#if 1
// DQ (3/5/2012): Check for loops that will cause infinite loops.
ROSE_ASSERT(parent != previouslySeenParent);
#else
printf ("WARNING::WARNING::WARNING commented out assertion for parent != previouslySeenParent \n");
if (parent == previouslySeenParent)
break;
#endif
}
}
return const_cast<NodeType*>(dynamic_cast<const NodeType*> (parent));
#else
// DQ (10/20/2012): Using Robb's newer version with my modification to use the definingDeclaration rather than firstNondefiningDeclaration (below).
// Find the parent of specified type, but watch out for cycles in the ancestry (which would cause an infinite loop).
// Cast away const because isSg* functions aren't defined for const node pointers; and our return is not const.
SgNode *node = const_cast<SgNode*>(!astNode || includingSelf ? astNode : astNode->get_parent());
std::set<const SgNode*> seen; // nodes we've seen, in order to detect cycles
while (node) {
if (NodeType *found = dynamic_cast<NodeType*>(node))
return found;
// FIXME: Cycle detection could be moved elsewhere so we don't need to do it on every call. [RPM 2012-10-09]
ROSE_ASSERT(seen.insert(node).second);
// Traverse to parent (declaration statements are a special case)
if (SgDeclarationStatement *declarationStatement = isSgDeclarationStatement(node)) {
SgDeclarationStatement *definingDeclaration = declarationStatement->get_definingDeclaration();
SgDeclarationStatement *firstNondefiningDeclaration = declarationStatement->get_firstNondefiningDeclaration();
if (definingDeclaration && firstNondefiningDeclaration && declarationStatement != firstNondefiningDeclaration) {
// DQ (10/19/2012): Use the defining declaration instead.
// node = firstNondefiningDeclaration;
node = definingDeclaration;
}
} else {
node = node->get_parent();
}
}
return NULL;
#endif
}
//! Find enclosing source file node
ROSE_DLL_API SgSourceFile* getEnclosingSourceFile(SgNode* n, const bool includingSelf=false);
//! Get the closest scope from astNode. Return astNode if it is already a scope.
ROSE_DLL_API SgScopeStatement* getScope(const SgNode* astNode);
//! Get the enclosing scope from a node n
ROSE_DLL_API SgScopeStatement* getEnclosingScope(SgNode* n, const bool includingSelf=false);
//! Traverse back through a node's parents to find the enclosing global scope
ROSE_DLL_API SgGlobal* getGlobalScope( const SgNode* astNode);
//! Find the function definition
ROSE_DLL_API SgFunctionDefinition* getEnclosingProcedure(SgNode* n, const bool includingSelf=false);
ROSE_DLL_API SgFunctionDefinition* getEnclosingFunctionDefinition(SgNode* astNode, const bool includingSelf=false);
//! Find the closest enclosing statement, including the given node
ROSE_DLL_API SgStatement* getEnclosingStatement(SgNode* n);
//! Find the closest switch outside a given statement (normally used for case and default statements)
ROSE_DLL_API SgSwitchStatement* findEnclosingSwitch(SgStatement* s);
//! Find enclosing OpenMP clause body statement from s. If s is already one, return it directly.
ROSE_DLL_API SgOmpClauseBodyStatement* findEnclosingOmpClauseBodyStatement(SgStatement* s);
//! Find the closest loop outside the given statement; if fortranLabel is not empty, the Fortran label of the loop must be equal to it
ROSE_DLL_API SgScopeStatement* findEnclosingLoop(SgStatement* s, const std::string& fortranLabel = "", bool stopOnSwitches = false);
//! Find the enclosing function declaration, including its derived instances like isSgProcedureHeaderStatement, isSgProgramHeaderStatement, and isSgMemberFunctionDeclaration.
ROSE_DLL_API SgFunctionDeclaration * getEnclosingFunctionDeclaration (SgNode * astNode, const bool includingSelf=false);
//roseSupport/utility_functions.h
//! get the SgFile node from current node
ROSE_DLL_API SgFile* getEnclosingFileNode (SgNode* astNode );
//! Get the initializer containing an expression if it is within an initializer.
ROSE_DLL_API SgInitializer* getInitializerOfExpression(SgExpression* n);
//! Get the closest class definition enclosing the specified AST node.
ROSE_DLL_API SgClassDefinition* getEnclosingClassDefinition(SgNode* astnode, const bool includingSelf=false);
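// A minimal usage sketch for the enclosing-node queries above (illustrative only, not part
// of the API); 'anyNode' is a hypothetical pointer obtained from an AST traversal, and each
// query may return NULL when no enclosing node of the requested type is found:
//   SgStatement*           enclosingStmt = SageInterface::getEnclosingStatement(anyNode);
//   SgFunctionDeclaration* enclosingFunc = SageInterface::getEnclosingFunctionDeclaration(anyNode);
//   SgGlobal*              globalScope   = SageInterface::getGlobalScope(anyNode);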
// TODO
#if 0
SgNode * getEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL);
std::vector<SgNode *> getAllEnclosingSgNode(SgNode* source,VariantT, SgNode* endNode=NULL);
SgVariableDeclaration* findVariableDeclaration( const string& varname);
SgClassDeclaration* getEnclosingClassDeclaration( const SgNode* astNode);
// e.g. for some expression, find its parent statement
SgStatement* getEnclosingStatement(const SgNode* astNode);
SgSwitchStatement* getEnclosingSwitch(SgStatement* s);
SgModuleStatement* getEnclosingModuleStatement( const SgNode* astNode);
// used to build a variable reference for compiler generated code in current scope
SgSymbol * findReachingDefinition (SgScopeStatement* startScope, SgName &name);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST Walk and Traversal
\brief
*/
// Liao, 1/9/2008
/*!
\brief return the first global scope under current project
*/
ROSE_DLL_API SgGlobal * getFirstGlobalScope(SgProject *project);
/*!
\brief get the last statement within a scope, return NULL if it does not exist
*/
ROSE_DLL_API SgStatement* getLastStatement(SgScopeStatement *scope);
//! Get the first statement within a scope, return NULL if it does not exist. Skip compiler-generated statements by default. Count transformation-generated ones, but exclude those which are not to be output by the unparser.
ROSE_DLL_API SgStatement* getFirstStatement(SgScopeStatement *scope,bool includingCompilerGenerated=false);
//!Find the first defining function declaration statement in a scope
ROSE_DLL_API SgFunctionDeclaration* findFirstDefiningFunctionDecl(SgScopeStatement* scope);
//! Get next statement within the same scope of current statement
ROSE_DLL_API SgStatement* getNextStatement(SgStatement * currentStmt);
//! Get previous statement of the current statement. It may return a previous statement of a parent scope by default (climbOutScope is true), otherwise only a previous statement of the same scope is returned.
ROSE_DLL_API SgStatement* getPreviousStatement(SgStatement * currentStmt, bool climbOutScope = true);
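// A minimal sketch (illustrative only) of walking the statements of a scope with the
// accessors above; 'scope' is a hypothetical SgScopeStatement*:
//   for (SgStatement* s = SageInterface::getFirstStatement(scope); s != NULL;
//        s = SageInterface::getNextStatement(s))
//   {
//     // process s ...
//   }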
#if 0 //TODO
// preorder traversal from current SgNode till find next SgNode of type V_SgXXX
SgNode* getNextSgNode( const SgNode* currentNode, VariantT=V_SgNode);
#endif
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST Comparison
\brief Compare AST nodes, subtree, etc
*/
//! Check if a SgIntVal node has a given value
ROSE_DLL_API bool isEqualToIntConst(SgExpression* e, int value);
//! Check if two function declarations refer to the same one. Two function declarations are the same when they are a) identical, b) same name in C c) same qualified named and mangled name in C++. A nondefining (prototype) declaration and a defining declaration of a same function are treated as the same.
/*!
* There is a similar function bool compareFunctionDeclarations(SgFunctionDeclaration *f1, SgFunctionDeclaration *f2) from Classhierarchy.C
*/
ROSE_DLL_API bool isSameFunction(SgFunctionDeclaration* func1, SgFunctionDeclaration* func2);
//! Check if a statement is the last statement within its closed scope
ROSE_DLL_API bool isLastStatement(SgStatement* stmt);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST insert, removal, and replacement
\brief Add, remove,and replace AST
scope->append_statement(), exprListExp->append_expression(), etc. are not enough to handle the side effects on parent pointers, symbol tables, preprocessing info, defining/nondefining pointers, etc.
*/
// DQ (2/24/2009): Simple function to delete an AST subtree (used in outlining).
//! Function to delete AST subtree's nodes only, users must take care of any dangling pointers, symbols or types that result.
ROSE_DLL_API void deleteAST(SgNode* node);
//! Special purpose function for deleting AST expression trees containing valid original expression trees in constant folded expressions (for internal use only).
ROSE_DLL_API void deleteExpressionTreeWithOriginalExpressionSubtrees(SgNode* root);
// DQ (2/25/2009): Added new function to support outliner.
//! Move statements in first block to the second block (preserves order and rebuilds the symbol table).
ROSE_DLL_API void moveStatementsBetweenBlocks ( SgBasicBlock* sourceBlock, SgBasicBlock* targetBlock );
//! Move a variable declaration to a new scope, handle symbol, special scopes like For loop, etc.
ROSE_DLL_API void moveVariableDeclaration(SgVariableDeclaration* decl, SgScopeStatement* target_scope);
//! Append a statement to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc.
ROSE_DLL_API void appendStatement(SgStatement *stmt, SgScopeStatement* scope=NULL);
//! Append a statement to the end of SgForInitStatement
ROSE_DLL_API void appendStatement(SgStatement *stmt, SgForInitStatement* for_init_stmt);
//! Append a list of statements to the end of the current scope, handle side effect of appending statements, e.g. preprocessing info, defining/nondefining pointers etc.
ROSE_DLL_API void appendStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL);
// DQ (2/6/2009): Added function to support outlining into separate file.
//! Append a copy ('decl') of a function ('original_statement') into a 'scope', include any referenced declarations required if the scope is within a compiler generated file. All referenced declarations, including those from headers, are inserted if excludeHeaderFiles is set to true (the new file will not have any headers).
ROSE_DLL_API void appendStatementWithDependentDeclaration( SgDeclarationStatement* decl, SgGlobal* scope, SgStatement* original_statement, bool excludeHeaderFiles );
//! Prepend a statement to the beginning of the current scope, handling side
//! effects as appropriate
ROSE_DLL_API void prependStatement(SgStatement *stmt, SgScopeStatement* scope=NULL);
//! Prepend a statement to the beginning of SgForInitStatement
ROSE_DLL_API void prependStatement(SgStatement *stmt, SgForInitStatement* for_init_stmt);
//! prepend a list of statements to the beginning of the current scope,
//! handling side effects as appropriate
ROSE_DLL_API void prependStatementList(const std::vector<SgStatement*>& stmt, SgScopeStatement* scope=NULL);
//! Check if a scope statement has a simple children statement list
//! so inserting additional statements under the scope is straightforward and unambiguous.
//! for example, SgBasicBlock has a simple statement list while IfStmt does not.
ROSE_DLL_API bool hasSimpleChildrenList (SgScopeStatement* scope);
//! Insert a statement before or after the target statement within the target's scope. Move around preprocessing info automatically
ROSE_DLL_API void insertStatement(SgStatement *targetStmt, SgStatement* newStmt, bool insertBefore= true, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements before or after the target statement within the
//target's scope
ROSE_DLL_API void insertStatementList(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts, bool insertBefore= true);
//! Insert a statement before a target statement
ROSE_DLL_API void insertStatementBefore(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements before a target statement
ROSE_DLL_API void insertStatementListBefore(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmts);
//! Insert a statement after a target statement, Move around preprocessing info automatically by default
ROSE_DLL_API void insertStatementAfter(SgStatement *targetStmt, SgStatement* newStmt, bool autoMovePreprocessingInfo = true);
//! Insert a list of statements after a target statement
ROSE_DLL_API void insertStatementListAfter(SgStatement *targetStmt, const std::vector<SgStatement*>& newStmt);
//! Insert a statement after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found
ROSE_DLL_API void insertStatementAfterLastDeclaration(SgStatement* stmt, SgScopeStatement* scope);
//! Insert a list of statements after the last declaration within a scope. The statement will be prepended to the scope if there is no declaration statement found
ROSE_DLL_API void insertStatementAfterLastDeclaration(std::vector<SgStatement*> stmt_list, SgScopeStatement* scope);
//! Insert a statement before the first non-declaration statement in a scope. If the scope has no non-declaration statements
// then the statement is inserted at the end of the scope.
ROSE_DLL_API void insertStatementBeforeFirstNonDeclaration(SgStatement *newStmt, SgScopeStatement *scope,
bool movePreprocessingInfo=true);
//! Insert statements before the first non-declaration statement in a scope. If the scope has no non-declaration statements
//then the new statements are inserted at the end of the scope.
ROSE_DLL_API void insertStatementListBeforeFirstNonDeclaration(const std::vector<SgStatement*> &newStmts,
SgScopeStatement *scope);
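// A minimal sketch (illustrative only) combining the insertion routines above with the usual
// SageBuilder interface (an assumption; see SageBuilder for the authoritative signatures);
// 'targetStmt' is a hypothetical statement already attached to the AST:
//   SgVariableDeclaration* decl = SageBuilder::buildVariableDeclaration(
//       "tmp", SageBuilder::buildIntType(),
//       SageBuilder::buildAssignInitializer(SageBuilder::buildIntVal(0)),
//       targetStmt->get_scope());
//   SageInterface::insertStatementBefore(targetStmt, decl);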
//! Remove a statement from its attach point of the AST. Automatically keep its associated preprocessing information at the original place after the removal. The statement is still in memory and it is up to the users to decide if the removed one will be inserted somewhere else or released from memory (deleteAST()).
ROSE_DLL_API void removeStatement(SgStatement* stmt, bool autoRelocatePreprocessingInfo = true);
//! Deep delete a sub AST tree. It uses postorder traversal to delete each child node. Users must take care of any dangling pointers, symbols or types that result. This is identical to deleteAST()
ROSE_DLL_API void deepDelete(SgNode* root);
//! Replace a statement with another. Move preprocessing information from oldStmt to newStmt if requested.
ROSE_DLL_API void replaceStatement(SgStatement* oldStmt, SgStatement* newStmt, bool movePreprocessinInfo = false);
//! Replace an anchor node with a specified pattern subtree with optional SgVariantExpression. All SgVariantExpression in the pattern will be replaced with copies of the anchor node.
ROSE_DLL_API SgNode* replaceWithPattern (SgNode * anchor, SgNode* new_pattern);
//! Replace all variable references to an old symbol in a scope to being references to a new symbol.
// Essentially replace variable a with b.
ROSE_DLL_API void replaceVariableReferences(SgVariableSymbol* old_sym, SgVariableSymbol* new_sym, SgScopeStatement * scope );
/** Given an expression, generates a temporary variable whose initializer optionally evaluates
* that expression. Then, the var reference expression returned can be used instead of the original
* expression. The temporary variable created can be reassigned to the expression by the returned SgAssignOp;
* this can be used when the expression the variable represents needs to be evaluated. NOTE: This handles
* reference types correctly by using pointer types for the temporary.
* @param expression Expression which will be replaced by a variable
* @param scope scope in which the temporary variable will be generated
* @param reEvaluate an assignment op to reevaluate the expression. Leave NULL if not needed
* @return declaration of the temporary variable, and a variable reference expression to use instead of
* the original expression. */
std::pair<SgVariableDeclaration*, SgExpression* > createTempVariableForExpression(SgExpression* expression,
SgScopeStatement* scope, bool initializeInDeclaration, SgAssignOp** reEvaluate = NULL);
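// A minimal usage sketch (illustrative only); 'expr' is a hypothetical subexpression and
// 'scope' the scope in which the temporary should live:
//   std::pair<SgVariableDeclaration*, SgExpression*> tmp =
//       SageInterface::createTempVariableForExpression(expr, scope, true);
//   // tmp.first is the temporary's declaration (still to be inserted into the AST);
//   // tmp.second is a reference expression that can replace the original 'expr'.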
/* This function creates a temporary variable for a given expression in the given scope
This is different from SageInterface::createTempVariableForExpression in that it does not
try to be smart about creating pointers to reference types and so on. The temp is initialized to the expression.
The caller is responsible for setting the parent of SgVariableDeclaration since buildVariableDeclaration
may not set_parent() when the scope stack is empty. See programTransformation/extractFunctionArgumentsNormalization/ExtractFunctionArguments.C for sample usage.
@param expression Expression which will be replaced by a variable
@param scope scope in which the temporary variable will be generated
*/
std::pair<SgVariableDeclaration*, SgExpression*> createTempVariableAndReferenceForExpression
(SgExpression* expression, SgScopeStatement* scope);
//! Append an argument to SgFunctionParameterList, transparently set parent,scope, and symbols for arguments when possible
/*! We recommend building the SgFunctionParameterList before building a function declaration.
However, it is still allowed to append new arguments for existing function declarations.
\todo function type , function symbol also need attention.
*/
ROSE_DLL_API SgVariableSymbol* appendArg(SgFunctionParameterList *, SgInitializedName*);
//!Prepend an argument to SgFunctionParameterList
ROSE_DLL_API SgVariableSymbol* prependArg(SgFunctionParameterList *, SgInitializedName*);
//! Append an expression to a SgExprListExp, set the parent pointer also
ROSE_DLL_API void appendExpression(SgExprListExp *, SgExpression*);
//! Append an expression list to a SgExprListExp, set the parent pointers also
ROSE_DLL_API void appendExpressionList(SgExprListExp *, const std::vector<SgExpression*>&);
//! Set parameter list for a function declaration, considering existing parameter list etc.
template <class actualFunction>
void setParameterList(actualFunction *func,SgFunctionParameterList *paralist) {
// TODO consider the difference between C++ and Fortran
// fixup the scope of arguments; no symbols for a nondefining function declaration's arguments
// DQ (11/25/2011): templated function so that we can handle both
// SgFunctionDeclaration and SgTemplateFunctionDeclaration (and their associated member
// function derived classes).
ROSE_ASSERT(func != NULL);
ROSE_ASSERT(paralist != NULL);
#if 0
// At this point we don't have cerr and endl defined, so comment this code out.
// Warn users if a paralist is being shared
if (paralist->get_parent() !=NULL)
{
cerr << "Waring! Setting a used SgFunctionParameterList to function: "
<< (func->get_name()).getString()<<endl
<< " Sharing parameter lists can corrupt symbol tables!"<<endl
<< " Please use deepCopy() to get an exclusive parameter list for each function declaration!"<<endl;
// ROSE_ASSERT(false);
}
#endif
// Liao, 2/5/2008: the constructor of SgFunctionDeclaration will automatically generate an SgFunctionParameterList, so be cautious when setting a new paralist!
if (func->get_parameterList() != NULL)
{
if (func->get_parameterList() != paralist)
{
delete func->get_parameterList();
}
}
func->set_parameterList(paralist);
paralist->set_parent(func);
// DQ (5/15/2012): Need to set the declptr in each SgInitializedName IR node.
// This is needed to support the AST Copy mechanism (at least). The files: test2005_150.C,
// test2012_81.C and testcode2012_82.C demonstrate this problem.
SgInitializedNamePtrList & args = paralist->get_args();
for (SgInitializedNamePtrList::iterator i = args.begin(); i != args.end(); i++)
{
(*i)->set_declptr(func);
}
}
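// A minimal usage sketch (illustrative only, assuming the usual SageBuilder interface for
// building the list and the parameter); 'funcDecl' is a hypothetical SgFunctionDeclaration*:
//   SgFunctionParameterList* params = SageBuilder::buildFunctionParameterList();
//   SageInterface::appendArg(params,
//       SageBuilder::buildInitializedName("n", SageBuilder::buildIntType()));
//   SageInterface::setParameterList(funcDecl, params);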
//! Set the pragma of a pragma declaration. Handle memory release for a preexisting pragma, and set the parent pointer.
ROSE_DLL_API void setPragma(SgPragmaDeclaration* decl, SgPragma *pragma);
//! Replace an expression with another, used for variable reference substitution and similar transformations. The old expression can be deleted (the default) or kept.
ROSE_DLL_API void replaceExpression(SgExpression* oldExp, SgExpression* newExp, bool keepOldExp=false);
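// A minimal usage sketch (illustrative only, assuming the usual SageBuilder interface);
// replaces a hypothetical expression 'oldExp' with the constant 0 and deletes 'oldExp':
//   SageInterface::replaceExpression(oldExp, SageBuilder::buildIntVal(0));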
//! Replace a given expression with a list of statements produced by a generator
ROSE_DLL_API void replaceExpressionWithStatement(SgExpression* from,
SageInterface::StatementGenerator* to);
//! Similar to replaceExpressionWithStatement, but with more restrictions.
//! Assumptions: from is not within the test of a loop or ifStmt, not currently traversing from or the statement it is in
ROSE_DLL_API void replaceSubexpressionWithStatement(SgExpression* from,
SageInterface::StatementGenerator* to);
//! Set the operand for expressions with a single operand, such as unary expressions. Handle file info, lvalue, pointer downcasting, parent pointer, etc.
ROSE_DLL_API void setOperand(SgExpression* target, SgExpression* operand);
//! Set the left hand operand for binary expressions, transparently downcasting target expressions when necessary
ROSE_DLL_API void setLhsOperand(SgExpression* target, SgExpression* lhs);
//! Set the right hand operand for binary expressions
ROSE_DLL_API void setRhsOperand(SgExpression* target, SgExpression* rhs);
//! Set original expression trees to NULL for SgValueExp or SgCastExp expressions, so you can change the value and have it unparsed correctly.
ROSE_DLL_API void removeAllOriginalExpressionTrees(SgNode* top);
// DQ (1/25/2010): Added support for directories
//! Move file to be generated in a subdirectory (will be generated by the unparser).
ROSE_DLL_API void moveToSubdirectory ( std::string directoryName, SgFile* file );
//! Supporting function for comment relocation in insertStatement() and removeStatement().
ROSE_DLL_API SgStatement* findSurroundingStatementFromSameFile(SgStatement* targetStmt, bool & surroundingStatementPreceedsTargetStatement);
//! Relocate comments and CPP directives from one statement to another.
ROSE_DLL_API void moveCommentsToNewStatement(SgStatement* sourceStatement, const std::vector<int> & indexList, SgStatement* targetStatement, bool surroundingStatementPreceedsTargetStatement);
// DQ (7/19/2015): This is required to support general unparsing of template instantiations for the GNU g++
// compiler, which does not permit name qualification to be used to express the namespace
// where a template instantiation would be placed. Such name qualification would also sometimes require
// global qualification, which is also not allowed by the GNU g++ compiler. These issues appear to be
// specific to the GNU compiler versions, at least versions 4.4 through 4.8.
//! Relocate the declaration to be explicitly represented in its associated namespace (required for some backend compilers to process template instantiations).
ROSE_DLL_API void moveDeclarationToAssociatedNamespace ( SgDeclarationStatement* declarationStatement );
ROSE_DLL_API bool isTemplateInstantiationNode(SgNode* node);
ROSE_DLL_API void wrapAllTemplateInstantiationsInAssociatedNamespaces(SgProject* root);
// DQ (12/1/2015): Adding support to fix up internal data structures that have references to statements (e.g. macro expansions).
ROSE_DLL_API void resetInternalMapsForTargetStatement(SgStatement* sourceStatement);
//@}
//------------------------------------------------------------------------
//@{
/*! @name AST repair, fix, and postprocessing.
\brief Mostly used internally when some AST pieces are built without knowing their target
scope/parent, especially during bottom-up construction of the AST. The associated symbols,
parent and scope pointers cannot be set at construction time.
A set of utility functions is provided to
patch up the scope, parent, and symbol for them when the target scope/parent becomes known.
*/
//! Connect variable references to the right variable symbols when feasible, return the number of references being fixed.
/*! In AST translation, it is possible to build a variable reference before the variable
is declared. buildVarRefExp() will use a fake initialized name and symbol as placeholders
to get the work done. Users should call fixVariableReferences() when the AST is complete and all
variable declarations are in place.
*/
ROSE_DLL_API int fixVariableReferences(SgNode* root);
//!Patch up symbol, scope, and parent information when a SgVariableDeclaration's scope is known.
/*!
It is possible to build a variable declaration without knowing its scope information during bottom-up construction of the AST, though top-down construction is recommended in general.
In this case, we have to patch up the symbol table, scope, and parent information when the scope is known. This function is usually used internally within appendStatement() and insertStatement().
*/
ROSE_DLL_API void fixVariableDeclaration(SgVariableDeclaration* varDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement(), etc. when a struct declaration was built without knowing its target scope.
ROSE_DLL_API void fixStructDeclaration(SgClassDeclaration* structDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement(), etc. when a class declaration was built without knowing its target scope.
ROSE_DLL_API void fixClassDeclaration(SgClassDeclaration* classDecl, SgScopeStatement* scope);
//! Fix symbols, parent and scope pointers. Used internally within appendStatement(), insertStatement(), etc. when a namespace declaration was built without knowing its target scope.
ROSE_DLL_API void fixNamespaceDeclaration(SgNamespaceDeclarationStatement* structDecl, SgScopeStatement* scope);
//! Fix the symbol table for SgLabelStatement. Used internally when the label is built without knowing its target scope. Neither parameter can be NULL.
ROSE_DLL_API void fixLabelStatement(SgLabelStatement* label_stmt, SgScopeStatement* scope);
//! Set a numerical label for a Fortran statement. The statement should already have an enclosing function definition. SgLabelSymbol and SgLabelRefExp are created transparently as needed.
ROSE_DLL_API void setFortranNumericLabel(SgStatement* stmt, int label_value);
//! Suggest next usable (non-conflicting) numeric label value for a Fortran function definition scope
ROSE_DLL_API int suggestNextNumericLabel(SgFunctionDefinition* func_def);
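// A minimal usage sketch (illustrative only); 'stmt' is a hypothetical Fortran statement
// already attached inside a function definition:
//   SgFunctionDefinition* funcDef = SageInterface::getEnclosingFunctionDefinition(stmt);
//   SageInterface::setFortranNumericLabel(stmt, SageInterface::suggestNextNumericLabel(funcDef));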
//! Fix the symbol table and set scope (only if scope in declaration is not already set).
ROSE_DLL_API void fixFunctionDeclaration(SgFunctionDeclaration* stmt, SgScopeStatement* scope);
//! Fix the symbol table and set scope (only if scope in declaration is not already set).
ROSE_DLL_API void fixTemplateDeclaration(SgTemplateDeclaration* stmt, SgScopeStatement* scope);
//! A wrapper containing fixes (fixVariableDeclaration(), fixStructDeclaration(), fixLabelStatement(), etc.) for all kinds of statements. Should be used before attaching the statement into the AST.
ROSE_DLL_API void fixStatement(SgStatement* stmt, SgScopeStatement* scope);
// DQ (6/11/2015): This reports the statements that are marked as transformed (used to debug the token-based unparsing).
//! This collects the statements that are marked as transformed (useful in debugging).
ROSE_DLL_API std::set<SgStatement*> collectTransformedStatements( SgNode* node );
//! This collects the statements that are marked as modified (a flag automatically set by all set_* generated functions) (useful in debugging).
ROSE_DLL_API std::set<SgStatement*> collectModifiedStatements( SgNode* node );
//! This collects the SgLocatedNodes that are marked as modified (a flag automatically set by all set_* generated functions) (useful in debugging).
ROSE_DLL_API std::set<SgLocatedNode*> collectModifiedLocatedNodes( SgNode* node );
//@}
//! Update defining and nondefining links due to a newly introduced function declaration. Should be used after inserting the function into a scope.
/*! This function not only sets the defining and nondefining links of the newly introduced
 * function declaration inside a scope, but also updates the links of other declarations of the
 * same function accordingly, if there are any.
 * Assumption: The function has already been inserted/appended/prepended into the scope before calling this function.
*/
ROSE_DLL_API void updateDefiningNondefiningLinks(SgFunctionDeclaration* func, SgScopeStatement* scope);
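// A minimal usage sketch (illustrative only); 'funcDecl' is a hypothetical function
// declaration that has just been added to a hypothetical 'scope':
//   SageInterface::appendStatement(funcDecl, scope);
//   SageInterface::updateDefiningNondefiningLinks(funcDecl, scope);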
//------------------------------------------------------------------------
//@{
/*! @name Advanced AST transformations, analyses, and optimizations
\brief Some complex but commonly used AST transformations.
*/
//! Collect all read and write references within stmt, which can be a function, a scope statement, or a single statement. Note that a reference can be both read and written, like i++
ROSE_DLL_API bool
collectReadWriteRefs(SgStatement* stmt, std::vector<SgNode*>& readRefs, std::vector<SgNode*>& writeRefs, bool useCachedDefUse=false);
//! Collect unique variables which are read or written within a statement. Note that a variable can be both read and written. The statement can be a function, a scope, or a single statement. For accesses to members of aggregate data, we return the coarse-grained aggregate memory object by default.
ROSE_DLL_API bool collectReadWriteVariables(SgStatement* stmt, std::set<SgInitializedName*>& readVars, std::set<SgInitializedName*>& writeVars, bool coarseGrain=true);
//! Collect read-only variables within a statement. The statement can be a function, a scope, or a single statement. For accesses to members of aggregate data, we return the coarse-grained aggregate memory object by default.
ROSE_DLL_API void collectReadOnlyVariables(SgStatement* stmt, std::set<SgInitializedName*>& readOnlyVars, bool coarseGrain=true);
//! Collect read-only variable symbols within a statement. The statement can be a function, a scope, or a single statement. For accesses to members of aggregate data, we return the coarse-grained aggregate memory object by default.
ROSE_DLL_API void collectReadOnlySymbols(SgStatement* stmt, std::set<SgVariableSymbol*>& readOnlySymbols, bool coarseGrain=true);
//! Check if a variable reference is used by its address: including &a expression and foo(a) when type2 foo(Type& parameter) in C++
ROSE_DLL_API bool isUseByAddressVariableRef(SgVarRefExp* ref);
//! Collect variable references involving use by address: including &a expression and foo(a) when type2 foo(Type& parameter) in C++
ROSE_DLL_API void collectUseByAddressVariableRefs (const SgStatement* s, std::set<SgVarRefExp* >& varSetB);
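// A minimal usage sketch (illustrative only); 'stmt' is a hypothetical statement or scope:
//   std::set<SgInitializedName*> readVars, writeVars;
//   SageInterface::collectReadWriteVariables(stmt, readVars, writeVars);
//   // A variable that is both read and written (e.g. i++) appears in both sets.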
#ifndef ROSE_USE_INTERNAL_FRONTEND_DEVELOPMENT
//!Call liveness analysis on an entire project
ROSE_DLL_API LivenessAnalysis * call_liveness_analysis(SgProject* project, bool debug=false);
//!get liveIn and liveOut variables for a for loop from liveness analysis result liv.
ROSE_DLL_API void getLiveVariables(LivenessAnalysis * liv, SgForStatement* loop, std::set<SgInitializedName*>& liveIns, std::set<SgInitializedName*> & liveOuts);
#endif
//!Recognize and collect reduction variables and operations within a C/C++ loop, following OpenMP 3.0 specification for allowed reduction variable types and operation types.
ROSE_DLL_API void ReductionRecognition(SgForStatement* loop, std::set< std::pair <SgInitializedName*, OmpSupport::omp_construct_enum> > & results);
//! Constant folding an AST subtree rooted at 'r' (replacing its children with their constant values, if applicable). Please be advised that constant folding on floating point computation may decrease the accuracy of floating point computations!
/*! It is a wrapper function for ConstantFolding::constantFoldingOptimization(). Note that only r's children are replaced with their corresponding constant values, not the input SgNode r itself. You have to call this upon an expression's parent node if you want to fold the expression. */
ROSE_DLL_API void constantFolding(SgNode* r);
//! Instrument a function (add a statement, often a function call) right before its return points; handle multiple return statements (by duplicating statement s) and return expressions with side effects. Return the number of statements inserted.
/*! Useful when adding a runtime library call to terminate the runtime system right before the end of a program, especially for OpenMP and UPC runtime systems. Returns with complex expressions with side effects are rewritten using an additional assignment statement.
*/
ROSE_DLL_API int instrumentEndOfFunction(SgFunctionDeclaration * func, SgStatement* s);
//! Remove jumps whose label is immediately after the jump. Used to clean up inlined code fragments.
ROSE_DLL_API void removeJumpsToNextStatement(SgNode*);
//! Remove labels which are not targets of any goto statements
ROSE_DLL_API void removeUnusedLabels(SgNode* top);
//! Remove consecutive labels
ROSE_DLL_API void removeConsecutiveLabels(SgNode* top);
//! Merge a variable assignment statement into a matching variable declaration statement. Callers should make sure the merge is semantically correct (by not introducing compilation errors). This function simply does the merge transformation, without eligibility check.
/*!
* e.g. int i; i=10; becomes int i=10; the original i=10 will be deleted after the merge
* If successful, return true; otherwise return false (e.g. the variable declaration does not match or already has an initializer).
* The original assignment stmt will be removed by default
* This function is a bit ambiguous about the merge direction, to be phased out.
*/
ROSE_DLL_API bool mergeDeclarationAndAssignment (SgVariableDeclaration* decl, SgExprStatement* assign_stmt, bool removeAssignStmt = true);
//! Merge an assignment into its upstream declaration statement. Callers should make sure the merge is semantically correct.
ROSE_DLL_API bool mergeAssignmentWithDeclaration (SgExprStatement* assign_stmt, SgVariableDeclaration* decl, bool removeAssignStmt = true);
//! Merge a declaration statement into a matching variable assignment that follows it. Callers should make sure the merge is semantically correct (by not introducing compilation errors). This function simply does the merge transformation, without an eligibility check.
/*!
* e.g. int i; i=10; becomes int i=10; the original int i; will be deleted after the merge
*/
ROSE_DLL_API bool mergeDeclarationWithAssignment (SgVariableDeclaration* decl, SgExprStatement* assign_stmt);
//! Split a variable declaration with an rhs assignment into two statements: a declaration and an assignment.
/*! Return the generated assignment statement, if any
* e.g. int i =10; becomes int i; i=10;
* This can be seen as a normalization of declarations
*/
ROSE_DLL_API SgExprStatement* splitVariableDeclaration (SgVariableDeclaration* decl);
//! Split declarations within a scope into declarations and assignment statements, by default only top level declarations are considered. Return the number of declarations split.
ROSE_DLL_API int splitVariableDeclaration (SgScopeStatement* scope, bool topLevelOnly = true);
//! Replace an expression with a temporary variable and an assignment statement
/*!
Add a new temporary variable to contain the value of 'from'
Change reference to 'from' to use this new variable
Assumptions: 'from' is not within the test of a loop or 'if'
not currently traversing 'from' or the statement it is in
*/
ROSE_DLL_API SgAssignInitializer* splitExpression(SgExpression* from, std::string newName = "");
//! Split long expressions into blocks of statements
ROSE_DLL_API void splitExpressionIntoBasicBlock(SgExpression* expr);
//! Remove labeled goto statements
ROSE_DLL_API void removeLabeledGotos(SgNode* top);
//! If the given statement contains any break statements in its body, add a new label below the statement and change the breaks into gotos to that new label.
ROSE_DLL_API void changeBreakStatementsToGotos(SgStatement* loopOrSwitch);
//! Check if the body of a 'for' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfFor(SgForStatement* fs);
//! Check if the body of a 'upc_forall' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfUpcForAll(SgUpcForAllStatement* fs);
//! Check if the body of a 'while' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfWhile(SgWhileStmt* ws);
//! Check if the body of a 'do .. while' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfDoWhile(SgDoWhileStmt* ws);
//! Check if the body of a 'switch' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfSwitch(SgSwitchStatement* ws);
//! Check if the body of a 'case option' statement is a SgBasicBlock, create one if not.
SgBasicBlock* ensureBasicBlockAsBodyOfCaseOption(SgCaseOptionStmt* cs);
//! Check if the body of a 'default option' statement is a SgBasicBlock, create one if not.
SgBasicBlock* ensureBasicBlockAsBodyOfDefaultOption(SgDefaultOptionStmt * cs);
//! Check if the true body of an 'if' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsTrueBodyOfIf(SgIfStmt* ifs);
//! Check if the false body of an 'if' statement is a SgBasicBlock, create one if not when the flag is true.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsFalseBodyOfIf(SgIfStmt* ifs, bool createEmptyBody = true);
//! Check if the body of a 'catch' statement is a SgBasicBlock, create one if not.
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfCatch(SgCatchOptionStmt* cos);
//! Check if the body of a SgOmpBodyStatement is a SgBasicBlock, create one if not
ROSE_DLL_API SgBasicBlock* ensureBasicBlockAsBodyOfOmpBodyStmt(SgOmpBodyStatement* ompbodyStmt);
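// A minimal usage sketch (illustrative only); 'forStmt' is a hypothetical SgForStatement*
// whose body may or may not already be a block, and 'newStmt' is a hypothetical statement:
//   SgBasicBlock* body = SageInterface::ensureBasicBlockAsBodyOfFor(forStmt);
//   SageInterface::appendStatement(newStmt, body);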
// DQ (1/18/2015): This is added to support better quality token-based unparsing.
//! Remove unused basic block IR nodes added as part of normalization.
ROSE_DLL_API void cleanupNontransformedBasicBlockNode();
// DQ (1/18/2015): This is added to support better quality token-based unparsing.
//! Record where normalizations have been done so that we can perform denormalizations as required for the token-based unparsing to generate minimal diffs.
ROSE_DLL_API void recordNormalizations(SgStatement* s);
//! Check if a statement is a (true or false) body of a container-like parent, such as For, Upc_forall, Do-while,
//! switch, If, Catch, OmpBodyStmt, etc
bool isBodyStatement (SgStatement* s);
//! Fix up ifs, loops, while, switch, Catch, OmpBodyStatement, etc. to have blocks as body components. It also adds an empty else body to if statements that don't have them.
void changeAllBodiesToBlocks(SgNode* top, bool createEmptyBody = true);
// The same as changeAllBodiesToBlocks(SgNode* top). Phased out.
//void changeAllLoopBodiesToBlocks(SgNode* top);
//! Make a single statement body to be a basic block. Its parent is if, while, catch, or upc_forall etc.
SgBasicBlock * makeSingleStatementBodyToBlock(SgStatement* singleStmt);
#if 0
/** If s is the body of a loop, catch, or if statement and is already a basic block,
* s is returned unmodified. Otherwise generate a SgBasicBlock between s and its parent
* (a loop, catch, or if statement, etc). */
SgLocatedNode* ensureBasicBlockAsParent(SgStatement* s);
#endif
//! Get the constant value from a constant integer expression; abort on
//! everything else. Note that signed long longs are converted to unsigned.
unsigned long long getIntegerConstantValue(SgValueExp* expr);
//! Get a statement's dependent declarations which declare the types used in the statement. The returned vector of declaration statements is sorted according to their appearance order in the original AST. Any reference to a class or template class from a namespace will be treated as a reference to the enclosing namespace.
std::vector<SgDeclarationStatement*> getDependentDeclarations (SgStatement* stmt );
//! Insert an expression (new_exp) before another expression (anchor_exp) that has possible side effects, without changing the original semantics. This is achieved by using a comma operator: (new_exp, anchor_exp). The comma operator is returned.
SgCommaOpExp *insertBeforeUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp);
//! Insert an expression (new_exp) after another expression (anchor_exp) that has possible side effects, without changing the original semantics. This is done by using two comma operators: type T1; ... ((T1 = anchor_exp, new_exp), T1) ..., where T1 is a temp variable saving the possible side effect of anchor_exp. The top level comma op exp is returned. The reference to T1 in T1 = anchor_exp is saved in temp_ref.
SgCommaOpExp *insertAfterUsingCommaOp (SgExpression* new_exp, SgExpression* anchor_exp, SgStatement** temp_decl = NULL, SgVarRefExp** temp_ref = NULL);
/// \brief moves the body of a function f to a new function f`;
/// f's body is replaced with code that forwards the call to f`.
/// \return a pair indicating the statement containing the call of f`
/// and an initialized name referring to the temporary variable
/// holding the result of f`. In case f returns void
/// the initialized name is NULL.
/// \param definingDeclaration the defining function declaration of f
/// \param newName the name of function f`
/// \details f's new body becomes { f`(...); } and { int res = f`(...); return res; }
/// for functions returning void and a value, respectively.
/// two function declarations are inserted in f's enclosing scope
/// \code
/// result_type f`(...); <--- (1)
/// result_type f (...) { forward call to f` }
/// result_type f`(...) { original code } <--- (2)
/// \endcode
/// Calls to f are not updated, thus in the transformed code all
/// calls will continue calling f (this is also true for
/// recursive function calls from within the body of f`).
/// After the function has created the wrapper,
/// definingDeclaration becomes the wrapper function
/// The definition of f` is the next entry in the
/// statement list; the forward declaration of f` is the previous
/// entry in the statement list.
/// \pre definingDeclaration must be a defining declaration of a
/// free standing function.
/// typeid(SgFunctionDeclaration) == typeid(definingDeclaration)
/// i.e., this function is NOT implemented for class member functions,
/// template functions, procedures, etc.
std::pair<SgStatement*, SgInitializedName*>
wrapFunction(SgFunctionDeclaration& definingDeclaration, SgName newName);
/// \overload
/// \tparam NameGen functor that generates a new name based on the old name.
/// interface: SgName nameGen(const SgName&)
/// \param nameGen name generator
/// \brief see wrapFunction for details
template <class NameGen>
std::pair<SgStatement*, SgInitializedName*>
wrapFunction(SgFunctionDeclaration& definingDeclaration, NameGen nameGen)
{
return wrapFunction(definingDeclaration, nameGen(definingDeclaration.get_name()));
}
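// A minimal sketch (illustrative only) of a name generator usable with the templated
// overload above; the "_orig" suffix is an arbitrary choice, and 'definingDecl' is a
// hypothetical defining SgFunctionDeclaration*:
//   struct AppendOrigSuffix {
//     SgName operator()(const SgName& name) const
//       { return SgName(name.getString() + "_orig"); }
//   };
//   // std::pair<SgStatement*, SgInitializedName*> r =
//   //     SageInterface::wrapFunction(*definingDecl, AppendOrigSuffix());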
/// \brief convenience function that returns the first initialized name in a
/// list of variable declarations.
SgInitializedName& getFirstVariable(SgVariableDeclaration& vardecl);
//@}
// DQ (6/7/2012): Unclear where this function should go...
bool hasTemplateSyntax( const SgName & name );
#if 0
//------------------------AST dump, stringify-----------------------------
//------------------------------------------------------------------------
std::string buildOperatorString ( SgNode* astNode ); //transformationSupport.h
// do we need these?
std::string dump_node(const SgNode* astNode);
std::string dump_tree(const SgNode* astNode);
// or a friendly version of unparseToString(), as a member function
std::string SgNode::toString(bool asSubTree=true); // dump node or subtree
//----------------------------AST comparison------------------------------
//------------------------------------------------------------------------
// How to get generic functions for comparison?
bool isNodeEqual(SgNode* node1, SgNode* node2); //?
bool isTreeEqual(SgNode* tree1, SgNode* tree2);
//! Are two expressions equal (using a deep comparison)?
bool expressionTreeEqual(SgExpression*, SgExpression*);
//! Are corresponding expressions in two lists equal (using a deep comparison)?
bool expressionTreeEqualStar(const SgExpressionPtrList&,
const SgExpressionPtrList&);
//----------------------AST verification/repair----------------------------
//------------------------------------------------------------------------
// sanity check of AST subtree, any suggestions?
// TODO
verifySgNode(SgNode* node, bool subTree=true);
//src/midend/astDiagnostics/AstConsistencyTests.h
// AstTests::runAllTests(SgProject * )
//src/midend/astUtil/astInterface/AstInterface.h.C
//FixSgProject(SgProject &project)
//FixSgTree(SgNode* r)
//src/frontend/SageIII/astPostProcessing
//AstPostProcessing(SgNode * node)
//--------------------------AST modification------------------------------
//------------------------------------------------------------------------
// any operations changing AST tree, including
// insert, copy, delete(remove), replace
// insert before or after some point, argument list is consistent with LowLevelRewrite
void insertAst(SgNode* targetPosition, SgNode* newNode, bool insertBefore=true);
// previous examples
//void myStatementInsert(SgStatement* target,...)
// void AstInterfaceBase::InsertStmt(AstNodePtr const & orig, AstNodePtr const &n, bool insertbefore, bool extractfromBasicBlock)
// copy
// copy children of one basic block to another basic block
//void appendStatementCopy (const SgBasicBlock* a, SgBasicBlock* b);
void copyStatements (const SgBasicBlock* src, SgBasicBlock* dst);
// delete (remove) a node or a whole subtree
void removeSgNode(SgNode* targetNode); // need this?
void removeSgNodeTree(SgNode* subtree); // need this?
void removeStatement( SgStatement* targetStmt);
//Move = delete + insert
void moveAst (SgNode* src, SgNode* target); // need this?
// similar to
void moveStatements (SgBasicBlock* src, SgBasicBlock* target);
// replace= delete old + insert new (via building or copying)
// DQ (1/25/2010): This does not appear to exist as a definition anywhere in ROSE.
// void replaceAst(SgNode* oldNode, SgNode* newNode);
//void replaceChild(SgNode* parent, SgNode* from, SgNode* to);
//bool AstInterface::ReplaceAst( const AstNodePtr& orig, const AstNodePtr& n)
//--------------------------AST transformations---------------------------
//------------------------------------------------------------------------
// Advanced AST modifications through basic AST modifications
// Might not be included in the AST utility list, but listed here for the record.
// extract statements/content from a scope
void flattenBlocks(SgNode* n);
//src/midend/astInlining/inlinerSupport.h
void renameVariables(SgNode* n);
void renameLabels(SgNode* n, SgFunctionDefinition* enclosingFunctionDefinition);
void simpleCopyAndConstantPropagation(SgNode* top);
void changeAllMembersToPublic(SgNode* n);
void removeVariableDeclaration(SgInitializedName* initname);
//! Convert something like "int a = foo();" into "int a; a = foo();"
SgAssignOp* convertInitializerIntoAssignment(SgAssignInitializer* init);
//! Rewrites a while or for loop so that the official test is changed to
//! "true" and what had previously been the test is now an if-break
//! combination (with an inverted condition) at the beginning of the loop
//! body
void pushTestIntoBody(LoopStatement* loopStmt);
//programTransformation/finiteDifferencing/finiteDifferencing.h
//! Move variables declared in a for statement to just outside that statement.
void moveForDeclaredVariables(SgNode* root);
//------------------------ Is/Has functions ------------------------------
//------------------------------------------------------------------------
// misc. boolean functions
// some of them could moved to SgXXX class as a member function
bool isOverloaded (SgFunctionDeclaration * functionDeclaration);
bool isSwitchCond (const SgStatement* s);
bool isIfCond (const SgStatement* s);
bool isWhileCond (const SgStatement* s);
bool isStdNamespace (const SgScopeStatement* scope);
bool isTemplateInst (const SgDeclarationStatement* decl);
bool isCtor (const SgFunctionDeclaration* func);
bool isDtor (const SgFunctionDeclaration* func);
// src/midend/astInlining/typeTraits.h
bool hasTrivialDestructor(SgType* t);
ROSE_DLL_API bool isNonconstReference(SgType* t);
ROSE_DLL_API bool isReferenceType(SgType* t);
// generic ones, or move to the SgXXX class as a member function
bool isConst(SgNode* node); // const type, variable, function, etc.
// .... and more
bool isConstType (const SgType* type);
bool isConstFunction (const SgFunctionDeclaration* decl);
bool isMemberVariable(const SgInitializedName & var);
//bool isMemberVariable(const SgNode& in);
bool isPrototypeInScope (SgScopeStatement * scope,
SgFunctionDeclaration * functionDeclaration,
SgDeclarationStatement * startingAtDeclaration);
bool MayRedefined(SgExpression* expr, SgNode* root);
// bool isPotentiallyModified(SgExpression* expr, SgNode* root); // inlinderSupport.h
bool hasAddressTaken(SgExpression* expr, SgNode* root);
//src/midend/astInlining/inlinerSupport.C
// can also classified as topdown search
bool containsVariableReference(SgNode* root, SgInitializedName* var);
bool isDeclarationOf(SgVariableDeclaration* decl, SgInitializedName* var);
bool isPotentiallyModifiedDuringLifeOf(SgBasicBlock* sc,
SgInitializedName* toCheck,
SgInitializedName* lifetime);
//src/midend/programTransformation/partialRedundancyElimination/pre.h
bool anyOfListPotentiallyModifiedIn(const std::vector<SgVariableSymbol*>& syms, SgNode* n);
//------------------------ loop handling ---------------------------------
//------------------------------------------------------------------------
//get and set loop control expressions
// 0: init expr, 1: condition expr, 2: stride expr
SgExpression* getForLoopTripleValues(int valuetype,SgForStatement* forstmt );
int setForLoopTripleValues(int valuetype,SgForStatement* forstmt, SgExpression* exp);
bool isLoopIndexVarRef(SgForStatement* forstmt, SgVarRefExp *varref);
SgInitializedName * getLoopIndexVar(SgForStatement* forstmt);
//------------------------expressions-------------------------------------
//------------------------------------------------------------------------
//src/midend/programTransformation/partialRedundancyElimination/pre.h
int countComputationsOfExpressionIn(SgExpression* expr, SgNode* root);
//src/midend/astInlining/replaceExpressionWithStatement.h
void replaceAssignmentStmtWithStatement(SgExprStatement* from, StatementGenerator* to);
void replaceSubexpressionWithStatement(SgExpression* from,
StatementGenerator* to);
SgExpression* getRootOfExpression(SgExpression* n);
//--------------------------preprocessing info. -------------------------
//------------------------------------------------------------------------
//! Removes all preprocessing information at a given position.
void cutPreprocInfo (SgBasicBlock* b,
PreprocessingInfo::RelativePositionType pos,
AttachedPreprocessingInfoType& save_buf);
//! Pastes preprocessing information at the front of a statement.
void pastePreprocInfoFront (AttachedPreprocessingInfoType& save_buf,
SgStatement* s);
//! Pastes preprocessing information at the back of a statement.
void pastePreprocInfoBack (AttachedPreprocessingInfoType& save_buf,
SgStatement* s);
/*!
* \brief Moves 'before' preprocessing information.
* Moves all preprocessing information attached 'before' the source
* statement to the front of the destination statement.
*/
// a generic one for all
/// void movePreprocessingInfo(src, dest, RelativePositionType);
void moveBeforePreprocInfo (SgStatement* src, SgStatement* dest);
void moveInsidePreprocInfo (SgBasicBlock* src, SgBasicBlock* dest);
void moveAfterPreprocInfo (SgStatement* src, SgStatement* dest);
//--------------------------------operator--------------------------------
//------------------------------------------------------------------------
// from transformationSupport.h, not sure if they should be included here
/* return enum code for SAGE operators */
operatorCodeType classifyOverloadedOperator(); // transformationSupport.h
/*! \brief generates a source code string from an operator name.
This function returns a string representing the elementwise operator (for primitive types)
that would match the one associated with the overloaded operator for user-defined
abstractions (e.g. identifyOperator("operator+()") returns "+").
*/
std::string stringifyOperator (std::string name);
//--------------------------------macro ----------------------------------
//------------------------------------------------------------------------
std::string buildMacro ( std::string s ); //transformationSupport.h
//--------------------------------access functions---------------------------
//----------------------------------get/set sth.-----------------------------
// several categories:
* get/set a direct child/grandchild node or fields
* get/set a property flag value
* get a descendent child node using preorder searching
* get an ancestor node using bottomup/reverse searching
// SgName or string?
std::string getFunctionName (SgFunctionCallExp* functionCallExp);
std::string getFunctionTypeName ( SgFunctionCallExp* functionCallExpression );
// do we need them anymore? or are existing member functions enough?
// a generic one:
std::string get_name (const SgNode* node);
std::string get_name (const SgDeclarationStatement * declaration);
// get/set some property: should be moved to SgXXX as an inherent member function?
// access modifier
void setExtern (SgFunctionDeclaration*)
void clearExtern()
// similarly for other declarations and other properties
void setExtern (SgVariableDeclaration*)
void setPublic()
void setPrivate()
#endif
// DQ (1/23/2013): Added support for generating a set of source sequence entries.
std::set<unsigned int> collectSourceSequenceNumbers( SgNode* astNode );
//--------------------------------Type Traits (C++)---------------------------
bool HasNoThrowAssign(const SgType * const inputType);
bool HasNoThrowCopy(const SgType * const inputType);
bool HasNoThrowConstructor(const SgType * const inputType);
bool HasTrivialAssign(const SgType * const inputType);
bool HasTrivialCopy(const SgType * const inputType);
bool HasTrivialConstructor(const SgType * const inputType);
bool HasTrivialDestructor(const SgType * const inputType);
bool HasVirtualDestructor(const SgType * const inputType);
bool IsBaseOf(const SgType * const inputBaseType, const SgType * const inputDerivedType);
bool IsAbstract(const SgType * const inputType);
bool IsClass(const SgType * const inputType);
bool IsEmpty(const SgType * const inputType);
bool IsEnum(const SgType * const inputType);
bool IsPod(const SgType * const inputType);
bool IsPolymorphic(const SgType * const inputType);
bool IsStandardLayout(const SgType * const inputType);
bool IsLiteralType(const SgType * const inputType);
bool IsTrivial(const SgType * const inputType);
bool IsUnion(const SgType * const inputType);
SgType * UnderlyingType(SgType *type);
// DQ (3/2/2014): Added a new interface function (used in the snippet insertion support).
// void supportForInitializedNameLists ( SgScopeStatement* scope, SgInitializedNamePtrList & variableList );
// DQ (3/4/2014): Added support for testing two trees for equivalents using the AST iterators.
bool isStructurallyEquivalentAST( SgNode* tree1, SgNode* tree2 );
// JP (10/14/24): Moved code to evaluate a const integer expression (like in array size definitions) to SageInterface
/*! The datastructure is used as the return type for SageInterface::evaluateConstIntegerExpression(). One needs to always check whether hasValue_ is true before accessing value_ */
struct const_int_expr_t {
size_t value_;
bool hasValue_;
};
/*! \brief The function tries to evaluate const integer expressions (such as are used in array dimension sizes). It follows variable symbols, and requires constness. */
struct const_int_expr_t evaluateConstIntegerExpression(SgExpression *expr);
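// A minimal usage sketch (illustrative only); 'dimExpr' is a hypothetical array-dimension
// expression:
//   struct const_int_expr_t res = SageInterface::evaluateConstIntegerExpression(dimExpr);
//   if (res.hasValue_)
//   {
//     size_t dim = res.value_;  // only valid when hasValue_ is true
//     // use dim ...
//   }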
// JP (9/17/14): Added function to test whether two SgType* are equivalent or not
bool checkTypesAreEqual(SgType *typeA, SgType *typeB);
//--------------------------------Java interface functions ---------------------
#ifdef ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
ROSE_DLL_API std::string getTempDirectory(SgProject *project);
ROSE_DLL_API void destroyTempDirectory(std::string);
ROSE_DLL_API SgFile *processFile(SgProject *, std::string, bool unparse = false);
ROSE_DLL_API std::string preprocessPackage(SgProject *, std::string);
ROSE_DLL_API std::string preprocessImport(SgProject *, std::string);
ROSE_DLL_API SgFile* preprocessCompilationUnit(SgProject *, std::string, std::string, bool unparse = true);
ROSE_DLL_API SgClassDefinition *findJavaPackage(SgScopeStatement *, std::string);
ROSE_DLL_API SgClassDefinition *findOrInsertJavaPackage(SgProject *, std::string, bool create_directory = false);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassDefinition *package_definition, std::string);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, std::string, std::string);
ROSE_DLL_API SgClassDeclaration *findOrImportJavaClass(SgProject *, SgClassType *);
ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassDefinition *);
ROSE_DLL_API SgMemberFunctionDeclaration *findJavaMain(SgClassType *);
#endif // ROSE_BUILD_JAVA_LANGUAGE_SUPPORT
// DQ (8/31/2016): Making this a template function so that we can have it work with user defined filters.
//! This function detects template instantiations that are relevant when filters are used.
/*!
EDG normalizes some in-class template functions and member functions to be redefined outside of a class. This causes the associated template instantiations
to be declared outside of the class, and to be marked as compiler generated (since the compiler generated that form outside of the class declaration).
ROSE captures the function definitions, but in the new location (defined outside of the class declaration). This can confuse some simple tests
for template instantiations that are a part of definitions in a file, thus we have this function to detect this specific normalization.
*/
template < class T >
bool isTemplateInstantiationFromTemplateDeclarationSatisfyingFilter (SgFunctionDeclaration* function, T* filter )
{
// DQ (9/1/2016): This function is called in the Call graph generation to avoid filtering out EDG normalized
// function template instantiations (which come from normalized template functions and member functions).
// Note that because of the EDG normalization the member function is moved outside of the class, and
// thus marked as compiler generated. However, the template instantiations are always marked as compiler
// generated (if not specializations) and so we want to include a template instantiation that is marked
// as compiler generated, but is from a template declaration that satisfied a specific user defined filter.
// The complexity of this detection is isolated here, but knowing that it must be called is more complex.
// This function is called in the CG.C file of tests/nonsmoke/functional/roseTests/programAnalysisTests/testCallGraphAnalysis.
bool retval = false;
#define DEBUG_TEMPLATE_NORMALIZATION_DETECTION 0
#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
printf ("In isNormalizedTemplateInstantiation(): function = %p = %s = %s \n",function,function->class_name().c_str(),function->get_name().str());
#endif
// Test for this to be a template instantiation (in which case it was marked as
// compiler generated but we may want to allow it to be used in the call graph,
// if its template was defined in the current directory).
SgTemplateInstantiationFunctionDecl* templateInstantiationFunction = isSgTemplateInstantiationFunctionDecl(function);
SgTemplateInstantiationMemberFunctionDecl* templateInstantiationMemberFunction = isSgTemplateInstantiationMemberFunctionDecl(function);
if (templateInstantiationFunction != NULL)
{
// When the defining function has been normalized by EDG, only the non-defining declaration will have a source position.
templateInstantiationFunction = isSgTemplateInstantiationFunctionDecl(templateInstantiationFunction->get_firstNondefiningDeclaration());
SgTemplateFunctionDeclaration* templateFunctionDeclaration = templateInstantiationFunction->get_templateDeclaration();
if (templateFunctionDeclaration != NULL)
{
retval = filter->operator()(templateFunctionDeclaration);
}
else
{
// Assume false.
}
#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
printf (" --- case of templateInstantiationFunction: retval = %s \n",retval ? "true" : "false");
#endif
}
else
{
if (templateInstantiationMemberFunction != NULL)
{
// When the defining function has been normalized by EDG, only the non-defining declaration will have a source position.
templateInstantiationMemberFunction = isSgTemplateInstantiationMemberFunctionDecl(templateInstantiationMemberFunction->get_firstNondefiningDeclaration());
SgTemplateMemberFunctionDeclaration* templateMemberFunctionDeclaration = templateInstantiationMemberFunction->get_templateDeclaration();
if (templateMemberFunctionDeclaration != NULL)
{
retval = filter->operator()(templateMemberFunctionDeclaration);
}
else
{
// Assume false.
}
#if DEBUG_TEMPLATE_NORMALIZATION_DETECTION
printf (" --- case of templateInstantiationMemberFunction: retval = %s \n",retval ? "true" : "false");
#endif
}
}
return retval;
}
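// Usage sketch (illustrative; "AcceptAllFilter" is a placeholder type, not part of ROSE): any object
// whose operator() accepts the template declaration and returns bool can serve as the filter, e.g.
//   struct AcceptAllFilter {
//     bool operator()(SgNode* node) { return node != NULL; }
//   };
//   AcceptAllFilter filter;
//   bool keep = isTemplateInstantiationFromTemplateDeclarationSatisfyingFilter(functionDecl, &filter);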
}// end of namespace
#endif
|
uniform_grid_environment.h | // -----------------------------------------------------------------------------
//
// Copyright (C) 2021 CERN & Newcastle University for the benefit of the
// BioDynaMo collaboration. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef CORE_ENVIRONMENT_UNIFORM_GRID_ENVIRONMENT_H_
#define CORE_ENVIRONMENT_UNIFORM_GRID_ENVIRONMENT_H_
#include <assert.h>
#include <omp.h>
#include <algorithm>
#include <array>
#include <atomic>
#include <cmath>
#include <iostream>
#include <limits>
#include <memory>
#include <mutex>
#ifdef LINUX
#include <parallel/algorithm>
#endif // LINUX
#include <utility>
#include <vector>
#include <morton/morton.h> // NOLINT
#include "core/container/agent_vector.h"
#include "core/container/fixed_size_vector.h"
#include "core/container/inline_vector.h"
#include "core/container/math_array.h"
#include "core/container/parallel_resize_vector.h"
#include "core/environment/environment.h"
#include "core/environment/morton_order.h"
#include "core/functor.h"
#include "core/load_balance_info.h"
#include "core/param/param.h"
#include "core/resource_manager.h"
#include "core/util/log.h"
#include "core/util/spinlock.h"
namespace bdm {
namespace detail {
struct InitializeGPUData;
} // namespace detail
/// A class that represents a Cartesian 3D grid
class UniformGridEnvironment : public Environment {
// MechanicalForcesOpCuda needs access to some UniformGridEnvironment private
// members to reconstruct
// the grid on GPU (same for MechanicalForcesOpOpenCL)
friend struct MechanicalForcesOpCuda;
friend struct ::bdm::detail::InitializeGPUData;
friend struct MechanicalForcesOpOpenCL;
friend class SchedulerTest;
public:
/// A single unit cube of the grid
struct Box {
Spinlock lock_;
// std::atomic<bool> timestamp_;
uint32_t timestamp_;
/// start value of the linked list of agents inside this box.
/// Next element can be found at `successors_[start_]`
AgentHandle start_;
/// length of the linked list (i.e. number of agents)
/// uint64_t, because sizeof(Box) = 16, for uint16_t and uint64_t
uint16_t length_;
Box() : timestamp_(0), start_(AgentHandle()), length_(0) {}
/// Copy Constructor required for boxes_.resize()
/// Since box values will be overwritten afterwards it forwards to the
/// default ctor
Box(const Box& other) : Box() {}
Box& operator=(const Box& other) {
// start_ = other.start_.load(std::memory_order_relaxed);
// length_ = other.length_.load(std::memory_order_relaxed);
start_ = other.start_;
length_ = other.length_;
return *this;
}
bool IsEmpty(uint64_t grid_timestamp) const {
return grid_timestamp != timestamp_;
}
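/// Note: a box is treated as empty when its timestamp_ differs from the grid's
/// current timestamp_, i.e. no agent was added to it in the current Update()
/// pass. This lazy scheme avoids clearing every box on each iteration.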
uint16_t Size(uint64_t grid_timestamp) const {
if (IsEmpty(grid_timestamp)) {
return 0;
}
return length_;
}
/// @brief Adds an agent to this box
///
/// @param[in] agent The object's identifier
/// @param[in] successors The successors list of the grid
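///
/// Note: each box stores an intrusive singly-linked list. The new agent becomes
/// the list head (start_) and the previous head is recorded in
/// (*successors)[ah], so Iterator later walks the chain via successors_.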
void AddObject(AgentHandle ah, AgentVector<AgentHandle>* successors,
UniformGridEnvironment* grid) {
std::lock_guard<Spinlock> lock_guard(lock_);
if (timestamp_ != grid->timestamp_) {
timestamp_ = grid->timestamp_;
length_ = 1;
start_ = ah;
} else {
length_++;
(*successors)[ah] = start_;
start_ = ah;
}
}
/// An iterator that iterates over the cells in this box
struct Iterator {
Iterator(UniformGridEnvironment* grid, const Box* box)
: grid_(grid), current_value_(box->start_), countdown_(box->length_) {
if (grid->timestamp_ != box->timestamp_) {
countdown_ = 0;
}
}
bool IsAtEnd() { return countdown_ <= 0; }
Iterator& operator++() {
countdown_--;
if (countdown_ > 0) {
current_value_ = grid_->successors_[current_value_];
}
return *this;
}
AgentHandle operator*() const { return current_value_; }
/// Pointer to the grid; used for accessing the successors_ list
UniformGridEnvironment* grid_;
/// The current agent to be considered
AgentHandle current_value_;
/// The remaining number of agents to consider
int countdown_ = 0;
};
Iterator begin() const { // NOLINT
auto* grid = static_cast<UniformGridEnvironment*>(
Simulation::GetActive()->GetEnvironment());
return Iterator(grid, this);
}
};
/// An iterator that iterates over the boxes in this grid
struct NeighborIterator {
explicit NeighborIterator(
const FixedSizeVector<const Box*, 27>& neighbor_boxes,
uint64_t grid_timestamp)
: neighbor_boxes_(neighbor_boxes),
// start iterator from box 0
box_iterator_(neighbor_boxes_[0]->begin()),
grid_timestamp_(grid_timestamp) {
// if first box is empty
if (neighbor_boxes_[0]->IsEmpty(grid_timestamp)) {
ForwardToNonEmptyBox(grid_timestamp);
}
}
bool IsAtEnd() const { return is_end_; }
AgentHandle operator*() const { return *box_iterator_; }
/// Version where empty neighbor boxes are allowed
NeighborIterator& operator++() {
++box_iterator_;
// if iterator of current box has come to an end, continue with next box
if (box_iterator_.IsAtEnd()) {
return ForwardToNonEmptyBox(grid_timestamp_);
}
return *this;
}
private:
/// The 27 neighbor boxes that will be searched for agents
const FixedSizeVector<const Box*, 27>& neighbor_boxes_;
/// The box that shall be considered to iterate over for finding simulation
/// objects
typename Box::Iterator box_iterator_;
uint64_t grid_timestamp_;
/// The id of the box to be considered (i.e. value between 0 - 26)
uint16_t box_idx_ = 0;
/// Flag to indicate that all the neighbor boxes have been searched through
bool is_end_ = false;
/// Forwards the iterator to the next non empty box and returns itself
/// If there are no non empty boxes is_end_ is set to true
NeighborIterator& ForwardToNonEmptyBox(uint64_t grid_timestamp) {
// increment box id until non empty box has been found
while (++box_idx_ < neighbor_boxes_.size()) {
// box is empty or uninitialized (padding box) -> continue
if (neighbor_boxes_[box_idx_]->IsEmpty(grid_timestamp)) {
continue;
}
// a non-empty box has been found
box_iterator_ = neighbor_boxes_[box_idx_]->begin();
return *this;
}
// all remaining boxes have been empty; reached end
is_end_ = true;
return *this;
}
};
/// Enum that determines the degree of adjacency in search neighbor boxes
// todo(ahmad): currently only kHigh is supported (26 is hardcoded in several
// places)
enum Adjacency {
kLow, /**< The closest 8 neighboring boxes */
kMedium, /**< The closest 18 neighboring boxes */
kHigh /**< The closest 26 neighboring boxes */
};
explicit UniformGridEnvironment(Adjacency adjacency = kHigh)
: adjacency_(adjacency), lbi_(this) {}
UniformGridEnvironment(UniformGridEnvironment const&) = delete;
void operator=(UniformGridEnvironment const&) = delete;
virtual ~UniformGridEnvironment() {}
/// Clears the grid
void Clear() override {
if (!is_custom_box_length_) {
box_length_ = 1;
}
box_length_squared_ = 1;
num_boxes_axis_ = {{0}};
num_boxes_xy_ = 0;
int32_t inf = std::numeric_limits<int32_t>::max();
grid_dimensions_ = {inf, -inf, inf, -inf, inf, -inf};
threshold_dimensions_ = {inf, -inf};
successors_.clear();
has_grown_ = false;
}
struct AssignToBoxesFunctor : public Functor<void, Agent*, AgentHandle> {
explicit AssignToBoxesFunctor(UniformGridEnvironment* grid) : grid_(grid) {}
void operator()(Agent* agent, AgentHandle ah) override {
const auto& position = agent->GetPosition();
auto idx = grid_->GetBoxIndex(position);
auto box = grid_->GetBoxPointer(idx);
box->AddObject(ah, &(grid_->successors_), grid_);
agent->SetBoxIdx(idx);
}
private:
UniformGridEnvironment* grid_ = nullptr;
};
void SetBoxLength(int32_t bl) {
box_length_ = bl;
is_custom_box_length_ = true;
}
int32_t GetBoxLength() { return box_length_; }
/// Updates the grid, as agents may have moved, added or deleted
void Update() override {
auto* rm = Simulation::GetActive()->GetResourceManager();
if (rm->GetNumAgents() != 0) {
Clear();
timestamp_++;
auto inf = Math::kInfinity;
std::array<double, 6> tmp_dim = {{inf, -inf, inf, -inf, inf, -inf}};
CalcSimDimensionsAndLargestAgent(&tmp_dim);
RoundOffGridDimensions(tmp_dim);
// If the box_length_ is not set manually, we set it to the largest agent
// size
if (!is_custom_box_length_) {
auto los = ceil(GetLargestAgentSize());
assert(
los > 0 &&
"The largest object size was found to be 0. Please check if your "
"cells are correctly initialized.");
box_length_ = los;
}
box_length_squared_ = box_length_ * box_length_;
for (int i = 0; i < 3; i++) {
int dimension_length =
grid_dimensions_[2 * i + 1] - grid_dimensions_[2 * i];
int r = dimension_length % box_length_;
// If the grid is not perfectly divisible along each dimension by the
// resolution, extend the grid so that it is
if (r != 0) {
// std::abs for the case that box_length_ > dimension_length
grid_dimensions_[2 * i + 1] += (box_length_ - r);
} else {
// Else extend the grid dimension by one row, because the outermost
// object lies exactly on the border
grid_dimensions_[2 * i + 1] += box_length_;
}
}
// Pad the grid to avoid out of bounds check when search neighbors
for (int i = 0; i < 3; i++) {
grid_dimensions_[2 * i] -= box_length_;
grid_dimensions_[2 * i + 1] += box_length_;
}
// Calculate how many boxes fit along each dimension
for (int i = 0; i < 3; i++) {
int dimension_length =
grid_dimensions_[2 * i + 1] - grid_dimensions_[2 * i];
assert((dimension_length % box_length_ == 0) &&
"The grid dimensions are not a multiple of its box length");
num_boxes_axis_[i] = dimension_length / box_length_;
}
num_boxes_xy_ = num_boxes_axis_[0] * num_boxes_axis_[1];
total_num_boxes_ = num_boxes_xy_ * num_boxes_axis_[2];
CheckGridGrowth();
// resize boxes_
if (boxes_.size() != total_num_boxes_) {
if (boxes_.capacity() < total_num_boxes_) {
boxes_.reserve(total_num_boxes_ * 2);
}
boxes_.resize(total_num_boxes_);
}
successors_.reserve();
// Assign agents to boxes
auto* param = Simulation::GetActive()->GetParam();
AssignToBoxesFunctor functor(this);
rm->ForEachAgentParallel(param->scheduling_batch_size, functor);
if (param->bound_space) {
int min = param->min_bound;
int max = param->max_bound;
threshold_dimensions_ = {min, max};
}
if (param->thread_safety_mechanism ==
Param::ThreadSafetyMechanism::kAutomatic) {
nb_mutex_builder_->Update();
}
} else {
// There are no agents in this simulation
auto* param = Simulation::GetActive()->GetParam();
bool uninitialized = boxes_.size() == 0;
if (uninitialized && param->bound_space) {
// Simulation has never had any agents
// Initialize grid dimensions with `Param::min_bound` and
// `Param::max_bound`
// This is required for the DiffusionGrid
int min = param->min_bound;
int max = param->max_bound;
grid_dimensions_ = {min, max, min, max, min, max};
threshold_dimensions_ = {min, max};
has_grown_ = true;
} else if (!uninitialized) {
// all agents have been removed in the last iteration
// grid state remains the same, but we have to set has_grown_ to false
// otherwise the DiffusionGrid will attempt to resize
has_grown_ = false;
} else {
Log::Fatal(
"UniformGridEnvironment",
"You tried to initialize an empty simulation without bound space. "
"Therefore we cannot determine the size of the simulation space. "
"Please add agents, or set Param::bound_space, "
"Param::min_bound, and Param::max_bound.");
}
}
}
/// @brief Calculates the squared Euclidean distance between two points
/// in 3D
///
/// @param[in] pos1 Position of the first point
/// @param[in] pos2 Position of the second point
///
/// @return The distance between the two points
///
inline double SquaredEuclideanDistance(const Double3& pos1,
const Double3& pos2) const {
const double dx = pos2[0] - pos1[0];
const double dy = pos2[1] - pos1[1];
const double dz = pos2[2] - pos1[2];
return (dx * dx + dy * dy + dz * dz);
}
inline bool WithinSquaredEuclideanDistance(double squared_radius,
const Double3& pos1,
const Double3& pos2) const {
const double dx = pos2[0] - pos1[0];
const double dx2 = dx * dx;
if (dx2 > squared_radius) {
return false;
}
const double dy = pos2[1] - pos1[1];
const double dy2_plus_dx2 = dy * dy + dx2;
if (dy2_plus_dx2 > squared_radius) {
return false;
}
const double dz = pos2[2] - pos1[2];
const double distance = dz * dz + dy2_plus_dx2;
return distance < squared_radius;
}
LoadBalanceInfo* GetLoadBalanceInfo() override {
lbi_.Update();
return &lbi_;
}
/// @brief Applies the given lambda to each neighbor of the specified
/// agent that is within the squared radius (i.e. the search criterion)
///
/// In simulation code do not use this function directly. Use the same
/// function from the execution context (e.g. `InPlaceExecutionContext`)
///
/// @param[in] lambda The operation as a lambda
/// @param query The query object
/// @param squared_radius The squared search radius (type: double)
///
void ForEachNeighbor(Functor<void, Agent*, double>& lambda,
const Agent& query, double squared_radius) override {
if (squared_radius > box_length_squared_) {
Log::Fatal(
"UniformGridEnvironment::ForEachNeighbor",
"The requested search radius (", std::sqrt(squared_radius), ")",
" of the neighborhood search exceeds the "
"box length (",
box_length_, "). The resulting neighborhood would be incomplete.");
}
const auto& position = query.GetPosition();
auto idx = query.GetBoxIdx();
FixedSizeVector<const Box*, 27> neighbor_boxes;
GetMooreBoxes(&neighbor_boxes, idx);
auto* rm = Simulation::GetActive()->GetResourceManager();
NeighborIterator ni(neighbor_boxes, timestamp_);
const unsigned batch_size = 64;
uint64_t size = 0;
Agent* agents[batch_size] __attribute__((aligned(64)));
double x[batch_size] __attribute__((aligned(64)));
double y[batch_size] __attribute__((aligned(64)));
double z[batch_size] __attribute__((aligned(64)));
double squared_distance[batch_size] __attribute__((aligned(64)));
auto process_batch = [&]() {
#pragma omp simd
for (uint64_t i = 0; i < size; ++i) {
const double dx = x[i] - position[0];
const double dy = y[i] - position[1];
const double dz = z[i] - position[2];
squared_distance[i] = dx * dx + dy * dy + dz * dz;
}
for (uint64_t i = 0; i < size; ++i) {
if (squared_distance[i] < squared_radius) {
lambda(agents[i], squared_distance[i]);
}
}
size = 0;
};
while (!ni.IsAtEnd()) {
auto ah = *ni;
// increment iterator already here to hide memory latency
++ni;
auto* agent = rm->GetAgent(ah);
if (agent != &query) {
agents[size] = agent;
const auto& pos = agent->GetPosition();
x[size] = pos[0];
y[size] = pos[1];
z[size] = pos[2];
size++;
if (size == batch_size) {
process_batch();
}
}
}
process_batch();
}
/// @brief Return the box index in the one dimensional array of the box
/// that contains the position
///
/// @param[in] position The position of the object
///
/// @return The box index.
///
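/// Example (illustrative values): with grid_dimensions_[0] == -32,
/// box_length_ == 32 and position[0] == 10.5, floor(10.5) == 10 and
/// (10 - (-32)) / 32 == 1.3125, which truncates to box_coord[0] == 1.
///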
size_t GetBoxIndex(const Double3& position) const {
std::array<uint64_t, 3> box_coord;
box_coord[0] = (floor(position[0]) - grid_dimensions_[0]) / box_length_;
box_coord[1] = (floor(position[1]) - grid_dimensions_[2]) / box_length_;
box_coord[2] = (floor(position[2]) - grid_dimensions_[4]) / box_length_;
return GetBoxIndex(box_coord);
}
std::array<int32_t, 6> GetDimensions() const override {
return grid_dimensions_;
}
std::array<int32_t, 2> GetDimensionThresholds() const override {
return threshold_dimensions_;
}
void GetNumBoxesAxis(uint32_t* nba) {
nba[0] = num_boxes_axis_[0];
nba[1] = num_boxes_axis_[1];
nba[2] = num_boxes_axis_[2];
}
uint64_t GetNumBoxes() const { return boxes_.size(); }
std::array<uint64_t, 3> GetBoxCoordinates(size_t box_idx) const {
std::array<uint64_t, 3> box_coord;
box_coord[2] = box_idx / num_boxes_xy_;
auto remainder = box_idx % num_boxes_xy_;
box_coord[1] = remainder / num_boxes_axis_[0];
box_coord[0] = remainder % num_boxes_axis_[0];
return box_coord;
}
// NeighborMutex ---------------------------------------------------------
/// This class ensures thread-safety for the InPlaceExecutionContext for the
/// case
/// that an agent modifies its neighbors.
class GridNeighborMutexBuilder : public Environment::NeighborMutexBuilder {
public:
/// The NeighborMutex class is a synchronization primitive that can be
/// used to protect agents data from being simultaneously accessed by
/// multiple threads.
class GridNeighborMutex
: public Environment::NeighborMutexBuilder::NeighborMutex {
public:
GridNeighborMutex(const FixedSizeVector<uint64_t, 27>& mutex_indices,
GridNeighborMutexBuilder* mutex_builder)
: mutex_indices_(mutex_indices), mutex_builder_(mutex_builder) {
// Deadlocks occur if multiple threads try to acquire the same locks,
// but in different order.
// -> sort to avoid deadlocks - see lock ordering
std::sort(mutex_indices_.begin(), mutex_indices_.end());
}
virtual ~GridNeighborMutex() {}
void lock() override { // NOLINT
for (auto idx : mutex_indices_) {
auto& mutex = mutex_builder_->mutexes_[idx].mutex_;
// acquire lock (and spin if another thread is holding it)
while (mutex.test_and_set(std::memory_order_acquire)) {
}
}
}
void unlock() override { // NOLINT
for (auto idx : mutex_indices_) {
auto& mutex = mutex_builder_->mutexes_[idx].mutex_;
mutex.clear(std::memory_order_release);
}
}
void SetMutexIndices(const FixedSizeVector<uint64_t, 27>& indices) {
mutex_indices_ = indices;
std::sort(mutex_indices_.begin(), mutex_indices_.end());
}
private:
FixedSizeVector<uint64_t, 27> mutex_indices_;
GridNeighborMutexBuilder* mutex_builder_;
};
/// Used to store mutexes in a vector.
/// Always creates a new mutex (even for the copy constructor)
struct MutexWrapper {
MutexWrapper() {}
MutexWrapper(const MutexWrapper&) {}
std::atomic_flag mutex_ = ATOMIC_FLAG_INIT;
};
virtual ~GridNeighborMutexBuilder() {}
void Update() {
auto* grid = static_cast<UniformGridEnvironment*>(
Simulation::GetActive()->GetEnvironment());
mutexes_.resize(grid->GetNumBoxes());
}
NeighborMutex* GetMutex(uint64_t box_idx) override;
private:
/// one mutex for each box in `UniformGridEnvironment::boxes_`
std::vector<MutexWrapper> mutexes_;
};
/// Returns the `NeighborMutexBuilder`. The client uses it to create a
/// `NeighborMutex`.
NeighborMutexBuilder* GetNeighborMutexBuilder() override {
return nb_mutex_builder_.get();
}
private:
class LoadBalanceInfoUG : public LoadBalanceInfo {
public:
LoadBalanceInfoUG(UniformGridEnvironment* grid);
virtual ~LoadBalanceInfoUG();
void Update();
void CallHandleIteratorConsumer(
uint64_t start, uint64_t end,
Functor<void, Iterator<AgentHandle>*>& f) const override;
private:
UniformGridEnvironment* grid_;
MortonOrder mo_;
ParallelResizeVector<Box*> sorted_boxes_;
ParallelResizeVector<uint64_t> cummulated_agents_;
struct InitializeVectorFunctor : public Functor<void, Iterator<uint64_t>*> {
UniformGridEnvironment* grid;
uint64_t start;
ParallelResizeVector<Box*>& sorted_boxes;
ParallelResizeVector<uint64_t>& cummulated_agents;
InitializeVectorFunctor(UniformGridEnvironment* grid, uint64_t start,
decltype(sorted_boxes) sorted_boxes,
decltype(cummulated_agents) cummulated_agents);
virtual ~InitializeVectorFunctor();
void operator()(Iterator<uint64_t>* it) override;
};
void AllocateMemory();
void InitializeVectors();
};
/// The vector containing all the boxes in the grid
/// Using parallel resize vector to enable parallel initialization and thus
/// better scalability.
ParallelResizeVector<Box> boxes_;
/// is incremented at each call to Update
/// This is used to decide if boxes should be reinitialized
uint32_t timestamp_ = 0;
/// Length of a Box
int32_t box_length_ = 1;
/// Length of a Box squared
int32_t box_length_squared_ = 1;
/// True when the box length was set manually
bool is_custom_box_length_ = false;
/// Stores the number of Boxes for each axis
std::array<uint64_t, 3> num_boxes_axis_ = {{0}};
/// Number of boxes in the xy plane (=num_boxes_axis_[0] * num_boxes_axis_[1])
size_t num_boxes_xy_ = 0;
/// The total number of boxes in the uniform grid
uint64_t total_num_boxes_ = 0;
/// Implements linked list - array index = key, value: next element
///
/// // Usage
/// AgentHandle current_element = ...;
/// AgentHandle next_element = successors_[current_element];
AgentVector<AgentHandle> successors_;
/// Determines which boxes to search neighbors in (see enum Adjacency)
Adjacency adjacency_;
/// Cube which contains all agents
/// {x_min, x_max, y_min, y_max, z_min, z_max}
std::array<int32_t, 6> grid_dimensions_;
/// Stores the min / max dimension value that need to be surpassed in order
/// to trigger a diffusion grid change
std::array<int32_t, 2> threshold_dimensions_;
LoadBalanceInfoUG lbi_; //!
/// Holds instance of NeighborMutexBuilder.
/// NeighborMutexBuilder is updated if `Param::thread_safety_mechanism`
/// is set to `kAutomatic`
std::unique_ptr<GridNeighborMutexBuilder> nb_mutex_builder_ =
std::make_unique<GridNeighborMutexBuilder>();
void CheckGridGrowth() {
// Determine if the grid dimensions have changed (changed in the sense that
// the grid has grown outwards)
auto min_gd =
*std::min_element(grid_dimensions_.begin(), grid_dimensions_.end());
auto max_gd =
*std::max_element(grid_dimensions_.begin(), grid_dimensions_.end());
if (min_gd < threshold_dimensions_[0]) {
threshold_dimensions_[0] = min_gd;
has_grown_ = true;
}
if (max_gd > threshold_dimensions_[1]) {
Log::Info("UniformGridEnvironment",
"Your agents are getting near the edge of "
"the simulation space. Be aware of boundary conditions that "
"may come into play!");
threshold_dimensions_[1] = max_gd;
has_grown_ = true;
}
}
void RoundOffGridDimensions(const std::array<double, 6>& grid_dimensions) {
grid_dimensions_[0] = floor(grid_dimensions[0]);
grid_dimensions_[2] = floor(grid_dimensions[2]);
grid_dimensions_[4] = floor(grid_dimensions[4]);
grid_dimensions_[1] = ceil(grid_dimensions[1]);
grid_dimensions_[3] = ceil(grid_dimensions[3]);
grid_dimensions_[5] = ceil(grid_dimensions[5]);
}
/// @brief Gets the Moore (i.e. adjacent) boxes of the query box. Also adds
/// the query box itself.
///
/// @param[out] neighbor_boxes The neighbor boxes
/// @param[in] box_idx The query box
///
void GetMooreBoxes(FixedSizeVector<const Box*, 27>* neighbor_boxes,
size_t box_idx) const {
neighbor_boxes->push_back(GetBoxPointer(box_idx));
// Adjacent 6 (top, down, left, right, front and back)
if (adjacency_ >= kLow) {
neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_xy_));
neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_xy_));
neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_axis_[0]));
neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_axis_[0]));
neighbor_boxes->push_back(GetBoxPointer(box_idx - 1));
neighbor_boxes->push_back(GetBoxPointer(box_idx + 1));
}
// Adjacent 12
if (adjacency_ >= kMedium) {
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_xy_ - num_boxes_axis_[0]));
neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_xy_ - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_axis_[0] - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_xy_ - num_boxes_axis_[0]));
neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_xy_ - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_axis_[0] - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_xy_ + num_boxes_axis_[0]));
neighbor_boxes->push_back(GetBoxPointer(box_idx - num_boxes_xy_ + 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_axis_[0] + 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_xy_ + num_boxes_axis_[0]));
neighbor_boxes->push_back(GetBoxPointer(box_idx + num_boxes_xy_ + 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_axis_[0] + 1));
}
// Adjacent 8
if (adjacency_ >= kHigh) {
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_xy_ - num_boxes_axis_[0] - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_xy_ - num_boxes_axis_[0] + 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_xy_ + num_boxes_axis_[0] - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx - num_boxes_xy_ + num_boxes_axis_[0] + 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_xy_ - num_boxes_axis_[0] - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_xy_ - num_boxes_axis_[0] + 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_xy_ + num_boxes_axis_[0] - 1));
neighbor_boxes->push_back(
GetBoxPointer(box_idx + num_boxes_xy_ + num_boxes_axis_[0] + 1));
}
}
/// @brief Gets the box indices of all adjacent boxes. Also adds the
/// query box index.
///
/// @param[out] box_indices Result containing all box indices
/// @param[in] box_idx The query box
///
void GetMooreBoxIndices(FixedSizeVector<uint64_t, 27>* box_indices,
size_t box_idx) const {
box_indices->push_back(box_idx);
// Adjacent 6 (top, down, left, right, front and back)
if (adjacency_ >= kLow) {
box_indices->push_back(box_idx - num_boxes_xy_);
box_indices->push_back(box_idx + num_boxes_xy_);
box_indices->push_back(box_idx - num_boxes_axis_[0]);
box_indices->push_back(box_idx + num_boxes_axis_[0]);
box_indices->push_back(box_idx - 1);
box_indices->push_back(box_idx + 1);
}
// Adjacent 12
if (adjacency_ >= kMedium) {
box_indices->push_back(box_idx - num_boxes_xy_ - num_boxes_axis_[0]);
box_indices->push_back(box_idx - num_boxes_xy_ - 1);
box_indices->push_back(box_idx - num_boxes_axis_[0] - 1);
box_indices->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0]);
box_indices->push_back(box_idx + num_boxes_xy_ - 1);
box_indices->push_back(box_idx + num_boxes_axis_[0] - 1);
box_indices->push_back(box_idx - num_boxes_xy_ + num_boxes_axis_[0]);
box_indices->push_back(box_idx - num_boxes_xy_ + 1);
box_indices->push_back(box_idx - num_boxes_axis_[0] + 1);
box_indices->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0]);
box_indices->push_back(box_idx + num_boxes_xy_ + 1);
box_indices->push_back(box_idx + num_boxes_axis_[0] + 1);
}
// Adjacent 8
if (adjacency_ >= kHigh) {
box_indices->push_back(box_idx - num_boxes_xy_ - num_boxes_axis_[0] - 1);
box_indices->push_back(box_idx - num_boxes_xy_ - num_boxes_axis_[0] + 1);
box_indices->push_back(box_idx - num_boxes_xy_ + num_boxes_axis_[0] - 1);
box_indices->push_back(box_idx - num_boxes_xy_ + num_boxes_axis_[0] + 1);
box_indices->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0] - 1);
box_indices->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0] + 1);
box_indices->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0] - 1);
box_indices->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0] + 1);
}
}
/// Determines current box based on parameter box_idx and adds it together
/// with half of the surrounding boxes to the vector.
/// Legend: C = center, N = north, E = east, S = south, W = west, F = front,
/// B = back
/// For each box pair which is centro-symmetric only one box is taken --
/// e.g. E-W: E, or BNW-FSE: BNW
///
/// (x-axis to the right \ y-axis up)
/// z=1
/// +-----+----+-----+
/// | BNW | BN | BNE |
/// +-----+----+-----+
/// | NW | N | NE |
/// +-----+----+-----+
/// | FNW | FN | FNE |
/// +-----+----+-----+
///
/// z = 0
/// +-----+----+-----+
/// | BW | B | BE |
/// +-----+----+-----+
/// | W | C | E |
/// +-----+----+-----+
/// | FW | F | FE |
/// +-----+----+-----+
///
/// z = -1
/// +-----+----+-----+
/// | BSW | BS | BSE |
/// +-----+----+-----+
/// | SW | S | SE |
/// +-----+----+-----+
/// | FSW | FS | FSE |
/// +-----+----+-----+
///
void GetHalfMooreBoxIndices(FixedSizeVector<size_t, 14>* neighbor_boxes,
size_t box_idx) const {
// C
neighbor_boxes->push_back(box_idx);
// BW
neighbor_boxes->push_back(box_idx + num_boxes_axis_[0] - 1);
// FNW
neighbor_boxes->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0] - 1);
// NW
neighbor_boxes->push_back(box_idx + num_boxes_xy_ - 1);
// BNW
neighbor_boxes->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0] - 1);
// B
neighbor_boxes->push_back(box_idx + num_boxes_axis_[0]);
// FN
neighbor_boxes->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0]);
// N
neighbor_boxes->push_back(box_idx + num_boxes_xy_);
// BN
neighbor_boxes->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0]);
// E
neighbor_boxes->push_back(box_idx + 1);
// BE
neighbor_boxes->push_back(box_idx + num_boxes_axis_[0] + 1);
// FNE
neighbor_boxes->push_back(box_idx + num_boxes_xy_ - num_boxes_axis_[0] + 1);
// NE
neighbor_boxes->push_back(box_idx + num_boxes_xy_ + 1);
// BNE
neighbor_boxes->push_back(box_idx + num_boxes_xy_ + num_boxes_axis_[0] + 1);
}
/// @brief Gets the pointer to the box with the given index
///
/// @param[in] index The index of the box
///
/// @return The pointer to the box
///
const Box* GetBoxPointer(size_t index) const { return &(boxes_[index]); }
/// @brief Gets the pointer to the box with the given index
///
/// @param[in] index The index of the box
///
/// @return The pointer to the box
///
Box* GetBoxPointer(size_t index) { return &(boxes_[index]); }
/// Returns the box index in the one dimensional array based on box
/// coordinates in space
///
/// @param box_coord box coordinates in space (x, y, z)
///
/// @return The box index.
///
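/// Example (illustrative values): for num_boxes_axis_ == {4, 4, 4}
/// (num_boxes_xy_ == 16) and box_coord == {1, 2, 3} the index is
/// 3 * 16 + 2 * 4 + 1 == 57.
///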
size_t GetBoxIndex(const std::array<uint64_t, 3>& box_coord) const {
return box_coord[2] * num_boxes_xy_ + box_coord[1] * num_boxes_axis_[0] +
box_coord[0];
}
};
} // namespace bdm
#endif // CORE_ENVIRONMENT_UNIFORM_GRID_ENVIRONMENT_H_
|
gemm_symm_int8.h | // chgemm is pleased to support the open source community by supporting ncnn available.
//
// author: tpoisonooo (https://github.com/tpoisonooo/chgemm), implements symmetric int8 GEMM on aarch64.
//
// Copyright (C) 2019 tpoisonooo. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#pragma once
#if __aarch64__
#define DECOMPOSE_K\
int ktmp = k;\
int k8 = k >> 3;\
int k8_even = (k8 % 2 == 0) ? 0: 1;\
k -= (k8 << 3);\
int k4 = k >> 2;\
k -= (k4 << 2);\
int k2 = k >> 1;\
k -= (k2 << 1);\
int k1 = k;\
k = ktmp;
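// Worked example for DECOMPOSE_K: k = 23 gives k8 = 2 (so k8_even = 0), k4 = 1,
// k2 = 1, k1 = 1, and k is restored to 23 afterwards. Despite its name, k8_even
// is 1 when the number of 8-wide K blocks is odd.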
#define DECOMPOSE_N\
int ntmp = n;\
int n4 = n >> 2;\
n -= (n4 << 2);\
int n2 = n >> 1;\
n -= (n2 << 1);\
int n1 = n;\
n = ntmp;
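// Worked example for DECOMPOSE_N: n = 7 gives n4 = 1, n2 = 1, n1 = 1, and n is
// restored to 7 afterwards.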
#define PRINT_MATRIX 0
#if PRINT_MATRIX
static void print_int8_matrix(char* name, const int8_t *a, int m, int k, int ldx) {
fprintf(stdout, "------------- %s \n", name);
for (int i = 0; i < m; ++i) {
for (int j = 0; j < k; ++j) {
fprintf(stdout, "%d \t", a[i * ldx + j]);
}
fprintf(stdout, "\n\n");
}
}
static void print_int32_matrix(char* name, const int32_t *a, int m, int k, int ldx) {
fprintf(stdout, "------------- %s \n", name);
for (int i = 0; i < m; ++i) {
for (int j = 0; j < k; ++j) {
fprintf(stdout, "%d \t", a[i * ldx + j]);
}
fprintf(stdout, "\n\n");
}
}
static void print_fp32_vec(char* name, const float *a, int len) {
fprintf(stdout, "------------- %s \n", name);
for (int i = 0; i < len; ++i) {
fprintf(stdout, "%f \t", a[i]);
}
fprintf(stdout, "\n\n");
}
#endif
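// Sketch of the packing (inferred from the code below): reorder_b packs B
// (k x n, row stride ldx) into column panels of width 4, then 2, then 1. Within
// a panel, K is blocked in groups of 8/4/2/1 and each column's K-block is stored
// contiguously, matching the load order used by the int8 kernels.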
static void reorder_b(const int8_t* b, int8_t* sb, const int k, const int n, const int ldx) {
#if PRINT_MATRIX
print_int8_matrix("b", b, k, n, ldx);
int8_t *origin = sb;
#endif
int i = 0;
for (; i+3 < n; i += 4) {
const int8_t *p0 = b + i;
const int8_t *p1 = b + 1 * ldx + i;
const int8_t *p2 = b + 2 * ldx + i;
const int8_t *p3 = b + 3 * ldx + i;
const int8_t *p4 = b + 4 * ldx + i;
const int8_t *p5 = b + 5 * ldx + i;
const int8_t *p6 = b + 6 * ldx + i;
const int8_t *p7 = b + 7 * ldx + i;
int j = 0;
for (; j+7 < k; j += 8) {
sb[0] = p0[0];
sb[1] = p1[0];
sb[2] = p2[0];
sb[3] = p3[0];
sb[4] = p4[0];
sb[5] = p5[0];
sb[6] = p6[0];
sb[7] = p7[0];
sb[8] = p0[1];
sb[9] = p1[1];
sb[10] = p2[1];
sb[11] = p3[1];
sb[12] = p4[1];
sb[13] = p5[1];
sb[14] = p6[1];
sb[15] = p7[1];
sb[16] = p0[2];
sb[17] = p1[2];
sb[18] = p2[2];
sb[19] = p3[2];
sb[20] = p4[2];
sb[21] = p5[2];
sb[22] = p6[2];
sb[23] = p7[2];
sb[24] = p0[3];
sb[25] = p1[3];
sb[26] = p2[3];
sb[27] = p3[3];
sb[28] = p4[3];
sb[29] = p5[3];
sb[30] = p6[3];
sb[31] = p7[3];
sb += 32;
p0 += 8 * ldx;
p1 += 8 * ldx;
p2 += 8 * ldx;
p3 += 8 * ldx;
p4 += 8 * ldx;
p5 += 8 * ldx;
p6 += 8 * ldx;
p7 += 8 * ldx;
}
if (j+3 < k) {
j += 4;
sb[0] = p0[0];
sb[1] = p1[0];
sb[2] = p2[0];
sb[3] = p3[0];
sb[4] = p0[1];
sb[5] = p1[1];
sb[6] = p2[1];
sb[7] = p3[1];
sb[8] = p0[2];
sb[9] = p1[2];
sb[10] = p2[2];
sb[11] = p3[2];
sb[12] = p0[3];
sb[13] = p1[3];
sb[14] = p2[3];
sb[15] = p3[3];
sb += 16;
p0 += 4 * ldx;
p1 += 4 * ldx;
p2 += 4 * ldx;
p3 += 4 * ldx;
}
if (j+1 < k) {
j += 2;
sb[0] = p0[0];
sb[1] = p1[0];
sb[2] = p0[1];
sb[3] = p1[1];
sb[4] = p0[2];
sb[5] = p1[2];
sb[6] = p0[3];
sb[7] = p1[3];
sb += 8;
p0 += 2 * ldx;
p1 += 2 * ldx;
}
if (j < k) {
sb[0] = p0[0];
sb[1] = p0[1];
sb[2] = p0[2];
sb[3] = p0[3];
sb += 4;
p0 += ldx;
}
}
if (i+1 < n) {
const int8_t *p0 = b + i;
const int8_t *p1 = b + 1 * ldx + i;
const int8_t *p2 = b + 2 * ldx + i;
const int8_t *p3 = b + 3 * ldx + i;
const int8_t *p4 = b + 4 * ldx + i;
const int8_t *p5 = b + 5 * ldx + i;
const int8_t *p6 = b + 6 * ldx + i;
const int8_t *p7 = b + 7 * ldx + i;
int j = 0;
for (; j+7 < k; j += 8) {
sb[0] = p0[0];
sb[1] = p1[0];
sb[2] = p2[0];
sb[3] = p3[0];
sb[4] = p4[0];
sb[5] = p5[0];
sb[6] = p6[0];
sb[7] = p7[0];
sb[8] = p0[1];
sb[9] = p1[1];
sb[10] = p2[1];
sb[11] = p3[1];
sb[12] = p4[1];
sb[13] = p5[1];
sb[14] = p6[1];
sb[15] = p7[1];
sb += 16;
p0 += 8 * ldx;
p1 += 8 * ldx;
p2 += 8 * ldx;
p3 += 8 * ldx;
p4 += 8 * ldx;
p5 += 8 * ldx;
p6 += 8 * ldx;
p7 += 8 * ldx;
}
if (j+3 < k) {
j += 4;
sb[0] = p0[0];
sb[1] = p1[0];
sb[2] = p2[0];
sb[3] = p3[0];
sb[4] = p0[1];
sb[5] = p1[1];
sb[6] = p2[1];
sb[7] = p3[1];
sb += 8;
p0 += 4 * ldx;
p1 += 4 * ldx;
p2 += 4 * ldx;
p3 += 4 * ldx;
}
if (j+1 < k) {
j += 2;
sb[0] = p0[0];
sb[1] = p1[0];
sb[2] = p0[1];
sb[3] = p1[1];
sb += 4;
p0 += 2 * ldx;
p1 += 2 * ldx;
}
if (j < k) {
sb[0] = p0[0];
sb[1] = p0[1];
sb += 2;
p0 += ldx;
}
i += 2;
}
if (i < n) {
const int8_t *p0 = b + i;
const int8_t *p1 = b + 1 * ldx + i;
const int8_t *p2 = b + 2 * ldx + i;
const int8_t *p3 = b + 3 * ldx + i;
const int8_t *p4 = b + 4 * ldx + i;
const int8_t *p5 = b + 5 * ldx + i;
const int8_t *p6 = b + 6 * ldx + i;
const int8_t *p7 = b + 7 * ldx + i;
int j = 0;
for (; j+7 < k; j += 8) {
sb[0] = p0[0];
sb[1] = p1[0];
sb[2] = p2[0];
sb[3] = p3[0];
sb[4] = p4[0];
sb[5] = p5[0];
sb[6] = p6[0];
sb[7] = p7[0];
sb += 8;
p0 += 8 * ldx;
p1 += 8 * ldx;
p2 += 8 * ldx;
p3 += 8 * ldx;
p4 += 8 * ldx;
p5 += 8 * ldx;
p6 += 8 * ldx;
p7 += 8 * ldx;
}
if (j+3 < k) {
j += 4;
sb[0] = p0[0];
sb[1] = p1[0];
sb[2] = p2[0];
sb[3] = p3[0];
sb += 4;
p0 += 4 * ldx;
p1 += 4 * ldx;
p2 += 4 * ldx;
p3 += 4 * ldx;
}
if (j+1 < k) {
j += 2;
sb[0] = p0[0];
sb[1] = p1[0];
sb += 2;
p0 += 2 * ldx;
p1 += 2 * ldx;
}
if (j < k) {
sb[0] = p0[0];
sb += 1;
p0 += ldx;
}
}
#if PRINT_MATRIX
print_int8_matrix("sb", origin, k, n, n);
#endif
}
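// Sketch of the packing (inferred from the code below): reorder_a packs A
// (m x k, row stride ldx) into row panels of height 4, then 2, then 1. Within a
// panel, K is processed in chunks of 8/4/2/1 and each row's chunk is stored back
// to back; the 8-wide path uses NEON ld1/st1 to copy the four rows' 8-byte chunks.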
static void reorder_a(int8_t* a, int8_t* sa, int m, const int k, const int ldx) {
#if PRINT_MATRIX
print_int8_matrix("a", a, m, k, ldx);
int8_t *origin = sa;
#endif
int i = 0;
for (; i + 3 < m; i += 4) {
int8_t *p0 = a;
int8_t *p1 = a + ldx;
int8_t *p2 = a + 2 * ldx;
int8_t *p3 = a + 3 * ldx;
int j = 0;
for (; j + 7 < k; j += 8) {
asm volatile (
"ld1 {v0.8b}, [%0], #8 \n"
"ld1 {v1.8b}, [%1], #8 \n"
"ld1 {v2.8b}, [%2], #8 \n"
"ld1 {v3.8b}, [%3], #8 \n"
"st1 {v0.8b, v1.8b, v2.8b, v3.8b}, [%4], #32\n"
: "=r"(p0),
"=r"(p1),
"=r"(p2),
"=r"(p3),
"=r"(sa)
: "0"(p0),
"1"(p1),
"2"(p2),
"3"(p3),
"4"(sa)
: "cc", "memory", "v0", "v1", "v2", "v3"
);
}
if (j + 3 < k) {
j += 4;
asm volatile (
"ld1 {v0.8b}, [%0] \n"
"add %0, %0, #4 \n"
"ld1 {v1.8b}, [%1] \n"
"add %1, %1, #4 \n"
"ld1 {v2.8b}, [%2] \n"
"add %2, %2, #4 \n"
"ld1 {v3.8b}, [%3] \n"
"add %3, %3, #4 \n"
"trn1 v0.2s, v0.2s, v1.2s \n"
"st1 {v0.8b}, [%4], #8 \n"
"trn1 v2.2s, v2.2s, v3.2s \n"
"st1 {v2.8b}, [%4], #8 \n"
: "=r"(p0),
"=r"(p1),
"=r"(p2),
"=r"(p3),
"=r"(sa)
: "0"(p0),
"1"(p1),
"2"(p2),
"3"(p3),
"4"(sa)
: "cc", "memory", "v0", "v1", "v2", "v3"
);
}
if (j + 1 < k) {
j += 2;
asm volatile (
"ld1 {v0.8b}, [%0] \n"
"add %0, %0, #2 \n"
"ld1 {v1.8b}, [%1] \n"
"add %1, %1, #2 \n"
"ld1 {v2.8b}, [%2] \n"
"add %2, %2, #2 \n"
"ld1 {v3.8b}, [%3] \n"
"add %3, %3, #2 \n"
"trn1 v0.4h, v0.4h, v1.4h \n"
"trn1 v2.4h, v2.4h, v3.4h \n"
"trn1 v0.2s, v0.2s, v2.2s \n"
"st1 {v0.8b}, [%4], #8 \n"
: "=r"(p0),
"=r"(p1),
"=r"(p2),
"=r"(p3),
"=r"(sa)
: "0"(p0),
"1"(p1),
"2"(p2),
"3"(p3),
"4"(sa)
: "cc", "memory", "v0", "v1", "v2", "v3"
);
}
if (j < k) {
*sa++ = *p0;
*sa++ = *p1;
*sa++ = *p2;
*sa++ = *p3;
}
a += 4 * ldx;
}
if (i + 1 < m) {
i += 2;
int8_t *p0 = a;
int8_t *p1 = a + ldx;
int j = 0;
for (; j + 7 < k; j += 8) {
asm volatile (
"ld1 {v0.8b}, [%0], #8 \n"
"ld1 {v1.8b}, [%1], #8 \n"
"st1 {v0.8b, v1.8b}, [%2], #16\n"
: "=r"(p0),
"=r"(p1),
"=r"(sa)
: "0"(p0),
"1"(p1),
"2"(sa)
: "cc", "memory", "v0", "v1"
);
}
if (j + 3 < k) {
j += 4;
asm volatile (
"ld1 {v0.8b}, [%0] \n"
"add %0, %0, #4 \n"
"ld1 {v1.8b}, [%1] \n"
"add %1, %1, #4 \n"
"trn1 v0.2s, v0.2s, v1.2s \n"
"st1 {v0.8b}, [%2], #8 \n"
: "=r"(p0),
"=r"(p1),
"=r"(sa)
: "0"(p0),
"1"(p1),
"2"(sa)
: "cc", "memory", "v0", "v1"
);
}
if (j + 1 < k) {
j += 2;
sa[0] = p0[0];
sa[1] = p0[1];
sa[2] = p1[0];
sa[3] = p1[1];
sa += 4;
p0 += 2;
p1 += 2;
}
if (j < k) {
sa[0] = p0[0];
sa[1] = p1[0];
sa += 2;
}
a += 2 * ldx;
}
if (i < m) {
memcpy(sa, a, sizeof(int8_t) * ldx);
}
#if PRINT_MATRIX
print_int8_matrix("sa", origin, m, k, k);
#endif
}
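// Sketch (inferred from the assembly below): int8kernel_m1 computes one output
// row, a 1 x k panel of A times the packed k x n panel of B. N is handled in
// chunks of 4/2/1 and K in chunks of 8/4/2/1. If scales is non-null, the int32
// accumulators are converted to float, multiplied by *scales, optionally offset
// by *bias, and saturated back to int8; otherwise raw int32 results are stored.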
void int8kernel_m1(void* dst, int8_t* sa, int8_t* sb, int, int k, int n, int, float* scales, float* bias) {
void *pc = dst;
int8_t *pa = sa;
int8_t *pb = sb;
DECOMPOSE_K
DECOMPOSE_N
if (n4 > 0) {
asm volatile(
"9: \n"
" eor v8.16b, v8.16b, v8.16b \n"
" eor v9.16b, v9.16b, v9.16b \n"
" eor v10.16b, v10.16b, v10.16b\n"
" eor v11.16b, v11.16b, v11.16b\n"
" mov x8, %0 // PanelA\n"
" cmp %w4, #0 \n"
" beq 1f \n"
" mov w19, %w4 \n"
" cmp %w3, #0 \n"
" beq 2f// loop number is even \n"
" // start loopm1_kd8_nd4\n"
" subs w19, w19, #1 \n"
" ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 // load four lines of B\n"
" ld1 {v2.8b}, [%0], #8 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" saddlp v8.4s, v0.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" saddlp v9.4s, v0.8h \n"
" smull v0.8h, v6.8b, v2.8b \n"
" saddlp v10.4s, v0.8h \n"
" smull v0.8h, v7.8b, v2.8b \n"
" saddlp v11.4s, v0.8h \n"
" cmp w19, #0 \n"
" beq 3f \n"
" 2: \n"
" ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 \n"
" ld1 {v12.8b, v13.8b, v14.8b, v15.8b}, [%1], #32\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v2.8b, v4.8b \n"
" smlal v0.8h, v3.8b, v12.8b \n"
" sadalp v8.4s, v0.8h \n"
" smull v1.8h, v2.8b, v5.8b \n"
" smlal v1.8h, v3.8b, v13.8b \n"
" sadalp v9.4s, v1.8h \n"
" smull v0.8h, v2.8b, v6.8b \n"
" smlal v0.8h, v3.8b, v14.8b \n"
" sadalp v10.4s, v0.8h \n"
" smull v1.8h, v2.8b, v7.8b \n"
" smlal v1.8h, v3.8b, v15.8b \n"
" sadalp v11.4s, v1.8h \n"
" subs w19, w19, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v10.4s, v10.4s, v11.4s\n"
" addp v8.4s, v8.4s, v10.4s \n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w5, #0 \n"
" beq 4f \n"
" // start subkernel_m1n4k4 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load B4x4\n"
" sxtl v4.8h, v4.8b \n"
" sxtl v5.8h, v5.8b \n"
" mov v6.d[0], v4.d[1] \n"
" mov v7.d[0], v5.d[1] \n"
" ld1 {v2.8b}, [%0] // load A1x4\n"
" add %0, %0, #4 \n"
" sxtl v2.8h, v2.8b \n"
" smull v12.4s, v2.4h, v4.4h \n"
" smull v13.4s, v2.4h, v6.4h \n"
" smull v14.4s, v2.4h, v5.4h \n"
" smull v15.4s, v2.4h, v7.4h \n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v12.4s, v12.4s, v14.4s\n"
" add v8.4s, v8.4s, v12.4s \n"
" 4: \n"
" cmp %w6, #0 \n"
" beq 5f \n"
" // start subkernel_m1n4k2\n"
" ld1 {v4.8b}, [%0] // load A1x2 \n"
" add %0, %0, #2 \n"
" ld1 {v0.8b}, [%1], #8 // load B2x4 \n"
" mov v4.h[1], v4.h[0] \n"
" mov v4.s[1], v4.s[0] \n"
" smull v0.8h, v0.8b, v4.8b \n"
" sadalp v8.4s, v0.8h \n"
" 5: \n"
" cmp %w7, #0 \n"
" beq 6f \n"
" // start subkernel_m1n4k1 \n"
" ld1 {v4.8b}, [%1] // load B1x4\n"
" add %1, %1, #4 \n"
" ld1 {v2.8b}, [%0] // load A1x1\n"
" add %0, %0, #1 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0]\n"
" 6: \n"
" cmp %9, #0 \n"
" beq 7f \n"
" ldr w24, [%9] \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" // fp32 *= scale_tm \n"
" mov v12.s[0], w24 \n"
" fmul v8.4s, v8.4s, v12.s[0]\n"
" cmp %10, #0 \n"
" beq 8f \n"
" // fp32 += bias_tm \n"
" ldr w24, [%10] \n"
" dup v15.4s, w24 \n"
" fadd v8.4s, v8.4s, v15.4s \n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s\n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.s}[0], [%2]\n"
" add %2, %2, #4 \n"
" b m1_loopnd4_finish\n"
" 7: \n"
" st1 {v8.4s}, [%2], #16 \n"
" m1_loopnd4_finish: \n"
" subs %w8, %w8, #1 \n"
" mov %0, x8 \n"
" bne 9b \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc), // %2
"=r"(k8_even),// %3
"=r"(k8), // %4
"=r"(k4), // %5
"=r"(k2), // %6
"=r"(k1), // %7
"=r"(n4), // %8
"=r"(scales), // %9
"=r"(bias) // %10
: "0"(pa),
"1"(pb),
"2"(pc),
"3"(k8_even),
"4"(k8),
"5"(k4),
"6"(k2),
"7"(k1),
"8"(n4),
"9"(scales),
"10"(bias)
: "cc", "memory", "x8", "w19", "w24", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
if (n2 > 0) {
asm volatile(
"m1_nd2_start: \n"
" eor v8.16b, v8.16b, v8.16b \n"
" eor v9.16b, v9.16b, v9.16b \n"
" eor v10.16b, v10.16b, v10.16b\n"
" eor v11.16b, v11.16b, v11.16b\n"
" mov x8, %0 // PanelA\n"
" cmp %w4, #0 \n"
" beq 1f // k <= 7\n"
" mov w19, %w4\n"
" cmp %w3, #0 \n"
" beq 2f // loop number is even \n"
" // start loopmd1_kd8_nd2 \n"
" subs w19, w19, #1 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load two lines of B\n"
" ld1 {v2.8b}, [%0], #8 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" saddlp v8.4s, v0.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" saddlp v9.4s, v0.8h \n"
" cmp w19, #0 \n"
" beq 3f \n"
" 2: \n"
" ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v2.8b, v4.8b \n"
" smlal v0.8h, v3.8b, v6.8b \n"
" sadalp v8.4s, v0.8h \n"
" smull v1.8h, v2.8b, v5.8b \n"
" smlal v1.8h, v3.8b, v7.8b \n"
" sadalp v9.4s, v1.8h \n"
" subs w19, w19, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v8.4s, v8.4s, v8.4s \n"
" // start process kd4 kd2 kd1 cases \n"
" 1: \n"
" cmp %w5, 0 \n"
" beq 4f \n"
" // start subkernel_m1n2k4 \n"
" ld1 {v4.8b}, [%1], #8 // load B4x2\n"
" sxtl v4.8h, v4.8b \n"
" mov v6.d[0], v4.d[1] \n"
" ld1 {v2.8b}, [%0] // load A1x4\n"
" add %0, %0, #4 \n"
" sxtl v2.8h, v2.8b \n"
" smull v9.4s, v2.4h, v4.4h \n"
" smull v10.4s, v2.4h, v6.4h \n"
" addp v9.4s, v9.4s, v10.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" 4: \n"
" cmp %w6, 0 \n"
" beq 5f \n"
" // start subkernel_m1n2k2 \n"
" ld1 {v4.8b}, [%0] // load A1x2\n"
" add %0, %0, #2 \n"
" ld1 {v0.8b}, [%1] // load B2x2\n"
" add %1, %1, #4 \n"
" mov v4.h[1], v4.h[0] \n"
" smull v0.8h, v4.8b, v0.8b \n"
" saddlp v0.4s, v0.8h \n"
" add v8.4s, v8.4s, v0.4s \n"
" 5: \n"
" cmp %w7, 0 \n"
" beq 6f \n"
" // start subkernel_m1n2k1 \n"
" ld1 {v4.8b}, [%1] // load B1x2\n"
" add %1, %1, #2 \n"
" ld1 {v2.8b}, [%0] // load A1x1\n"
" add %0, %0, #2 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0]\n"
" 6: \n"
" cmp %9, #0 \n"
" beq 7f \n"
" // v12: s0 s1 \n"
" ldr w24, [%9] \n"
" mov v12.s[0], w24 \n"
" mov v12.s[1], v12.s[0] \n"
" // int32 => fp32 \n"
" scvtf v8.2s, v8.2s \n"
" // fp32 *= scale_tm \n"
" fmul v8.2s, v8.2s, v12.2s \n"
" cmp %10, #0 \n"
" beq 8f \n"
" // fp32 += bias_tm \n"
" ldr w24, [%10] \n"
" mov v12.s[0], w24 \n"
" mov v12.s[1], v12.s[0] \n"
" fadd v8.2s, v8.2s, v12.2s \n"
" 8:\n"
" // fp32 -> int32 \n"
" fcvtas v8.2s, v8.2s\n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.h}[0], [%2]\n"
" add %2, %2, #2 \n"
" b m1_loopnd2_finish\n"
" 7: \n"
" st1 {v8.2s}, [%2], #8 \n"
" m1_loopnd2_finish: \n"
" mov %0, x8 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc), // %2
"=r"(k8_even),// %3
"=r"(k8), // %4
"=r"(k4), // %5
"=r"(k2), // %6
"=r"(k1), // %7
"=r"(n4), // %8
"=r"(scales), // %9
"=r"(bias) // %10
: "0"(pa),
"1"(pb),
"2"(pc),
"3"(k8_even),
"4"(k8),
"5"(k4),
"6"(k2),
"7"(k1),
"8"(n4),
"9"(scales),
"10"(bias)
: "cc", "memory", "x8", "w19", "w24", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
if (n1 > 0) {
asm volatile (
"m1_nd1_start: \n"
" eor v8.16b, v8.16b, v8.16b \n"
" eor v9.16b, v9.16b, v9.16b \n"
" eor v10.16b, v10.16b, v10.16b\n"
" eor v11.16b, v11.16b, v11.16b\n"
" cmp %w4, #0 \n"
" beq 1f // k <= 7 \n"
" mov w19, %w4\n"
" cmp %w3, #0 \n"
" beq 2f // loop number is even \n"
" // start loopkd8_nd1 \n"
" subs w19, w19, #1 \n"
" ld1 {v4.8b}, [%1], #8 // load B line \n"
" ld1 {v2.8b}, [%0], #8 // load A line \n"
" smull v0.8h, v4.8b, v2.8b \n"
" saddlp v8.4s, v0.8h \n"
" cmp w19, #0 \n"
" beq 3f \n"
" 2: \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v24.8b, v25.8b}, [%0], #16\n"
" smull v0.8h, v24.8b, v4.8b \n"
" smlal v0.8h, v25.8b, v5.8b \n"
" sadalp v8.4s, v0.8h \n"
" subs w19, w19, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v8.4s, v8.4s, v8.4s \n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w5, 0 \n"
" beq 4f \n"
" // start subkernel_m1n1k4 \n"
" ld1 {v4.8b}, [%1] // load B4x1\n"
" add %1, %1, #4 \n"
" sxtl v4.8h, v4.8b // extend B4x1 to v4\n"
" ld1 {v2.8b}, [%0] // load A1x4\n"
" add %0, %0, #4 \n"
" sxtl v2.8h, v2.8b \n"
" smull v9.4s, v2.4h, v4.4h \n"
" addp v9.4s, v9.4s, v9.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" 4: \n"
" cmp %w6, 0 \n"
" beq 5f \n"
" // start subkernel_m1n1k2 \n"
" ld1 {v4.8b}, [%0] // load A1x2\n"
" add %0, %0, #2 \n"
" ld1 {v0.8b}, [%1] // load B2x1\n"
" add %1, %1, #2 \n"
" smull v0.8h, v0.8b, v4.8b \n"
" saddlp v0.4s, v0.8h \n"
" add v8.4s, v8.4s, v0.4s \n"
" 5: \n"
" cmp %w7, 0 \n"
" beq 6f \n"
" // start subkernel_m1n1k1 \n"
" ld1 {v0.8b}, [%1] // load B1x1 \n"
" add %1, %1, #1 \n"
" ld1 {v1.8b}, [%0] // load A1x1 \n"
" add %0, %0, #1 \n"
" sxtl v1.8h, v1.8b \n"
" sxtl v0.8h, v0.8b \n"
" smull v0.4s, v1.4h, v0.h[0] \n"
" add v8.4s, v8.4s, v0.4s \n"
" 6: \n"
" cmp %9, #0 \n"
" beq 7f \n"
" // int32 => fp32 \n"
" scvtf v8.2s, v8.2s \n"
" // fp32 *= scale_tm\n"
" ldr w24, [%9] \n"
" mov v12.s[0], w24 \n"
" fmul v8.2s, v8.2s, v12.2s \n"
" cmp %10, #0 \n"
" beq 8f \n"
" // fp32 += bias_tm \n"
" ldr w24, [%10] \n"
" mov v12.s[0], w24 \n"
" fadd v8.2s, v8.2s, v12.2s \n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.2s, v8.2s\n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.b}[0], [%2]\n"
" b m1_finish \n"
" 7: \n"
" st1 {v8.s}[0], [%2] \n"
" m1_finish: \n"
" mov x0, #0 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc), // %2
"=r"(k8_even),// %3
"=r"(k8), // %4
"=r"(k4), // %5
"=r"(k2), // %6
"=r"(k1), // %7
"=r"(n4), // %8
"=r"(scales), // %9
"=r"(bias) // %10
: "0"(pa),
"1"(pb),
"2"(pc),
"3"(k8_even),
"4"(k8),
"5"(k4),
"6"(k2),
"7"(k1),
"8"(n4),
"9"(scales),
"10"(bias)
: "cc", "memory", "x0", "x8", "w19", "w24", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
}
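// Sketch (inferred from the assembly below): int8kernel_m2 computes two output
// rows at once (pc0 and pc1, ldc apart; ldc counts int32 elements when scales is
// null and int8 elements otherwise). The requantization path mirrors
// int8kernel_m1 but uses per-row scale and bias values from scales[0..1] and
// bias[0..1].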
void int8kernel_m2(void* dst, int8_t* sa, int8_t* sb, int, int k, int n, int ldc, float* scales, float* bias) {
void *pc0, *pc1;
if (scales == 0) {
pc0 = (int32_t*)dst;
pc1 = ((int32_t*)pc0) + ldc;
} else {
pc0 = dst;
pc1 = ((int8_t*)pc0) + ldc;
}
int8_t *pa = sa;
int8_t *pb = sb;
DECOMPOSE_K
DECOMPOSE_N
if (n4 > 0) {
asm volatile(
"9: \n"
" eor v8.16b, v8.16b, v8.16b \n"
" eor v9.16b, v9.16b, v9.16b \n"
" eor v10.16b, v10.16b, v10.16b \n"
" eor v11.16b, v11.16b, v11.16b \n"
" eor v12.16b, v12.16b, v12.16b \n"
" eor v13.16b, v13.16b, v13.16b \n"
" eor v14.16b, v14.16b, v14.16b \n"
" eor v15.16b, v15.16b, v15.16b \n"
" eor v16.16b, v16.16b, v16.16b \n"
" eor v17.16b, v17.16b, v17.16b \n"
" eor v18.16b, v18.16b, v18.16b \n"
" eor v19.16b, v19.16b, v19.16b \n"
" eor v20.16b, v20.16b, v20.16b \n"
" eor v21.16b, v21.16b, v21.16b \n"
" eor v22.16b, v22.16b, v22.16b \n"
" eor v23.16b, v23.16b, v23.16b \n"
" mov x8, %0 // PanelA \n"
" cmp %w5, #0 \n"
" beq 1f \n"
" mov w17, %w5 \n"
" cmp %w4, #0 \n"
" beq 2f // loop number is even \n"
" // start loopm2_kd8_nd4\n"
" subs w17, w17, #1 \n"
" ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 // load four lines of B\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v9.4s, v0.8h \n"
" saddlp v13.4s, v1.8h \n"
" smull v0.8h, v6.8b, v2.8b \n"
" smull v1.8h, v6.8b, v3.8b \n"
" saddlp v10.4s, v0.8h \n"
" saddlp v14.4s, v1.8h \n"
" smull v0.8h, v7.8b, v2.8b \n"
" smull v1.8h, v7.8b, v3.8b \n"
" saddlp v11.4s, v0.8h \n"
" saddlp v15.4s, v1.8h \n"
" cmp w17, #0 \n"
" beq 3f \n"
" 2: \n"
" add x12, %1, #32 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [x12], #16 \n"
" ld1 {v24.8b, v25.8b}, [%0], #16\n"
" smlal v0.8h, v6.8b, v24.8b \n"
" smlal v1.8h, v7.8b, v24.8b \n"
" sadalp v8.4s, v0.8h\n"
" sadalp v9.4s, v1.8h\n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v12.4s, v0.8h\n"
" sadalp v13.4s, v1.8h\n"
" // start v10v11, v14v15, v18v19, v22v23, error here!\n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [x12], #16 \n"
" smlal v0.8h, v6.8b, v24.8b \n"
" smlal v1.8h, v7.8b, v24.8b \n"
" sadalp v10.4s, v0.8h \n"
" sadalp v11.4s, v1.8h \n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v14.4s, v0.8h \n"
" sadalp v15.4s, v1.8h \n"
" add %1, %1, #32 \n"
" subs w17, w17, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v10.4s, v10.4s, v11.4s\n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v8.4s, v8.4s, v10.4s \n"
" addp v9.4s, v12.4s, v14.4s \n"
" // start process kd4 kd2 kd1 cases \n"
" 1: \n"
" cmp %w6, #0 \n"
" beq 4f \n"
" // start subkernel_m2n4k4 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load B4x4\n"
" sxtl v4.8h, v4.8b \n"
" sxtl v5.8h, v5.8b \n"
" mov v6.d[0], v4.d[1] \n"
" mov v7.d[0], v5.d[1] \n"
" ld1 {v2.8b}, [%0], #8 // load A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v12.4s, v2.4h, v4.4h \n"
" smull v13.4s, v2.4h, v6.4h \n"
" smull v14.4s, v2.4h, v5.4h \n"
" smull v15.4s, v2.4h, v7.4h \n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v12.4s, v12.4s, v14.4s\n"
" add v8.4s, v8.4s, v12.4s \n"
" smull v16.4s, v3.4h, v4.4h \n"
" smull v17.4s, v3.4h, v6.4h \n"
" smull v18.4s, v3.4h, v5.4h \n"
" smull v19.4s, v3.4h, v7.4h \n"
" addp v16.4s, v16.4s, v17.4s\n"
" addp v18.4s, v18.4s, v19.4s\n"
" addp v16.4s, v16.4s, v18.4s\n"
" add v9.4s, v9.4s, v16.4s \n"
" 4: \n"
" cmp %w7, #0 \n"
" beq 5f \n"
" // start subkernel_m2n4k2 \n"
" ld1 {v4.8b}, [%0] // load A2x2 \n"
" add %0, %0, #4 \n"
" ld1 {v0.8b}, [%1], #8 // load B2x4 \n"
" // 00 11 22 33 \n"
" rev32 v1.4h, v0.4h // 11 00 33 22 \n"
" rev64 v2.2s, v0.2s // 22 33 00 11 \n"
" rev64 v3.4h, v0.4h // 33 22 11 00 \n"
" smull v12.8h, v4.8b, v0.8b \n"
" smull v13.8h, v4.8b, v1.8b \n"
" smull v14.8h, v4.8b, v2.8b \n"
" smull v15.8h, v4.8b, v3.8b \n"
" saddlp v12.4s, v12.8h \n"
" saddlp v13.4s, v13.8h \n"
" saddlp v14.4s, v14.8h \n"
" saddlp v15.4s, v15.8h \n"
" mov v16.s[0], v12.s[0] \n"
" mov v16.s[1], v13.s[0] \n"
" mov v16.s[2], v14.s[0] \n"
" mov v16.s[3], v15.s[0] \n"
" mov v17.s[0], v13.s[1] \n"
" mov v17.s[1], v12.s[1] \n"
" mov v17.s[2], v15.s[1] \n"
" mov v17.s[3], v14.s[1] \n"
" add v8.4s, v8.4s, v16.4s \n"
" add v9.4s, v9.4s, v17.4s \n"
" 5: \n"
" cmp %w8, #0 \n"
" beq 6f \n"
" // start subkernel_m2n4k1 \n"
" ld1 {v4.8b}, [%1] // load B1x4\n"
" add %1, %1, #4 \n"
" ld1 {v2.8b}, [%0] // load A2x1\n"
" add %0, %0, #2 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0]\n"
" smlal v9.4s, v4.4h, v2.h[1]\n"
" 6: \n"
" cmp %10, #0 \n"
" beq 7f \n"
" ld1 {v12.2s}, [%10] \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" scvtf v9.4s, v9.4s \n"
" // fp32 *= scale_tm \n"
" fmul v8.4s, v8.4s, v12.s[0]\n"
" fmul v9.4s, v9.4s, v12.s[1]\n"
" cmp %11, #0 \n"
" beq 8f \n"
" // fp32 += scales_tm \n"
" ld1 {v14.2s}, [%11] \n"
" dup v15.4s, v14.s[0] \n"
" fadd v8.4s, v8.4s, v15.4s \n"
" dup v15.4s, v14.s[1] \n"
" fadd v9.4s, v9.4s, v15.4s \n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s\n"
" fcvtas v9.4s, v9.4s\n"
" // int32 -> int16 \n"
" sqxtn v6.4h, v8.4s \n"
" sqxtn2 v6.8h, v9.4s\n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v6.8h \n"
" // save \n"
" st1 {v8.s}[0], [%2] \n"
" add %2, %2, #4 \n"
" st1 {v8.s}[1], [%3] \n"
" add %3, %3, #4 \n"
" b m2_loopnd4_finish \n"
" 7: \n"
" st1 {v8.4s}, [%2], #16 \n"
" st1 {v9.4s}, [%3], #16 \n"
" m2_loopnd4_finish: \n"
" subs %w9, %w9, #1 \n"
" mov %0, x8 \n"
" bne 9b \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(k8_even),// %4
"=r"(k8), // %5
"=r"(k4), // %6
"=r"(k2), // %7
"=r"(k1), // %8
"=r"(n4), // %9
"=r"(scales), // %10
"=r"(bias) // %11
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(k8_even),
"5"(k8),
"6"(k4),
"7"(k2),
"8"(k1),
"9"(n4),
"10"(scales),
"11"(bias)
: "cc", "memory", "x8", "w17", "x12", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
if (n2 > 0) {
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"m2_nd2_start: \n"
" mov x8, %0 // PanelA \n"
" cmp %w5, #0 \n"
" beq 1f \n"
" mov w17, %w5 \n"
" cmp %w4, #0 \n"
" beq 2f // loop number is even \n"
" // start loopmd2_kd8_nd2 \n"
" subs w17, w17, #1 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load two lines of B\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v9.4s, v0.8h \n"
" saddlp v13.4s, v1.8h \n"
" cmp w17, #0 \n"
" beq 3f \n"
" 2: \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [%1], #16 \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v24.8b, v25.8b}, [%0], #16\n"
" smlal v0.8h, v6.8b, v24.8b \n"
" smlal v1.8h, v7.8b, v24.8b \n"
" sadalp v8.4s, v0.8h\n"
" sadalp v9.4s, v1.8h\n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v12.4s, v0.8h \n"
" sadalp v13.4s, v1.8h \n"
" subs w17, w17, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v12.4s, v12.4s, v12.4s\n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w6, #0 \n"
" beq 4f \n"
" // start subkernel_m2n2k4 \n"
" ld1 {v4.8b}, [%1], #8 // load B4x2\n"
" sxtl v4.8h, v4.8b \n"
" mov v6.d[0], v4.d[1] \n"
" ld1 {v2.8b}, [%0], #8 // load first A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v9.4s, v2.4h, v4.4h \n"
" smull v10.4s, v2.4h, v6.4h \n"
" addp v9.4s, v9.4s, v10.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" smull v13.4s, v3.4h, v4.4h \n"
" smull v14.4s, v3.4h, v6.4h \n"
" addp v13.4s, v13.4s, v14.4s\n"
" addp v13.4s, v13.4s, v13.4s\n"
" add v12.4s, v12.4s, v13.4s \n"
" 4: \n"
" cmp %w7, 0 \n"
" beq 5f \n"
" // start subkernel_m2n2k2 \n"
" ld1 {v4.8b}, [%0] // load A2x2\n"
" add %0, %0, #4 \n"
" ld1 {v0.8b}, [%1] // load B2x2\n"
" add %1, %1, #4 \n"
" // 00 11\n"
" rev32 v1.4h, v0.4h // 11 00\n"
" smull v21.8h, v4.8b, v0.8b \n"
" smull v22.8h, v4.8b, v1.8b \n"
" saddlp v21.4s, v21.8h \n"
" saddlp v22.4s, v22.8h \n"
" mov v9.s[0], v21.s[0] \n"
" mov v9.s[1], v22.s[0] \n"
" add v8.4s, v8.4s, v9.4s \n"
" mov v13.s[0], v22.s[1] \n"
" mov v13.s[1], v21.s[1] \n"
" add v12.4s, v12.4s, v13.4s \n"
" 5: \n"
" cmp %w8, #0 \n"
" beq 6f \n"
" // start subkernel_m2n2k1 \n"
" ld1 {v4.8b}, [%1] // load B1x2\n"
" add %1, %1, #2 \n"
" ld1 {v2.8b}, [%0] // load A4x1\n"
" add %0, %0, #2 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0]\n"
" smlal v12.4s, v4.4h, v2.h[1] \n"
" 6: \n"
" cmp %9, #0 \n"
" beq 7f \n"
" mov v8.d[1], v12.d[0] \n"
" // v12: 0 1 \n"
" ld1 {v12.2s}, [%9] \n"
" zip1 v12.4s, v12.4s, v12.4s\n"
" // v12: 0 0 1 1 \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" // fp32 *= scale_tm \n"
" fmul v8.4s, v8.4s, v12.4s \n"
" cmp %10, #0 \n"
" beq 8f \n"
" // fp32 += bias_tm \n"
" ld1 {v12.2s}, [%10] \n"
" zip1 v12.4s, v12.4s, v12.4s\n"
" fadd v8.4s, v8.4s, v12.4s \n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s \n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.h}[0], [%2] \n"
" add %2, %2, #2 \n"
" st1 {v8.h}[1], [%3] \n"
" add %3, %3, #2 \n"
" b m2_loopnd2_finish \n"
" 7:"
" st1 {v8.2s}, [%2], #8 \n"
" st1 {v12.2s}, [%3], #8 \n"
" m2_loopnd2_finish: \n"
" mov %0, x8 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(k8_even),// %4
"=r"(k8), // %5
"=r"(k4), // %6
"=r"(k2), // %7
"=r"(k1), // %8
"=r"(scales), // %9
"=r"(bias) // %10
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(k8_even),
"5"(k8),
"6"(k4),
"7"(k2),
"8"(k1),
"9"(scales),
"10"(bias)
: "cc", "memory", "x8", "x12", "w17", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
if (n1 > 0) {
asm volatile(
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"eor v12.16b, v12.16b, v12.16b \n"
"eor v13.16b, v13.16b, v13.16b \n"
"eor v14.16b, v14.16b, v14.16b \n"
"eor v15.16b, v15.16b, v15.16b \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"m2_nd1_start: \n"
" cmp %w5, #0 \n"
" beq 1f // k <=7\n"
" mov w17, %w5\n"
" cmp %w4, #0 \n"
" beq 2f // loop number is even \n"
" // start loopkd8_nd1 \n"
" subs w17, w17, #1 \n"
" ld1 {v4.8b}, [%1], #8 // load four lines of B\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" cmp w17, #0 \n"
" beq 3f \n"
" 2: \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v24.8b, v25.8b, v26.8b, v27.8b}, [%0], #32\n"
" smull v0.8h, v24.8b, v4.8b \n"
" smlal v0.8h, v26.8b, v5.8b \n"
" sadalp v8.4s, v0.8h \n"
" smull v1.8h, v25.8b, v4.8b \n"
" smlal v1.8h, v27.8b, v5.8b \n"
" sadalp v12.4s, v1.8h \n"
" subs w17, w17, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v12.4s, v12.4s, v12.4s\n"
" addp v12.4s, v12.4s, v12.4s\n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w6, #0 \n"
" beq 4f \n"
" // start subkernel_m2n1k2 \n"
" ld1 {v4.8b}, [%1] // load B4x1\n"
" add %1, %1, #4 \n"
" sxtl v4.8h, v4.8b // extend B4x1 to v4\n"
" ld1 {v2.8b}, [%0], #8 // load A2x4 \n"
" sxtl v2.8h, v2.8b \n"
" mov v5.d[0], v2.d[1] \n"
" smull v9.4s, v2.4h, v4.4h \n"
" addp v9.4s, v9.4s, v9.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" smull v13.4s, v5.4h, v4.4h \n"
" addp v13.4s, v13.4s, v13.4s\n"
" addp v13.4s, v13.4s, v13.4s\n"
" add v12.4s, v12.4s, v13.4s \n"
" 4: \n"
" cmp %w7, 0 \n"
" beq 5f \n"
" // start subkernel_m2n1k2 \n"
" ld1 {v4.8b}, [%0] // load A2x2\n"
" add %0, %0, #4 \n"
" ld1 {v0.8b}, [%1] // load B2x1\n"
" add %1, %1, #2 \n"
" mov v0.h[1], v0.h[0] \n"
" smull v0.8h, v0.8b, v4.8b \n"
" saddlp v0.4s, v0.8h \n"
" mov v9.s[0], v0.s[0] \n"
" add v8.4s, v8.4s, v9.4s \n"
" mov v13.s[0], v0.s[1] \n"
" add v12.4s, v12.4s, v13.4s \n"
" 5: \n"
" cmp %w8, 0 \n"
" beq 6f \n"
" // start subkernel_m2n1k1 \n"
" ld1 {v0.8b}, [%1] // load B1x1\n"
" add %1, %1, #1 \n"
" ld1 {v1.8b}, [%0] // load A2x1\n"
" add %0, %0, #2 \n"
" sxtl v1.8h, v1.8b \n"
" sxtl v0.8h, v0.8b \n"
" smull v0.4s, v1.4h, v0.h[0]\n"
" mov v1.s[0], v0.s[1] \n"
" add v8.4s, v8.4s, v0.4s \n"
" add v12.4s, v12.4s, v1.4s \n"
" 6: \n"
" cmp %w9, #0 \n"
" beq 7f \n"
" mov v8.s[1], v12.s[0] \n"
" // v12: s0 s1 \n"
" ld1 {v12.2s}, [%9] \n"
" // int32 => fp32 \n"
" scvtf v8.2s, v8.2s \n"
" // fp32 *= scale_tm \n"
" fmul v8.2s, v8.2s, v12.2s \n"
" cmp %10, #0 \n"
" beq 8f \n"
" // fp32 += bias_tm \n"
" ld1 {v12.2s}, [%10] \n"
" fadd v8.2s, v8.2s, v12.2s \n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.2s, v8.2s \n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.b}[0], [%2] \n"
" st1 {v8.b}[1], [%3] \n"
" b m2_finish \n"
" 7: \n"
" st1 {v8.s}[0], [%2] \n"
" st1 {v12.s}[0], [%3] \n"
" m2_finish: \n"
" mov x0, #0 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(k8_even),// %4
"=r"(k8), // %5
"=r"(k4), // %6
"=r"(k2), // %7
"=r"(k1), // %8
"=r"(scales), // %9
"=r"(bias) // %10
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(k8_even),
"5"(k8),
"6"(k4),
"7"(k2),
"8"(k1),
"9"(scales),
"10"(bias)
: "cc", "memory", "x0", "x8", "w17", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
}
void int8kernel_m4(void* dst, int8_t* sa, int8_t* sb, int, int k, int n, int ldc, float* scales, float* bias) {
void *pc0, *pc1, *pc2, *pc3;
if (scales == 0) {
pc0 = (int32_t*)dst;
pc1 = ((int32_t*)pc0) + ldc;
pc2 = ((int32_t*)pc1) + ldc;
pc3 = ((int32_t*)pc2) + ldc;
} else {
pc0 = dst;
pc1 = ((int8_t*)pc0) + ldc;
pc2 = ((int8_t*)pc1) + ldc;
pc3 = ((int8_t*)pc2) + ldc;
}
int8_t *pa = sa;
int8_t *pb = sb;
DECOMPOSE_K
DECOMPOSE_N
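/*
 * DECOMPOSE_K / DECOMPOSE_N are helper macros defined earlier in this file
 * (and #undef'd after the kernels). Judging from their uses below, they are
 * assumed to yield the k-tiling values k8 (count of k/8 blocks), k8_even
 * (flag used to peel one iteration when that count is odd), the k4/k2/k1
 * remainder flags, and the column-tiling values n4/n2/n1. This note is only
 * a reading aid, not their authoritative definition.
 */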
if (n4 > 0) {
asm volatile(
"8: \n"
" eor v8.8b, v8.8b, v8.8b \n"
" eor v9.8b, v9.8b, v9.8b \n"
" eor v10.8b, v10.8b, v10.8b \n"
" eor v11.8b, v11.8b, v11.8b \n"
" eor v12.8b, v12.8b, v12.8b \n"
" eor v13.8b, v13.8b, v13.8b \n"
" eor v14.8b, v14.8b, v14.8b \n"
" eor v15.8b, v15.8b, v15.8b \n"
" eor v16.8b, v16.8b, v16.8b \n"
" eor v17.8b, v17.8b, v17.8b \n"
" eor v18.8b, v18.8b, v18.8b \n"
" eor v19.8b, v19.8b, v19.8b \n"
" eor v20.8b, v20.8b, v20.8b \n"
" eor v21.8b, v21.8b, v21.8b \n"
" eor v22.8b, v22.8b, v22.8b \n"
" eor v23.8b, v23.8b, v23.8b \n"
" mov x8, %0 \n"
" cmp %w7, #0 \n"
" beq 1f \n"
" mov w20, %w7 \n"
" cmp %w6, #0 \n"
" beq 2f \n"
" subs w20, w20, #1 \n"
" ld1 {v4.8b, v5.8b, v6.8b, v7.8b}, [%1], #32 \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v9.4s, v0.8h \n"
" saddlp v13.4s, v1.8h \n"
" smull v0.8h, v6.8b, v2.8b \n"
" smull v1.8h, v6.8b, v3.8b \n"
" saddlp v10.4s, v0.8h \n"
" saddlp v14.4s, v1.8h \n"
" smull v0.8h, v7.8b, v2.8b \n"
" smull v1.8h, v7.8b, v3.8b \n"
" saddlp v11.4s, v0.8h \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" saddlp v15.4s, v1.8h \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v16.4s, v0.8h \n"
" saddlp v20.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v17.4s, v0.8h \n"
" saddlp v21.4s, v1.8h \n"
" smull v0.8h, v6.8b, v2.8b \n"
" smull v1.8h, v6.8b, v3.8b \n"
" saddlp v18.4s, v0.8h \n"
" saddlp v22.4s, v1.8h \n"
" smull v0.8h, v7.8b, v2.8b \n"
" smull v1.8h, v7.8b, v3.8b \n"
" saddlp v19.4s, v0.8h \n"
" saddlp v23.4s, v1.8h \n"
" cmp w20, #0 \n"
" beq 3f \n"
" 2: \n"
" add x15, %x1, #32 \n"
" add x14, %x0, #32 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16\n"
" ld1 {v2.8b, v3.8b}, [%0], #16\n"
" smull v0.8h, v4.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [x15], #16 \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v24.8b, v25.8b}, [x14], #16\n"
" smlal v0.8h, v6.8b, v24.8b\n"
" smlal v1.8h, v7.8b, v24.8b\n"
" sadalp v8.4s, v0.8h\n"
" sadalp v9.4s, v1.8h\n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v12.4s, v0.8h\n"
" sadalp v13.4s, v1.8h\n"
" // finish v8v9 v12v13, start proc v16v17,v20v21\n"
" ld1 {v28.8b, v29.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v28.8b \n"
" smull v1.8h, v5.8b, v28.8b \n"
" ld1 {v26.8b, v27.8b}, [x14], #16\n"
" smlal v0.8h, v6.8b, v26.8b \n"
" smlal v1.8h, v7.8b, v26.8b \n"
" sadalp v16.4s, v0.8h \n"
" sadalp v17.4s, v1.8h \n"
" smull v0.8h, v4.8b, v29.8b \n"
" smull v1.8h, v5.8b, v29.8b \n"
" smlal v0.8h, v6.8b, v27.8b \n"
" smlal v1.8h, v7.8b, v27.8b \n"
" sadalp v20.4s, v0.8h \n"
" sadalp v21.4s, v1.8h \n"
" // start v10v11, v14v15, v18v19, v22v23\n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [x15], #16 \n"
" smlal v0.8h, v6.8b, v24.8b \n"
" smlal v1.8h, v7.8b, v24.8b \n"
" sadalp v10.4s, v0.8h \n"
" sadalp v11.4s, v1.8h \n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v14.4s, v0.8h \n"
" sadalp v15.4s, v1.8h \n"
" smull v0.8h, v4.8b, v28.8b \n"
" smull v1.8h, v5.8b, v28.8b \n"
" smlal v0.8h, v6.8b, v26.8b \n"
" smlal v1.8h, v7.8b, v26.8b \n"
" sadalp v18.4s, v0.8h \n"
" sadalp v19.4s, v1.8h \n"
" smull v0.8h, v4.8b, v29.8b \n"
" smull v1.8h, v5.8b, v29.8b \n"
" smlal v0.8h, v6.8b, v27.8b \n"
" smlal v1.8h, v7.8b, v27.8b \n"
" sadalp v22.4s, v0.8h \n"
" sadalp v23.4s, v1.8h \n"
" add %0, %0, #32 \n"
" add %1, %1, #32 \n"
" subs w20, w20, #2 \n"
" bne 2b \n"
// start nd2
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v10.4s, v10.4s, v11.4s\n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v16.4s, v16.4s, v17.4s\n"
" addp v18.4s, v18.4s, v19.4s\n"
" addp v20.4s, v20.4s, v21.4s\n"
" addp v22.4s, v22.4s, v23.4s\n"
" addp v8.4s, v8.4s, v10.4s \n"
" addp v9.4s, v12.4s, v14.4s \n"
" addp v10.4s, v16.4s, v18.4s\n"
" addp v11.4s, v20.4s, v22.4s\n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w8, #0 \n"
" beq 4f \n"
" // start subkernel_m4n4k4\n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load B4x4\n"
" sxtl v4.8h, v4.8b \n"
" mov v6.d[0], v4.d[1] \n"
" sxtl v5.8h, v5.8b \n"
" mov v7.d[0], v5.d[1] \n"
" ld1 {v2.8b}, [%0], #8 // load A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v12.4s, v2.4h, v4.4h \n"
" smull v13.4s, v2.4h, v6.4h \n"
" smull v14.4s, v2.4h, v5.4h \n"
" addp v12.4s, v12.4s, v13.4s\n"
" smull v15.4s, v2.4h, v7.4h \n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v12.4s, v12.4s, v14.4s\n"
" smull v16.4s, v3.4h, v4.4h \n"
" add v8.4s, v8.4s, v12.4s \n"
" smull v17.4s, v3.4h, v6.4h \n"
" smull v18.4s, v3.4h, v5.4h \n"
" addp v16.4s, v16.4s, v17.4s\n"
" smull v19.4s, v3.4h, v7.4h \n"
" addp v18.4s, v18.4s, v19.4s\n"
" addp v16.4s, v16.4s, v18.4s\n"
" add v9.4s, v9.4s, v16.4s \n"
" ld1 {v2.8b}, [%0], #8 // load next A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v12.4s, v2.4h, v4.4h \n"
" smull v13.4s, v2.4h, v6.4h \n"
" smull v14.4s, v2.4h, v5.4h \n"
" addp v12.4s, v12.4s, v13.4s\n"
" smull v15.4s, v2.4h, v7.4h \n"
" addp v14.4s, v14.4s, v15.4s\n"
" addp v12.4s, v12.4s, v14.4s\n"
" smull v16.4s, v3.4h, v4.4h \n"
" add v10.4s, v10.4s, v12.4s \n"
" smull v17.4s, v3.4h, v6.4h \n"
" smull v18.4s, v3.4h, v5.4h \n"
" addp v16.4s, v16.4s, v17.4s\n"
" smull v19.4s, v3.4h, v7.4h \n"
" addp v18.4s, v18.4s, v19.4s\n"
" addp v16.4s, v16.4s, v18.4s\n"
" add v11.4s, v11.4s, v16.4s \n"
" 4: \n"
" cmp %w9, #0 \n"
" beq 5f \n"
" // start subkernel_m4n4k2 \n"
" ld1 {v0.8b}, [%1], #8 // load B2x4 \n"
" // 00 11 22 33 \n"
" rev32 v1.4h, v0.4h // 11 00 33 22 \n"
" rev64 v2.2s, v0.2s // 22 33 00 11 \n"
" ld1 {v4.8b}, [%0], #8 // load A4x2 \n"
" rev64 v3.4h, v0.4h // 33 22 11 00 \n"
" smull v12.8h, v4.8b, v0.8b \n"
" smull v13.8h, v4.8b, v1.8b \n"
" saddlp v12.4s, v12.8h \n"
" smull v14.8h, v4.8b, v2.8b \n"
" saddlp v13.4s, v13.8h \n"
" smull v15.8h, v4.8b, v3.8b \n"
" saddlp v14.4s, v14.8h \n"
" saddlp v15.4s, v15.8h \n"
" mov v16.s[0], v12.s[0] \n"
" mov v16.s[1], v13.s[0] \n"
" mov v16.s[2], v14.s[0] \n"
" mov v16.s[3], v15.s[0] \n"
" mov v17.s[0], v13.s[1] \n"
" mov v17.s[1], v12.s[1] \n"
" mov v17.s[2], v15.s[1] \n"
" mov v17.s[3], v14.s[1] \n"
" mov v18.s[0], v14.s[2] \n"
" mov v18.s[1], v15.s[2] \n"
" mov v18.s[2], v12.s[2] \n"
" mov v18.s[3], v13.s[2] \n"
" mov v19.s[0], v15.s[3] \n"
" mov v19.s[1], v14.s[3] \n"
" mov v19.s[2], v13.s[3] \n"
" mov v19.s[3], v12.s[3] \n"
" add v8.4s, v8.4s, v16.4s \n"
" add v9.4s, v9.4s, v17.4s \n"
" add v10.4s, v10.4s, v18.4s \n"
" add v11.4s, v11.4s, v19.4s \n"
" 5: \n"
" cmp %w10, #0 \n"
" beq 6f \n"
" // start subkernel_m4n4k1\n"
" ld1 {v4.8b}, [%1] // load B1x4\n"
" add %1, %1, #4 \n"
" ld1 {v2.8b}, [%0] // load A4x1\n"
" add %0, %0, #4 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0] \n"
" smlal v9.4s, v4.4h, v2.h[1] \n"
" smlal v10.4s, v4.4h, v2.h[2] \n"
" smlal v11.4s, v4.4h, v2.h[3] \n"
" 6: \n"
" cmp %12, #0 \n"
" beq 9f \n"
" ld1 {v12.4s}, [%12] \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" scvtf v9.4s, v9.4s \n"
" scvtf v10.4s, v10.4s \n"
" scvtf v11.4s, v11.4s \n"
" // fp32 *= scale_tm \n"
" fmul v8.4s, v8.4s, v12.s[0] \n"
" fmul v9.4s, v9.4s, v12.s[1] \n"
" fmul v10.4s, v10.4s, v12.s[2] \n"
" fmul v11.4s, v11.4s, v12.s[3] \n"
" cmp %13, #0 \n"
" beq 7f \n"
" ld1 {v14.4s}, [%13] \n"
" dup v15.4s, v14.s[0] \n"
" fadd v8.4s, v8.4s, v15.4s \n"
" dup v15.4s, v14.s[1] \n"
" fadd v9.4s, v9.4s, v15.4s \n"
" dup v15.4s, v14.s[2] \n"
" fadd v10.4s, v10.4s, v15.4s\n"
" dup v15.4s, v14.s[3] \n"
" fadd v11.4s, v11.4s, v15.4s\n"
" 7: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s \n"
" fcvtas v9.4s, v9.4s \n"
" fcvtas v10.4s, v10.4s \n"
" fcvtas v11.4s, v11.4s \n"
" // int32 -> int16 \n"
" sqxtn v6.4h, v8.4s \n"
" sqxtn2 v6.8h, v9.4s \n"
" sqxtn v7.4h, v10.4s \n"
" sqxtn2 v7.8h, v11.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v6.8h \n"
" sqxtn v9.8b, v7.8h \n"
" // save \n"
" st1 {v8.s}[0], [%2] \n"
" add %x2, %x2, #4 \n"
" st1 {v8.s}[1], [%3] \n"
" add %x3, %x3, #4 \n"
" st1 {v9.s}[0], [%4] \n"
" add %x4, %x4, #4 \n"
" st1 {v9.s}[1], [%5] \n"
" add %x5, %x5, #4 \n"
" b m4_loopnd4_finish \n"
" 9: \n"
" st1 {v8.4s}, [%x2], #16 \n"
" st1 {v9.4s}, [%x3], #16 \n"
" st1 {v10.4s}, [%x4], #16 \n"
" st1 {v11.4s}, [%x5], #16 \n"
" m4_loopnd4_finish: \n"
" subs %x11, %x11, #1 \n"
" mov %x0, x8 \n"
" bne 8b \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(pc2), // %4
"=r"(pc3), // %5
"=r"(k8_even),// %6
"=r"(k8), // %7
"=r"(k4), // %8
"=r"(k2), // %9
"=r"(k1), // %10
"=r"(n4), // %11
"=r"(scales), // %12
"=r"(bias) // %13
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(pc2),
"5"(pc3),
"6"(k8_even),
"7"(k8),
"8"(k4),
"9"(k2),
"10"(k1),
"11"(n4),
"12"(scales),
"13"(bias)
: "cc", "memory", "x8", "w20", "x14", "x15", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
if (n2 > 0) {
asm volatile(
" eor v8.8b, v8.8b, v8.8b \n"
" eor v9.8b, v9.8b, v9.8b \n"
" eor v10.8b, v10.8b, v10.8b \n"
" eor v11.8b, v11.8b, v11.8b \n"
" eor v12.8b, v12.8b, v12.8b \n"
" eor v13.8b, v13.8b, v13.8b \n"
" eor v14.8b, v14.8b, v14.8b \n"
" eor v15.8b, v15.8b, v15.8b \n"
" eor v16.8b, v16.8b, v16.8b \n"
" eor v17.8b, v17.8b, v17.8b \n"
" eor v18.8b, v18.8b, v18.8b \n"
" eor v19.8b, v19.8b, v19.8b \n"
" eor v20.8b, v20.8b, v20.8b \n"
" eor v21.8b, v21.8b, v21.8b \n"
" eor v22.8b, v22.8b, v22.8b \n"
" eor v23.8b, v23.8b, v23.8b \n"
"m4_nd2_start: \n"
" mov x8, %x0 // PanelA \n"
" cmp %w7, #0 \n"
" beq 1f // k <= 7 \n"
" mov w20, %w7 \n"
" cmp %w6, #0 \n"
" beq 2f// loop number is even \n"
" // start loopkd8_nd2 \n"
" subs w20, w20, #1 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 // load two lines of B\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v9.4s, v0.8h \n"
" saddlp v13.4s, v1.8h \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v16.4s, v0.8h \n"
" saddlp v20.4s, v1.8h \n"
" smull v0.8h, v5.8b, v2.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" saddlp v17.4s, v0.8h \n"
" saddlp v21.4s, v1.8h \n"
" cmp w20, #0 \n"
" beq 3f \n"
" 2: \n"
" add x15, %1, #16 \n"
" add x14, %0, #32 \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" ld1 {v6.8b, v7.8b}, [x15], #16 \n"
" smull v1.8h, v5.8b, v2.8b \n"
" ld1 {v24.8b, v25.8b}, [x14], #16 \n"
" smlal v0.8h, v6.8b, v24.8b \n"
" smlal v1.8h, v7.8b, v24.8b \n"
" sadalp v8.4s, v0.8h \n"
" sadalp v9.4s, v1.8h \n"
" smull v0.8h, v4.8b, v3.8b \n"
" smull v1.8h, v5.8b, v3.8b \n"
" smlal v0.8h, v6.8b, v25.8b \n"
" smlal v1.8h, v7.8b, v25.8b \n"
" sadalp v12.4s, v0.8h \n"
" sadalp v13.4s, v1.8h \n"
" // finish v8v9 v12v13, start proc v16v17,v20v21\n"
" ld1 {v28.8b, v29.8b}, [%0], #16\n"
" smull v0.8h, v4.8b, v28.8b\n"
" smull v1.8h, v5.8b, v28.8b\n"
" ld1 {v26.8b, v27.8b}, [x14], #16\n"
" smlal v0.8h, v6.8b, v26.8b\n"
" smlal v1.8h, v7.8b, v26.8b\n"
" sadalp v16.4s, v0.8h\n"
" sadalp v17.4s, v1.8h\n"
" smull v0.8h, v4.8b, v29.8b\n"
" smull v1.8h, v5.8b, v29.8b\n"
" smlal v0.8h, v6.8b, v27.8b\n"
" smlal v1.8h, v7.8b, v27.8b\n"
" sadalp v20.4s, v0.8h\n"
" sadalp v21.4s, v1.8h\n"
" add %0, %0, #32 \n"
" add %1, %1, #16 \n"
" subs w20, w20, #2 \n"
" bne 2b \n"
" 3: \n"
" addp v8.4s, v8.4s, v9.4s \n"
" addp v12.4s, v12.4s, v13.4s\n"
" addp v16.4s, v16.4s, v17.4s\n"
" addp v20.4s, v20.4s, v21.4s\n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v12.4s, v12.4s, v12.4s\n"
" addp v16.4s, v16.4s, v16.4s\n"
" addp v20.4s, v20.4s, v20.4s\n"
" // start process kd4 kd2 kd1 cases\n"
" 1: \n"
" cmp %w8, 0 \n"
" beq 4f \n"
" // start subkernel_m4n2k4 \n"
" ld1 {v4.8b}, [%1], #8 // load B4x2\n"
" sxtl v4.8h, v4.8b \n"
" mov v6.d[0], v4.d[1] \n"
" ld1 {v2.8b}, [%0], #8 // load first A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v9.4s, v2.4h, v4.4h \n"
" smull v10.4s, v2.4h, v6.4h \n"
" addp v9.4s, v9.4s, v10.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" smull v13.4s, v3.4h, v4.4h \n"
" smull v14.4s, v3.4h, v6.4h \n"
" addp v13.4s, v13.4s, v14.4s\n"
" addp v13.4s, v13.4s, v13.4s\n"
" add v12.4s, v12.4s, v13.4s \n"
" ld1 {v2.8b}, [%0], #8 // load next A2x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v3.d[0], v2.d[1] \n"
" smull v17.4s, v2.4h, v4.4h \n"
" smull v18.4s, v2.4h, v6.4h \n"
" addp v17.4s, v17.4s, v18.4s\n"
" addp v17.4s, v17.4s, v17.4s\n"
" add v16.4s, v16.4s, v17.4s \n"
" smull v21.4s, v3.4h, v4.4h \n"
" smull v22.4s, v3.4h, v6.4h \n"
" addp v21.4s, v21.4s, v22.4s\n"
" addp v21.4s, v21.4s, v21.4s\n"
" add v20.4s, v20.4s, v21.4s \n"
" 4: \n"
" cmp %w9, 0 \n"
" beq 5f \n"
" // start subkernel_m4n2k2 \n"
" ld1 {v4.8b}, [%0], #8 //load A4x2\n"
" ld1 {v0.8b}, [%1] // load B2x2 \n"
" add %1, %1, #4 \n"
" // 00 11 22 33 \n"
" rev32 v1.4h, v0.4h // 11 00 33 22 \n"
" rev64 v2.2s, v0.2s // 22 33 00 11 \n"
" rev64 v3.4h, v0.4h // 33 22 11 00 \n"
" smull v21.8h, v4.8b, v0.8b \n"
" smull v22.8h, v4.8b, v1.8b \n"
" smull v23.8h, v4.8b, v2.8b \n"
" smull v24.8h, v4.8b, v3.8b \n"
" saddlp v21.4s, v21.8h \n"
" saddlp v22.4s, v22.8h \n"
" saddlp v23.4s, v23.8h \n"
" saddlp v24.4s, v24.8h \n"
" mov v9.s[0], v21.s[0] \n"
" mov v9.s[1], v22.s[0] \n"
" add v8.4s, v8.4s, v9.4s\n"
" mov v13.s[0], v22.s[1] \n"
" mov v13.s[1], v21.s[1] \n"
" add v12.4s, v12.4s, v13.4s \n"
" mov v17.s[0], v23.s[2] \n"
" mov v17.s[1], v24.s[2] \n"
" add v16.4s, v16.4s, v17.4s \n"
" mov v21.s[0], v24.s[3] \n"
" mov v21.s[1], v23.s[3] \n"
" add v20.4s, v20.4s, v21.4s \n"
" 5: \n"
" cmp %w10, 0 \n"
" beq 6f \n"
" // start subkernel_m4n2k1\n"
" ld1 {v4.8b}, [%1] // load B1x2\n"
" add %1, %1, #2 \n"
" ld1 {v2.8b}, [%0] // load A4x1\n"
" add %0, %0, #4 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smlal v8.4s, v4.4h, v2.h[0] \n"
" smlal v12.4s, v4.4h, v2.h[1] \n"
" smlal v16.4s, v4.4h, v2.h[2] \n"
" smlal v20.4s, v4.4h, v2.h[3] \n"
" 6: \n"
" cmp %11, #0 \n"
" beq 7f \n"
" mov v8.d[1], v12.d[0] \n"
" mov v16.d[1], v20.d[0] \n"
" // v12: 0 1 2 3 \n"
" ld1 {v12.4s}, [%11] \n"
" zip2 v13.4s, v12.4s, v12.4s \n"
" zip1 v12.4s, v12.4s, v12.4s \n"
" // v12: 0 0 1 1 \n"
" // v13: 2 2 3 3 \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" scvtf v16.4s, v16.4s \n"
" // fp32 *= scale_tm \n"
" fmul v8.4s, v8.4s, v12.4s \n"
" fmul v16.4s, v16.4s, v13.4s\n"
" cmp %12, #0 \n"
" beq 8f // skip add scales \n"
" // fp32 += scales_tm \n"
" ld1 {v12.4s}, [%12] \n"
" zip2 v13.4s, v12.4s, v12.4s\n"
" zip1 v12.4s, v12.4s, v12.4s\n"
" fadd v8.4s, v8.4s, v12.4s \n"
" fadd v16.4s, v16.4s, v13.4s\n"
" 8: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s \n"
" fcvtas v16.4s, v16.4s \n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" sqxtn v16.4h, v16.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" sqxtn v16.8b, v16.8h \n"
" // save \n"
" st1 {v8.h}[0], [%2] \n"
" add %2, %2, #2 \n"
" st1 {v8.h}[1], [%3] \n"
" add %3, %3, #2 \n"
" st1 {v16.h}[0], [%4] \n"
" add %4, %4, #2 \n"
" st1 {v16.h}[1], [%5] \n"
" add %5, %5, #2 \n"
" b m4_loopnd2_finish \n"
" 7: \n"
" st1 {v8.2s}, [%2], #8 \n"
" st1 {v12.2s}, [%3], #8 \n"
" st1 {v16.2s}, [%4], #8 \n"
" st1 {v20.2s}, [%5], #8 \n"
" m4_loopnd2_finish: \n"
" mov %0, x8 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(pc2), // %4
"=r"(pc3), // %5
"=r"(k8_even),// %6
"=r"(k8), // %7
"=r"(k4), // %8
"=r"(k2), // %9
"=r"(k1), // %10
"=r"(scales), // %11
"=r"(bias) // %12
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(pc2),
"5"(pc3),
"6"(k8_even),
"7"(k8),
"8"(k4),
"9"(k2),
"10"(k1),
"11"(scales),
"12"(bias)
: "cc", "memory", "x8", "w20", "x14", "x15", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
if (n1 > 0) {
asm volatile(
" eor v8.8b, v8.8b, v8.8b \n"
" eor v9.8b, v9.8b, v9.8b \n"
" eor v10.8b, v10.8b, v10.8b \n"
" eor v11.8b, v11.8b, v11.8b \n"
" eor v12.8b, v12.8b, v12.8b \n"
" eor v13.8b, v13.8b, v13.8b \n"
" eor v14.8b, v14.8b, v14.8b \n"
" eor v15.8b, v15.8b, v15.8b \n"
" eor v16.8b, v16.8b, v16.8b \n"
" eor v17.8b, v17.8b, v17.8b \n"
" eor v18.8b, v18.8b, v18.8b \n"
" eor v19.8b, v19.8b, v19.8b \n"
" eor v20.8b, v20.8b, v20.8b \n"
" eor v21.8b, v21.8b, v21.8b \n"
" eor v22.8b, v22.8b, v22.8b \n"
" eor v23.8b, v23.8b, v23.8b \n"
"m4_n1_start: \n"
" cmp %w7, #0 \n"
" beq 10f \n"
" mov w20, %w7 \n"
" cmp %w6, #0 \n"
" beq 11f// loop number is even \n"
" // start loopkd8_nd1 \n"
" subs w20, w20, #1 \n"
" ld1 {v4.8b}, [%1], #8 // load four lines of B\n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load two lines of PanelA\n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v8.4s, v0.8h \n"
" saddlp v12.4s, v1.8h \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 \n"
" smull v0.8h, v4.8b, v2.8b \n"
" smull v1.8h, v4.8b, v3.8b \n"
" saddlp v16.4s, v0.8h \n"
" saddlp v20.4s, v1.8h \n"
" cmp w20, #0 \n"
" beq 12f \n"
" 11: \n"
" ld1 {v4.8b, v5.8b}, [%1], #16 \n"
" ld1 {v24.8b, v25.8b, v26.8b, v27.8b}, [%0], #32\n"
" ld1 {v28.8b, v29.8b, v30.8b, v31.8b}, [%0], #32\n"
" smull v0.8h, v24.8b, v4.8b \n"
" smlal v0.8h, v28.8b, v5.8b \n"
" sadalp v8.4s, v0.8h \n"
" smull v1.8h, v25.8b, v4.8b \n"
" smlal v1.8h, v29.8b, v5.8b \n"
" sadalp v12.4s, v1.8h \n"
" smull v0.8h, v26.8b, v4.8b \n"
" smlal v0.8h, v30.8b, v5.8b \n"
" sadalp v16.4s, v0.8h \n"
" smull v1.8h, v27.8b, v4.8b \n"
" smlal v1.8h, v31.8b, v5.8b \n"
" sadalp v20.4s, v1.8h \n"
" subs w20, w20, #2 \n"
" bne 11b \n"
" 12: \n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v8.4s, v8.4s, v8.4s \n"
" addp v12.4s, v12.4s, v12.4s\n"
" addp v12.4s, v12.4s, v12.4s\n"
" addp v16.4s, v16.4s, v16.4s\n"
" addp v16.4s, v16.4s, v16.4s\n"
" addp v20.4s, v20.4s, v20.4s\n"
" addp v20.4s, v20.4s, v20.4s\n"
" // start process kd4 kd2 kd1 cases\n"
" 10: \n"
" cmp %w8, #0 \n"
" beq 13f \n"
" // start subkernel_m4n1k2 \n"
" ld1 {v4.8b}, [%1] // load B4x1\n"
" add %x1, %x1, #4 \n"
" sxtl v4.8h, v4.8b // extend B4x1 to v4 \n"
" ld1 {v2.8b, v3.8b}, [%0], #16 // load A4x4\n"
" sxtl v2.8h, v2.8b \n"
" mov v5.d[0], v2.d[1] \n"
" sxtl v3.8h, v3.8b \n"
" mov v6.d[0], v3.d[1] // extend A4x4 to v2,v5,v3,v6\n"
" smull v9.4s, v2.4h, v4.4h \n"
" addp v9.4s, v9.4s, v9.4s \n"
" addp v9.4s, v9.4s, v9.4s \n"
" add v8.4s, v8.4s, v9.4s \n"
" smull v13.4s, v5.4h, v4.4h \n"
" addp v13.4s, v13.4s, v13.4s\n"
" addp v13.4s, v13.4s, v13.4s\n"
" add v12.4s, v12.4s, v13.4s \n"
" smull v17.4s, v3.4h, v4.4h \n"
" addp v17.4s, v17.4s, v17.4s\n"
" addp v17.4s, v17.4s, v17.4s\n"
" add v16.4s, v16.4s, v17.4s \n"
" smull v21.4s, v6.4h, v4.4h \n"
" addp v21.4s, v21.4s, v21.4s\n"
" addp v21.4s, v21.4s, v21.4s\n"
" add v20.4s, v20.4s, v21.4s \n"
" 13: \n"
" cmp %w9, #0 \n"
" beq 14f \n"
" // start subkernel_m4n1k2 \n"
" ld1 {v4.8b}, [%0], #8 // load A4x2 \n"
" ld1 {v0.8b}, [%1] // load B2x1 \n"
" add %1, %1, #2 \n"
" mov v0.h[1], v0.h[0] \n"
" mov v0.s[1], v0.s[0] \n"
" smull v0.8h, v0.8b, v4.8b \n"
" saddlp v0.4s, v0.8h \n"
" mov v9.s[0], v0.s[0] \n"
" add v8.4s, v8.4s, v9.4s \n"
" mov v13.s[0], v0.s[1] \n"
" add v12.4s, v12.4s, v13.4s \n"
" mov v17.s[0], v0.s[2] \n"
" add v16.4s, v16.4s, v17.4s \n"
" mov v21.s[0], v0.s[3] \n"
" add v20.4s, v20.4s, v21.4s \n"
" 14: \n"
" cmp %w10, #0 \n"
" beq 15f \n"
" // start subkernel_m4n1k1 \n"
" ld1 {v4.8b}, [%1] // load B1x1\n"
" add %1, %1, #1 \n"
" ld1 {v2.8b}, [%0] // load A4x1\n"
" add %0, %0, #4 \n"
" sxtl v4.8h, v4.8b \n"
" sxtl v2.8h, v2.8b \n"
" smull v0.4s, v2.4h, v4.h[0]\n"
" add v8.4s, v8.4s, v0.4s \n"
" mov v13.s[0], v0.s[1] \n"
" add v12.4s, v12.4s, v13.4s \n"
" mov v17.s[0], v0.s[2] \n"
" add v16.4s, v16.4s, v17.4s \n"
" mov v21.s[0], v0.s[3] \n"
" add v20.4s, v20.4s, v21.4s \n"
" 15: \n"
// REQUANT
" cmp %11, #0 \n"
" beq 16f \n"
" mov v8.s[1], v12.s[0] \n"
" mov v8.s[2], v16.s[0] \n"
" mov v8.s[3], v20.s[0] \n"
" // v12: s0 s1 s2 s3 \n"
" ld1 {v12.4s}, [%11] \n"
" // int32 => fp32 \n"
" scvtf v8.4s, v8.4s \n"
" // fp32 *= scale_tm \n"
" fmul v8.4s, v8.4s, v12.4s \n"
" cmp %12, #0 \n"
" beq 17f \n"
" // fp32 += bias_tm \n"
" ld1 {v12.4s}, [%12] \n"
" fadd v8.4s, v8.4s, v12.4s \n"
" 17: \n"
" // fp32 -> int32 \n"
" fcvtas v8.4s, v8.4s \n"
" // int32 -> int16 \n"
" sqxtn v8.4h, v8.4s \n"
" // int16 -> int8 \n"
" sqxtn v8.8b, v8.8h \n"
" // save \n"
" st1 {v8.b}[0], [%2] \n"
" st1 {v8.b}[1], [%3] \n"
" st1 {v8.b}[2], [%4] \n"
" st1 {v8.b}[3], [%5] \n"
" b m4_finish \n"
" // no need to add the last output pointer\n"
" 16: \n"
" st1 {v8.s}[0], [%2] \n"
" st1 {v12.s}[0], [%3] \n"
" st1 {v16.s}[0], [%4] \n"
" st1 {v20.s}[0], [%5] \n"
" m4_finish: \n"
" mov x0, #0 \n"
: "=r"(pa), // %0
"=r"(pb), // %1
"=r"(pc0), // %2
"=r"(pc1), // %3
"=r"(pc2), // %4
"=r"(pc3), // %5
"=r"(k8_even),// %6
"=r"(k8), // %7
"=r"(k4), // %8
"=r"(k2), // %9
"=r"(k1), // %10
"=r"(scales), // %11
"=r"(bias) // %12
: "0"(pa),
"1"(pb),
"2"(pc0),
"3"(pc1),
"4"(pc2),
"5"(pc3),
"6"(k8_even),
"7"(k8),
"8"(k4),
"9"(k2),
"10"(k1),
"11"(scales),
"12"(bias)
: "cc", "memory", "x0", "x8", "w20", "x14", "x15", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
}
#undef DECOMPOSE_K
#undef DECOMPOSE_N
void int8kernel(void* dst, const int8_t* sa, const int8_t* sb, int m, int k, int n, int ldc, float* scales, float* bias, const Option& opt) {
int8_t* pa = (int8_t*)sa;
int8_t* pb = (int8_t*)sb;
const int nn = (m >> 2) << 2;
if (scales == 0) {
int32_t* pc = (int32_t*)dst;
#if PRINT_MATRIX
int32_t* origin = pc;
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < nn; i += 4) {
int8kernel_m4((void*)(pc + i * ldc), pa + i * k, pb, m, k, n, ldc, 0, 0);
}
pa += nn * k;
pc += nn * ldc;
switch(m-nn)
{
case 3:
int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, 0, 0);
pc += 2 * ldc;
pa += 2 * k;
int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, 0, 0);
break;
case 2:
int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, 0, 0);
break;
case 1:
int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, 0, 0);
break;
case 0:
default:
break;
}
#if PRINT_MATRIX
print_int32_matrix("pc", origin, m, n, ldc);
#endif
} else {
int8_t* pc = (int8_t*)dst;
#if PRINT_MATRIX
print_fp32_vec("scales", scales, m);
#endif
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = 0; i < nn; i += 4) {
int8kernel_m4((void*)(pc + i * ldc), pa + i * k, pb, m, k, n, ldc, scales + i, (bias==0)? 0: bias+i);
}
pa += nn * k;
pc += nn * ldc;
scales += nn;
bias = (bias == 0)? 0: bias + nn;
switch(m-nn)
{
case 3:
int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, scales, bias);
pc += 2 * ldc;
pa += 2 * k;
scales += 2;
bias = (bias == 0)? 0: bias + 2;
int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, scales, bias);
break;
case 2:
int8kernel_m2((void*)pc, pa, pb, m, k, n, ldc, scales, bias);
break;
case 1:
int8kernel_m1((void*)pc, pa, pb, m, k, n, ldc, scales, bias);
break;
case 0:
default:
break;
}
}
return;
}
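/*
 * Usage sketch (an assumption inferred from the signatures above, not taken
 * from any original documentation): sa and sb are the packed int8 panels of
 * A (m x k) and B (k x n). With scales == NULL the kernels emit raw int32
 * accumulators; with per-row scales (and optional per-row bias) the result
 * is requantized back to int8.
 *
 *   int8kernel(dst_int32, packed_a, packed_b, m, k, n, ldc, NULL, NULL, opt);
 *   int8kernel(dst_int8,  packed_a, packed_b, m, k, n, ldc, scales, bias, opt);
 */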
#ifdef PRINT_MATRIX
#undef PRINT_MATRIX
#endif
#endif
|
GB_unop__identity_bool_uint8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_bool_uint8
// op(A') function: GB_unop_tran__identity_bool_uint8
// C type: bool
// A type: uint8_t
// cast: bool cij = (bool) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
bool z = (bool) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
bool z = (bool) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_bool_uint8
(
bool *Cx, // Cx and Ax may be aliased
const uint8_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
bool z = (bool) aij ;
Cx [p] = z ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_bool_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__identity_fp32_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_fp32_int16
// op(A') function: GB_tran__identity_fp32_int16
// C type: float
// A type: int16_t
// cast: float cij = (float) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
float z = (float) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_fp32_int16
(
float *restrict Cx,
const int16_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_fp32_int16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mMatOpen.c | #include <omp.h>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "readFiles.h"
#include "mMatOpen.h"
#include "MatrixIO.h"
char *fileName_mat1, *fileName_mat2, *fileName_mat3;
long int *mat1, *mat2, *mat3, *matFinal;
struct timeval ini_ler_mtz1, end_ler_mtz1, ini_ler_mtz2, end_ler_mtz2, ini_ler_mtz3, end_ler_mtz3, ini_mult, end_mult, tv;
unsigned int l_mat1, c_mat1, l_mat2, c_mat2, l_mat3, c_mat3, nThr;
struct timeval marcaTempo(struct timeval tv, time_t currenttime, char buffer[30]){
gettimeofday(&tv, NULL);
currenttime=tv.tv_sec;
strftime(buffer,30,"%d-%m-%Y %T.",localtime(&currenttime));
printf("%s%ld;", buffer, tv.tv_usec);
return tv;
}
void calculaTempo(struct timeval starttime, struct timeval entime){
printf("%ld;", (entime.tv_sec * 1000000 + entime.tv_usec)-(starttime.tv_sec * 1000000 + starttime.tv_usec));
}
/*
void printMatrix2(unsigned int lines, unsigned int columns, long int *mat){
for (int i=0; i < lines; i++){
for (int j=0; j < columns; j++){
printf("lines %ld; columns %ld;", i, j);
printf("%ld;\n", mat[i*columns + j]);
}
printf("\n");
}
printf("\n");
}
*/
long int *mMat(int nChunk,
unsigned int linMat1, unsigned int colMat1, long int *mat1,
unsigned int linMat2, unsigned int colMat2, long int *mat2,
unsigned int linMat3, unsigned int colMat3, long int *mat3,
long int *matFinal ){
int i=0;
int j=0;
int x=0;
//int chunk=5;
//printMatrix2(l_mat1,c_mat1,matFinal);
i=0;
j=0;
x=0;
/*printf("************************");
printMatrix2(l_mat1,c_mat1,mat1);
printf("************************");
printMatrix2(l_mat1,c_mat1,mat2);
printf("************************");
printMatrix2(l_mat3,c_mat3,mat3);
printf("************************");
printMatrix2(l_mat1,c_mat1,matFinal);
*/
omp_set_num_threads(nThr);
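// Rows of mat1 are handed out in dynamically scheduled chunks of nChunk;
// each thread computes matFinal = mat1 * mat2 + mat3 for its rows, and the
// nowait clause skips the barrier at the end of the loop.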
#pragma omp parallel private(i,j,x) shared(matFinal,mat1,mat2, mat3)
{
#pragma omp for schedule(dynamic, nChunk) nowait
for (i=0; i<linMat1; i++){
for( j=0; j<colMat2; j++){
for ( x=0; x<colMat1; x++){
matFinal[i*colMat2 + j] += mat1[i*colMat1+x] * mat2[x*colMat2+j];
// printf(" %s ; %s ; %s",matFinal[i*colMat2 + j],mat1[i*colMat1+x],mat2[x*colMat2+j]);
}
matFinal[i*colMat2 + j] += mat3[i*colMat2 + j];
}
}
}
return matFinal;
}
int main (int argc, char **argv){
char buffer[30];
time_t curtime;
nThr = atoi( argv[1] );
//nChunk = atoi( argv[2] );
l_mat1 = atoi( argv[2] );
c_mat1 = atoi( argv[3] );
fileName_mat1 = argv[4] ;
l_mat2 = atoi( argv[5] );
c_mat2 = atoi( argv[6] );
fileName_mat2 = argv[7] ;
l_mat3 = atoi( argv[8] );
c_mat3 = atoi( argv[9] );
fileName_mat3 = argv[10] ;
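/* Expected invocation, inferred from the argv parsing above (file names are
 placeholders): ./mMatOpen <nThreads> <rows1> <cols1> <mat1.txt>
 <rows2> <cols2> <mat2.txt> <rows3> <cols3> <mat3.txt> */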
int nChunk = l_mat1/nThr;
matFinal = (long int *) malloc(sizeof(long int) * l_mat1 * c_mat2);
if (matFinal == NULL){
perror("I cannot allocate memory\n");
exit(EXIT_FAILURE);
return NULL;
}
for (int i=0; i<l_mat1;i++){
for(int j=0;j<c_mat2;j++){
matFinal[i*c_mat2 + j] =0;
}
}
printf("Tipo;l_mat1;c_mat1;ini_leit_mat1;fim_leit_mat1;tmp_leit_mat1;l_mat2;c_mat2;ini_leit_mat2;fim_leit_mat2;tm_leit_mat2;l_mat3;c_mat3;ini_leit_mat3;fim_leit_mat3;tm_leit_mat3;ini_mult_mat;fim_mult_mat;tmp_mult;\n");
printf("Open%d;%d;%d;",nThr,l_mat1,c_mat1);
ini_ler_mtz1 = marcaTempo(ini_ler_mtz1, curtime, buffer);
mat1 = readFile(fileName_mat1, l_mat1, c_mat1);
end_ler_mtz1 = marcaTempo(end_ler_mtz1, curtime, buffer);
calculaTempo(ini_ler_mtz1, end_ler_mtz1);
printf("%d;%d;",l_mat2,c_mat2);
ini_ler_mtz2 = marcaTempo(ini_ler_mtz2, curtime, buffer);
mat2 = readFile(fileName_mat2, l_mat2, c_mat2);
end_ler_mtz2 = marcaTempo(end_ler_mtz2, curtime, buffer);
calculaTempo(ini_ler_mtz2, end_ler_mtz2);
ini_ler_mtz3 = marcaTempo(ini_ler_mtz3, curtime, buffer);
mat3 = readFile(fileName_mat3, l_mat3, c_mat3);
end_ler_mtz3 = marcaTempo(end_ler_mtz3, curtime, buffer);
calculaTempo(ini_ler_mtz3, end_ler_mtz3);
ini_mult = marcaTempo(ini_mult, curtime, buffer);
matFinal = mMat(nChunk, l_mat1, c_mat1, mat1, l_mat2, c_mat2, mat2, l_mat3, c_mat3, mat3, matFinal);
end_mult = marcaTempo(end_mult, curtime, buffer);
calculaTempo(ini_mult, end_mult);
printf("\n");
// printMatrix2(l_mat1,c_mat1,mat1);
free(mat1);
// printMatrix2(l_mat2,c_mat2,mat2);
free(mat2);
free(mat3);
// printf("*****Final*****");
// printMatrix2(l_mat1,c_mat1,matFinal);
free(matFinal);
return 0;
}
|
GB_unop__identity_int16_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int16_int32)
// op(A') function: GB (_unop_tran__identity_int16_int32)
// C type: int16_t
// A type: int32_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = (int16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = (int16_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int16_int32)
(
int16_t *Cx, // Cx and Ax may be aliased
const int32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t aij = Ax [p] ;
int16_t z = (int16_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int32_t aij = Ax [p] ;
int16_t z = (int16_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int16_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__lnot_int32_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__lnot_int32_int32
// op(A') function: GB_unop_tran__lnot_int32_int32
// C type: int32_t
// A type: int32_t
// cast: int32_t cij = aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CAST(z, aij) \
int32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = aij ; \
Cx [pC] = !(z != 0) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__lnot_int32_int32
(
int32_t *Cx, // Cx and Ax may be aliased
const int32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t aij = Ax [p] ;
int32_t z = aij ;
Cx [p] = !(z != 0) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__lnot_int32_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mxv_omp_mpi.c | #include "mpi.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define min(x, y) ((x)<(y)?(x):(y))
/**
Program to multiply a matrix times a vector using both
mpi to distribute the computation among nodes and omp
to distribute the computation among threads.
*/
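/* A possible build/run line (an assumption, not from the original project):
 mpicc -fopenmp mxv_omp_mpi.c -o mxv_omp_mpi
 mpirun -np 4 ./mxv_omp_mpi 2048
 The single argument is the square matrix size; the master prints the elapsed
 wall-clock time in seconds. */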
int main(int argc, char* argv[])
{
int nrows, ncols;
double *aa, *b, *c;
double *buffer, ans;
double *times;
double total_times;
int run_index;
int nruns;
int myid, master, numprocs;
double starttime, endtime;
MPI_Status status;
int i, j, numsent, sender;
int anstype, row;
srand(time(0));
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
MPI_Comm_rank(MPI_COMM_WORLD, &myid);
if (argc > 1) {
nrows = atoi(argv[1]);
ncols = nrows;
aa = (double*)malloc(sizeof(double) * nrows * ncols);
b = (double*)malloc(sizeof(double) * ncols);
c = (double*)malloc(sizeof(double) * nrows);
buffer = (double*)malloc(sizeof(double) * ncols);
master = 0;
if (myid == master) {
// Master Code goes here
for (i = 0; i < nrows; i++) {
for (j = 0; j < ncols; j++) {
aa[i*ncols + j] = (double)rand()/RAND_MAX;
}
}
// b must be filled before it is broadcast to the workers
for (j = 0; j < ncols; j++) {
b[j] = (double)rand()/RAND_MAX;
}
starttime = MPI_Wtime();
numsent = 0;
MPI_Bcast(b, ncols, MPI_DOUBLE, master, MPI_COMM_WORLD);
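// Message protocol used below: the MPI tag of a work message carries
// (row index + 1), and a tag of 0 tells a worker that no rows remain.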
for (i = 0; i < min(numprocs-1, nrows); i++) {
for (j = 0; j < ncols; j++) {
buffer[j] = aa[i * ncols + j];
}
MPI_Send(buffer, ncols, MPI_DOUBLE, i+1, i+1, MPI_COMM_WORLD);
numsent++;
}
for (i = 0; i < nrows; i++) {
MPI_Recv(&ans, 1, MPI_DOUBLE, MPI_ANY_SOURCE, MPI_ANY_TAG,
MPI_COMM_WORLD, &status);
sender = status.MPI_SOURCE;
anstype = status.MPI_TAG;
c[anstype-1] = ans;
if (numsent < nrows) {
for (j = 0; j < ncols; j++) {
buffer[j] = aa[numsent*ncols + j];
}
MPI_Send(buffer, ncols, MPI_DOUBLE, sender, numsent+1,
MPI_COMM_WORLD);
numsent++;
} else {
MPI_Send(MPI_BOTTOM, 0, MPI_DOUBLE, sender, 0, MPI_COMM_WORLD);
}
}
endtime = MPI_Wtime();
printf("%f\n",(endtime - starttime));
} else {
// Slave Code goes here
MPI_Bcast(b, ncols, MPI_DOUBLE, master, MPI_COMM_WORLD);
if (myid <= nrows) {
while(1) {
MPI_Recv(buffer, ncols, MPI_DOUBLE, master, MPI_ANY_TAG,
MPI_COMM_WORLD, &status);
if (status.MPI_TAG == 0){
break;
}
row = status.MPI_TAG;
ans = 0.0;
#pragma omp parallel for reduction(+:ans)
for (j = 0; j < ncols; j++) {
ans += buffer[j] * b[j];
}
MPI_Send(&ans, 1, MPI_DOUBLE, master, row, MPI_COMM_WORLD);
}
}
}
} else {
fprintf(stderr, "Usage matrix_times_vector <size>\n");
}
MPI_Finalize();
return 0;
}
|
Pstd.h | #pragma once
#include "Constants.h"
#include "FieldSolver.h"
#include "Grid.h"
#include "Vectors.h"
#include "PmlPstd.h"
namespace pfc {
class PSTD : public SpectralFieldSolver<PSTDGridType>
{
public:
PSTD(PSTDGrid * grid, double dt);
void updateFields();
void updateHalfB();
void updateE();
void setPML(int sizePMLx, int sizePMLy, int sizePMLz);
void setTimeStep(FP dt);
FP getCourantCondition() const {
double tmp = sqrt(1.0 / (grid->steps.x*grid->steps.x) +
1.0 / (grid->steps.y*grid->steps.y) +
1.0 / (grid->steps.z*grid->steps.z));
return 2.0 / (constants::pi * constants::c * tmp);
}
bool ifCourantConditionSatisfied(FP dt) const {
return dt < getCourantCondition();
}
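// The bound checked above is the usual PSTD/CFL-type stability limit,
// dt < 2 / (pi * c * sqrt(1/dx^2 + 1/dy^2 + 1/dz^2)),
// which is exactly what getCourantCondition() evaluates from the grid steps.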
private:
PmlSpectral<GridTypes::PSTDGridType>* getPml() {
return (PmlSpectral<GridTypes::PSTDGridType>*)pml.get();
}
};
inline PSTD::PSTD(PSTDGrid* grid, double dt) :
SpectralFieldSolver<GridTypes::PSTDGridType>(grid, dt, 0.0, 0.5*dt, 0.5*dt)
{
if (!ifCourantConditionSatisfied(dt)) {
std::cout
<< "WARNING: PSTD Courant condition is not satisfied. Another time step was setted up"
<< std::endl;
this->dt = getCourantCondition() * 0.5;
}
updateDims();
updateInternalDims();
}
inline void PSTD::setPML(int sizePMLx, int sizePMLy, int sizePMLz)
{
pml.reset(new PmlPstd(this, Int3(sizePMLx, sizePMLy, sizePMLz)));
updateInternalDims();
}
inline void PSTD::setTimeStep(FP dt)
{
if (ifCourantConditionSatisfied(dt)) {
this->dt = dt;
this->timeShiftB = 0.5*dt;
this->timeShiftJ = 0.5*dt;
if (pml.get()) pml.reset(new PmlPstd(this, pml->sizePML));
}
else {
std::cout
<< "WARNING: PSTD Courant condition is not satisfied. Time step was not changed"
<< std::endl;
}
}
inline void PSTD::updateFields()
{
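// One time step in spectral space: transform fields to k-space, advance B by
// half a step, advance E by a full step, advance B by the remaining half
// step, transform back, then let the PML apply its damping pass.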
doFourierTransform(fourier_transform::Direction::RtoC);
if (pml.get()) getPml()->updateBSplit();
updateHalfB();
if (pml.get()) getPml()->updateESplit();
updateE();
if (pml.get()) getPml()->updateBSplit();
updateHalfB();
doFourierTransform(fourier_transform::Direction::CtoR);
if (pml.get()) getPml()->doSecondStep();
globalTime += dt;
}
inline void PSTD::updateHalfB()
{
const Int3 begin = updateComplexBAreaBegin;
const Int3 end = updateComplexBAreaEnd;
double dt = 0.5 * this->dt;
OMP_FOR_COLLAPSE()
for (int i = begin.x; i < end.x; i++)
for (int j = begin.y; j < end.y; j++)
{
//#pragma omp simd
for (int k = begin.z; k < end.z; k++)
{
ComplexFP3 E(complexGrid->Ex(i, j, k), complexGrid->Ey(i, j, k), complexGrid->Ez(i, j, k));
ComplexFP3 crossKE = cross((ComplexFP3)getWaveVector(Int3(i, j, k)), E);
complexFP coeff = -complexFP::i() * constants::c * dt;
complexGrid->Bx(i, j, k) += coeff * crossKE.x;
complexGrid->By(i, j, k) += coeff * crossKE.y;
complexGrid->Bz(i, j, k) += coeff * crossKE.z;
}
}
}
inline void PSTD::updateE()
{
const Int3 begin = updateComplexEAreaBegin;
const Int3 end = updateComplexEAreaEnd;
OMP_FOR_COLLAPSE()
for (int i = begin.x; i < end.x; i++)
for (int j = begin.y; j < end.y; j++)
{
//#pragma omp simd
for (int k = begin.z; k < end.z; k++)
{
ComplexFP3 B(complexGrid->Bx(i, j, k), complexGrid->By(i, j, k), complexGrid->Bz(i, j, k));
ComplexFP3 J(complexGrid->Jx(i, j, k), complexGrid->Jy(i, j, k), complexGrid->Jz(i, j, k));
ComplexFP3 crossKB = cross((ComplexFP3)getWaveVector(Int3(i, j, k)), B);
complexFP coeff = complexFP::i() * constants::c * dt;
complexGrid->Ex(i, j, k) += coeff * crossKB.x - 4 * constants::pi * dt * J.x;
complexGrid->Ey(i, j, k) += coeff * crossKB.y - 4 * constants::pi * dt * J.y;
complexGrid->Ez(i, j, k) += coeff * crossKB.z - 4 * constants::pi * dt * J.z;
}
}
}
}
|
test-new-conv.c |
/* Test and timing harness program for developing a multichannel
multikernel convolution (as used in deep learning networks)
Note there are some simplifications around this implementation,
in particular with respect to computing the convolution at edge
pixels of the image.
Author: David Gregg
Date: February 2017
Version 1.4 : Modified the random generator to reduce the range
of generated values;
Changed the summation in the checking code from
float to double to try to bring the checked value
closer to the "true" value
Version 1.3 : Fixed which loop variables were being incremented
in write_out();
Fixed dimensions of output and control_output
matrices in main function
Version 1.2 : Changed distribution of test data to (hopefully)
eliminate random walk of floating point error;
Also introduced checks to restrict kernel-order to
a small set of values
Version 1.1 : Fixed bug in code to create 4d matrix
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <assert.h>
#include <omp.h>
#include <math.h>
#include <x86intrin.h>
/* the following two definitions of DEBUGGING control whether or not
debugging information is written out. To put the program into
debugging mode, uncomment the following line: */
/*#define DEBUGGING(_x) _x */
/* to stop the printing of debugging information, use the following line: */
#define DEBUGGING(_x)
/* write 3d matrix to stdout */
void write_out(float *** a, int dim0, int dim1, int dim2)
{
int i, j, k;
for ( i = 0; i < dim0; i++ ) {
printf("Outer dimension number %d\n", i);
for ( j = 0; j < dim1; j++ ) {
for ( k = 0; k < dim2 - 1; k++ ) {
printf("%f, ", a[i][j][k]);
}
// print end of line
printf("%f\n", a[i][j][dim2-1]);
}
}
}
/* create new empty 4d matrix */
float **** new_empty_4d_matrix(int dim0, int dim1, int dim2, int dim3)
{
float **** result = malloc(dim0 * sizeof(float***));
float *** mat1 = malloc(dim0 * dim1 * sizeof(float**));
float ** mat2 = malloc(dim0 * dim1 * dim2 * sizeof(float*));
float * mat3 = malloc(dim0 * dim1 * dim2 *dim3 * sizeof(float));
int i, j, k;
for ( i = 0; i < dim0; i++ ) {
result[i] = &(mat1[i*dim1]);
for ( j = 0; j < dim1; j++ ) {
result[i][j] = &(mat2[i*dim1*dim2 + j*dim2]);
for ( k = 0; k < dim2; k++ ) {
result[i][j][k] = &(mat3[i*dim1*dim2*dim3+j*dim2*dim3+k*dim3]);
}
}
}
return result;
}
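/* Cleanup sketch (assumption: illustrative only; the harness below never
frees its matrices). The 4D matrix is backed by four contiguous blocks, so
a matching cleanup releases them through the first element of each level:
float ****m = new_empty_4d_matrix(2, 3, 4, 5);
m[1][2][3][4] = 1.0f; // indexed like an ordinary 4D array
free(m[0][0][0]); // the float block (mat3)
free(m[0][0]); // the float* block (mat2)
free(m[0]); // the float** block (mat1)
free(m); // the float*** block (result)
*/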
/* create new empty 3d matrix */
float *** new_empty_3d_matrix(int dim0, int dim1, int dim2)
{
float **** mat4d;
float *** mat3d;
// create a 4d matrix with single first dimension
mat4d = new_empty_4d_matrix(1, dim0, dim1, dim2);
// now throw away the first dimension
mat3d = mat4d[0];
free(mat4d);
return mat3d;
}
/* take a copy of the matrix and return in a newly allocated matrix */
float **** copy_4d_matrix(float **** source_matrix, int dim0,
int dim1, int dim2, int dim3)
{
int i, j, k, l;
float **** result = new_empty_4d_matrix(dim0, dim1, dim2, dim3);
for ( i = 0; i < dim0; i++ ) {
for ( j = 0; j < dim1; j++ ) {
for ( k = 0; k < dim2; k++ ) {
for ( l = 0; l < dim3; l++ ) {
result[i][j][k][l] = source_matrix[i][j][k][l];
}
}
}
}
return result;
}
/* create a matrix and fill it with random numbers */
float **** gen_random_4d_matrix(int dim0, int dim1, int dim2, int dim3)
{
float **** result;
int i, j, k, l;
struct timeval seedtime;
int seed;
result = new_empty_4d_matrix(dim0, dim1, dim2, dim3);
/* use the microsecond part of the current time as a pseudorandom seed */
gettimeofday(&seedtime, NULL);
seed = seedtime.tv_usec;
srandom(seed);
/* fill the matrix with random numbers */
const int range = 1 << 12; // 2^12
const int bias = 1 << 16; // 2^16
float offset = 0.0;
for ( i = 0; i < dim0; i++ ) {
for ( j = 0; j < dim1; j++ ) {
for ( k = 0; k < dim2; k++ ) {
for ( l = 0; l < dim3; l++ ) {
// generate uniform random integer with mean of zero
long long rand = random();
// now cut down the range and bias the mean to reduce
// the likelihood of large floating point round-off errors
int reduced_range = (rand % range);
float num = (((float) reduced_range) / ((float) bias))+offset;
result[i][j][k][l] = num;
}
}
}
}
return result;
}
/* create a matrix and fill it with random numbers */
float *** gen_random_3d_matrix(int dim0, int dim1, int dim2)
{
float **** mat4d;
float *** mat3d;
// create a 4d matrix with single first dimension
mat4d = gen_random_4d_matrix(1, dim0, dim1, dim2);
// now throw away the first dimension
mat3d = mat4d[0];
free(mat4d);
return mat3d;
}
/* check the sum of absolute differences is within reasonable epsilon */
void check_result(float *** result, float *** control,
int dim0, int dim1, int dim2)
{
int i, j, k;
double sum_abs_diff = 0.0;
const double EPSILON = 0.0625;
//printf("SAD\n");
for ( i = 0; i < dim0; i++ ) {
for ( j = 0; j < dim1; j++ ) {
for ( k = 0; k < dim2; k++ ) {
double diff = fabs(control[i][j][k] - result[i][j][k]);
assert( diff >= 0.0 );
sum_abs_diff = sum_abs_diff + diff;
}
}
}
if ( sum_abs_diff > EPSILON ) {
fprintf(stderr, "WARNING: sum of absolute differences (%f) > EPSILON (%f)\n",
sum_abs_diff, EPSILON);
}
else {
printf("COMMENT: sum of absolute differences (%f) within acceptable range (%f)\n", sum_abs_diff, EPSILON);
}
}
/* the slow but correct version of the multichannel convolution written by David */
void multichannel_conv(float *** image, float **** kernels, float *** output,
int width, int height, int nchannels, int nkernels,
int kernel_order)
{
int h, w, x, y, c, m;
for ( w = 0; w < width; w++ ) {
for ( h = 0; h < height; h++ ) {
for ( m = 0; m < nkernels; m++ ) {
double sum = 0.0;
for ( c = 0; c < nchannels; c++ ) {
for ( x = 0; x < kernel_order; x++) {
for ( y = 0; y < kernel_order; y++ ) {
sum += image[w+x][h+y][c] * kernels[m][c][x][y];
}
}
output[m][w][h] = sum;
}
}
}
}
}
/* the fast version of the multichannel convolution written by the team */
void team_conv(float *** image, float **** kernels, float *** output,
int width, int height, int nchannels, int nkernels,
int kernel_order)
{
int h, w, x, y, c, m;
__m128 v_image, v_kernels, v_pro, v_sum;
switch(kernel_order) {
case 1: // x and y don't change
#pragma omp parallel for private(m,w,h,c,x,y,v_image,v_kernels,v_pro,v_sum) collapse(3)
for (w = 0; w < width; w++) {
for (h = 0; h < height; h++) {
for (m = 0; m < nkernels; m++) {
v_sum = _mm_set1_ps(0.0);
float sum = 0.0;
for (c = 0; c < nchannels-3; c+=4) {
v_image = _mm_loadu_ps(&image[w][h][c]);
v_kernels = _mm_loadu_ps(&kernels[m][c][0][0]);
v_pro = _mm_mul_ps(v_image, v_kernels);
v_sum = _mm_add_ps(v_sum, v_pro);
}
v_sum = _mm_hadd_ps(v_sum,v_sum);
v_sum = _mm_hadd_ps(v_sum,v_sum);
sum = _mm_cvtss_f32(v_sum);
for(; c < nchannels; c++) {
sum += image[w][h][c] * kernels[m][c][0][0];
}
output[m][w][h] = sum;
}
}
} break;
case 3: // x and y go up to 2
#pragma omp parallel for private(m,w,h,c,x,y,v_image,v_kernels,v_pro,v_sum) collapse(3)
for ( w = 0; w < width; w++ ) {
for ( h = 0; h < height; h++ ) {
for ( m = 0; m < nkernels; m++ ) {
v_sum = _mm_set1_ps(0.0);
float sum = 0.0;
for ( c = 0; c < nchannels; c++) {
v_image = _mm_set_ps(image[w+0][h+0][c],image[w+0][h+1][c],
image[w+0][h+2][c],image[w+1][h+0][c]);
v_kernels = _mm_set_ps(kernels[m][c][0][0],kernels[m][c][0][1],
kernels[m][c][0][2], kernels[m][c][1][0]);
v_pro = _mm_mul_ps(v_image, v_kernels);
v_sum = _mm_add_ps(v_sum, v_pro);
v_image = _mm_set_ps(image[w+1][h+1][c],image[w+1][h+2][c],
image[w+2][h+0][c],image[w+2][h+1][c]);
v_kernels = _mm_set_ps(kernels[m][c][1][1],kernels[m][c][1][2],
kernels[m][c][2][0],kernels[m][c][2][1]);
v_pro = _mm_mul_ps(v_image, v_kernels);
v_sum = _mm_add_ps(v_sum, v_pro);
sum += image[w+2][h+2][c] * kernels[m][c][2][2];
}
v_sum = _mm_hadd_ps(v_sum,v_sum);
v_sum = _mm_hadd_ps(v_sum,v_sum);
sum += _mm_cvtss_f32(v_sum);
output[m][w][h] = sum;
}
}
} break;
case 5: // x and y go up to 4
#pragma omp parallel for private(m,w,h,c,x,y,v_image,v_kernels,v_pro,v_sum) collapse(3)
for ( w = 0; w < width; w++ ) {
for ( h = 0; h < height; h++ ) {
for ( m = 0; m < nkernels; m++ ) {
v_sum = _mm_set1_ps(0.0);
float sum = 0.0;
for ( c = 0; c < nchannels; c++) {
v_image = _mm_set_ps(image[w+0][h+0][c],image[w+0][h+1][c],
image[w+0][h+2][c],image[w+0][h+3][c]);
v_kernels = _mm_set_ps(kernels[m][c][0][0],kernels[m][c][0][1],
kernels[m][c][0][2],kernels[m][c][0][3]);
v_pro = _mm_mul_ps(v_image, v_kernels);
v_sum = _mm_add_ps(v_sum, v_pro);
v_image = _mm_set_ps(image[w+0][h+4][c],image[w+1][h+0][c],
image[w+1][h+1][c],image[w+1][h+2][c]);
v_kernels = _mm_set_ps(kernels[m][c][0][4],kernels[m][c][1][0],
kernels[m][c][1][1],kernels[m][c][1][2]);
v_pro = _mm_mul_ps(v_image, v_kernels);
v_sum = _mm_add_ps(v_sum, v_pro);
v_image = _mm_set_ps(image[w+1][h+3][c],image[w+1][h+4][c],
image[w+2][h+0][c],image[w+2][h+1][c]);
v_kernels = _mm_set_ps(kernels[m][c][1][3],kernels[m][c][1][4],
kernels[m][c][2][0],kernels[m][c][2][1]);
v_pro = _mm_mul_ps(v_image, v_kernels);
v_sum = _mm_add_ps(v_sum, v_pro);
v_image = _mm_set_ps(image[w+2][h+2][c],image[w+2][h+3][c],
image[w+2][h+4][c],image[w+3][h+0][c]);
v_kernels = _mm_set_ps(kernels[m][c][2][2],kernels[m][c][2][3],
kernels[m][c][2][4],kernels[m][c][3][0]);
v_pro = _mm_mul_ps(v_image, v_kernels);
v_sum = _mm_add_ps(v_sum, v_pro);
v_image = _mm_set_ps(image[w+3][h+1][c],image[w+3][h+2][c],
image[w+3][h+3][c],image[w+3][h+4][c]);
v_kernels = _mm_set_ps(kernels[m][c][3][1],kernels[m][c][3][2],
kernels[m][c][3][3],kernels[m][c][3][4]);
v_pro = _mm_mul_ps(v_image, v_kernels);
v_sum = _mm_add_ps(v_sum, v_pro);
v_image = _mm_set_ps(image[w+4][h+0][c],image[w+4][h+1][c],
image[w+4][h+2][c],image[w+4][h+3][c]);
v_kernels = _mm_set_ps(kernels[m][c][4][0],kernels[m][c][4][1],
kernels[m][c][4][2],kernels[m][c][4][3]);
v_pro = _mm_mul_ps(v_image, v_kernels);
v_sum = _mm_add_ps(v_sum, v_pro);
sum += image[w+4][h+4][c] * kernels[m][c][4][4];
}
v_sum = _mm_hadd_ps(v_sum,v_sum);
v_sum = _mm_hadd_ps(v_sum,v_sum);
sum += _mm_cvtss_f32(v_sum);
output[m][w][h] = sum;
}
}
} break;
case 7: // x and y go up to 6
#pragma omp parallel for private(m,w,h,c,x,y,v_image,v_kernels,v_pro,v_sum) collapse(3)
for ( w = 0; w < width; w++ ) {
for ( h = 0; h < height; h++ ) {
for ( m = 0; m < nkernels; m++ ) {
v_sum = _mm_set1_ps(0.0);
float sum = 0.0;
for ( c = 0; c < nchannels; c++) {
v_image = _mm_set_ps(image[w+0][h+0][c],image[w+0][h+1][c],
image[w+0][h+2][c],image[w+0][h+3][c]);
v_kernels = _mm_set_ps(kernels[m][c][0][0],kernels[m][c][0][1],
kernels[m][c][0][2],kernels[m][c][0][3]);
v_pro = _mm_mul_ps(v_image, v_kernels);
v_sum = _mm_add_ps(v_sum, v_pro);
v_image = _mm_set_ps(image[w+0][h+4][c],image[w+0][h+5][c],
image[w+0][h+6][c],image[w+1][h+0][c]);
v_kernels = _mm_set_ps(kernels[m][c][0][4],kernels[m][c][0][5],
kernels[m][c][0][6],kernels[m][c][1][0]);
v_pro = _mm_mul_ps(v_image, v_kernels);
v_sum = _mm_add_ps(v_sum, v_pro);
v_image = _mm_set_ps(image[w+1][h+1][c],image[w+1][h+2][c],
image[w+1][h+3][c],image[w+1][h+4][c]);
v_kernels = _mm_set_ps(kernels[m][c][1][1],kernels[m][c][1][2],
kernels[m][c][1][3],kernels[m][c][1][4]);
v_pro = _mm_mul_ps(v_image, v_kernels);
v_sum = _mm_add_ps(v_sum, v_pro);
v_image = _mm_set_ps(image[w+1][h+5][c],image[w+1][h+6][c],
image[w+2][h+0][c],image[w+2][h+1][c]);
v_kernels = _mm_set_ps(kernels[m][c][1][5],kernels[m][c][1][6],
kernels[m][c][2][0],kernels[m][c][2][1]);
v_pro = _mm_mul_ps(v_image, v_kernels);
v_sum = _mm_add_ps(v_sum, v_pro);
v_image = _mm_set_ps(image[w+2][h+2][c],image[w+2][h+3][c],
image[w+2][h+4][c],image[w+2][h+5][c]);
v_kernels = _mm_set_ps(kernels[m][c][2][2],kernels[m][c][2][3],
kernels[m][c][2][4],kernels[m][c][2][5]);
v_pro = _mm_mul_ps(v_image, v_kernels);
v_sum = _mm_add_ps(v_sum, v_pro);
v_image = _mm_set_ps(image[w+2][h+6][c],image[w+3][h+0][c],
image[w+3][h+1][c],image[w+3][h+2][c]);
v_kernels = _mm_set_ps(kernels[m][c][2][6],kernels[m][c][3][0],
kernels[m][c][3][1],kernels[m][c][3][2]);
v_pro = _mm_mul_ps(v_image, v_kernels);
v_sum = _mm_add_ps(v_sum, v_pro);
v_image = _mm_set_ps(image[w+3][h+3][c],image[w+3][h+4][c],
image[w+3][h+5][c],image[w+3][h+6][c]);
v_kernels = _mm_set_ps(kernels[m][c][3][3],kernels[m][c][3][4],
kernels[m][c][3][5],kernels[m][c][3][6]);
v_pro = _mm_mul_ps(v_image, v_kernels);
v_sum = _mm_add_ps(v_sum, v_pro);
v_image = _mm_set_ps(image[w+4][h+0][c],image[w+4][h+1][c],
image[w+4][h+2][c],image[w+4][h+3][c]);
v_kernels = _mm_set_ps(kernels[m][c][4][0],kernels[m][c][4][1],
kernels[m][c][4][2],kernels[m][c][4][3]);
v_pro = _mm_mul_ps(v_image, v_kernels);
v_sum = _mm_add_ps(v_sum, v_pro);
v_image = _mm_set_ps(image[w+4][h+4][c],image[w+4][h+5][c],
image[w+4][h+6][c],image[w+5][h+0][c]);
v_kernels = _mm_set_ps(kernels[m][c][4][4],kernels[m][c][4][5],
kernels[m][c][4][6],kernels[m][c][5][0]);
v_pro = _mm_mul_ps(v_image, v_kernels);
v_sum = _mm_add_ps(v_sum, v_pro);
v_image = _mm_set_ps(image[w+5][h+1][c],image[w+5][h+2][c],
image[w+5][h+3][c],image[w+5][h+4][c]);
v_kernels = _mm_set_ps(kernels[m][c][5][1],kernels[m][c][5][2],
kernels[m][c][5][3],kernels[m][c][5][4]);
v_pro = _mm_mul_ps(v_image, v_kernels);
v_sum = _mm_add_ps(v_sum, v_pro);
v_image = _mm_set_ps(image[w+5][h+5][c],image[w+5][h+6][c],
image[w+6][h+0][c],image[w+6][h+1][c]);
v_kernels = _mm_set_ps(kernels[m][c][5][5],kernels[m][c][5][6],
kernels[m][c][6][0],kernels[m][c][6][1]);
v_pro = _mm_mul_ps(v_image, v_kernels);
v_sum = _mm_add_ps(v_sum, v_pro);
v_image = _mm_set_ps(image[w+6][h+2][c],image[w+6][h+3][c],
image[w+6][h+4][c],image[w+6][h+5][c]);
v_kernels = _mm_set_ps(kernels[m][c][6][2],kernels[m][c][6][3],
kernels[m][c][6][4],kernels[m][c][6][5]);
v_pro = _mm_mul_ps(v_image, v_kernels);
v_sum = _mm_add_ps(v_sum, v_pro);
sum += image[w+6][h+6][c] * kernels[m][c][6][6];
}
v_sum = _mm_hadd_ps(v_sum,v_sum);
v_sum = _mm_hadd_ps(v_sum,v_sum);
sum += _mm_cvtss_f32(v_sum);
output[m][w][h] = sum;
}
}
} break;
default: break;
}
}
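/* Horizontal-sum helper (assumption: standalone illustration, not called by
the harness): the pair of _mm_hadd_ps calls used in every case above reduces
the four lanes of a __m128 to a single scalar sum. */
static inline float hsum128_sketch(__m128 v)
{
v = _mm_hadd_ps(v, v); /* {v0+v1, v2+v3, v0+v1, v2+v3} */
v = _mm_hadd_ps(v, v); /* every lane now holds v0+v1+v2+v3 */
return _mm_cvtss_f32(v);
}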
int main(int argc, char ** argv)
{
//float image[W][H][C];
//float kernels[M][C][K][K];
//float output[M][W][H];
float *** image, **** kernels, *** output;
float *** control_output;
long long mul_time_g;
long long mul_time_t;
int width, height, kernel_order, nchannels, nkernels;
struct timeval start_time_g;
struct timeval stop_time_g;
struct timeval start_time_t;
struct timeval stop_time_t;
if ( argc != 6 ) {
fprintf(stderr, "Usage: conv-harness <image_width> <image_height> <kernel_order> <number of channels> <number of kernels>\n");
exit(1);
}
else {
width = atoi(argv[1]);
height = atoi(argv[2]);
kernel_order = atoi(argv[3]);
nchannels = atoi(argv[4]);
nkernels = atoi(argv[5]);
}
switch ( kernel_order ) {
case 1:
case 3:
case 5:
case 7: break;
default:
fprintf(stderr, "FATAL: kernel_order must be 1, 3, 5 or 7, not %d\n",
kernel_order);
exit(1);
}
/* allocate the matrices */
image = gen_random_3d_matrix(width+kernel_order, height + kernel_order,
nchannels);
kernels = gen_random_4d_matrix(nkernels, nchannels, kernel_order, kernel_order);
output = new_empty_3d_matrix(nkernels, width, height);
control_output = new_empty_3d_matrix(nkernels, width, height);
//DEBUGGING(write_out(A, a_dim1, a_dim2));
/* record starting time of Greg's code*/
gettimeofday(&start_time_g, NULL);
/* use a simple multichannel convolution routine to produce control result */
multichannel_conv(image, kernels, control_output, width,
height, nchannels, nkernels, kernel_order);
/* record Greg's finishing time */
gettimeofday(&stop_time_g, NULL);
mul_time_g = (stop_time_g.tv_sec - start_time_g.tv_sec) * 1000000L +
(stop_time_g.tv_usec - start_time_g.tv_usec);
printf("Greg conv time: %lld microseconds\n", mul_time_g);
/* record starting time of team's code*/
gettimeofday(&start_time_t, NULL);
/* perform student team's multichannel convolution */
team_conv(image, kernels, output, width,
height, nchannels, nkernels, kernel_order);
/* record finishing time */
gettimeofday(&stop_time_t, NULL);
mul_time_t = (stop_time_t.tv_sec - start_time_t.tv_sec) * 1000000L +
(stop_time_t.tv_usec - start_time_t.tv_usec);
printf("Team conv time: %lld microseconds\n", mul_time_t);
long long time = (mul_time_g/mul_time_t);
long long mintime = (mul_time_t/mul_time_g);
if(time >= 1){
printf("You're %lld times faster\n", time);
}
else{
printf("You're %lld times slower\n", mintime);
}
DEBUGGING(write_out(output, nkernels, width, height));
/* now check that the team's multichannel convolution routine
gives the same answer as the known working version */
check_result(output, control_output, nkernels, width, height);
return 0;
} |
GB_kroner.c | //------------------------------------------------------------------------------
// GB_kroner: Kronecker product, C = kron (A,B)
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// C = kron(A,B) where op determines the binary multiplier to use. The type of
// A and B are compatible with the x and y inputs of z=op(x,y), but can be
// different. The type of C is the type of z. C is hypersparse if either A
// or B are hypersparse.
// FUTURE: GB_kron would be faster with built-in types and operators.
// FUTURE: at most one thread is used for each vector of C=kron(A,B). The
// matrix C is normally very large, but if both A and B are n-by-1, then C is
// n^2-by-1 and only a single thread is used. A better method for this case
// would construct vectors of C in parallel.
// FUTURE: each vector C(:,k) takes O(nnz(C(:,k))) work, but this is not
// accounted for in the parallel load-balancing.
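// Dense reference sketch (assumption: illustration only, not part of
// GraphBLAS). For A m-by-n and B p-by-q stored column-major, kron places
// C(iA*p+iB, jA*q+jB) = A(iA,jA) * B(iB,jB), which is the same index map used
// below (iC = iA*bvlen + iB, jC = jA*bvdim + jB, vector kC = kA*bnvec + kB):
//
//      for (int jA = 0 ; jA < n ; jA++)
//      for (int jB = 0 ; jB < q ; jB++)
//      for (int iA = 0 ; iA < m ; iA++)
//      for (int iB = 0 ; iB < p ; iB++)
//          C [(jA*q + jB)*(m*p) + (iA*p + iB)] = A [jA*m + iA] * B [jB*p + iB] ;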
#include "GB_kron.h"
GrB_Info GB_kroner // C = kron (A,B)
(
GrB_Matrix *Chandle, // output matrix
const bool C_is_csc, // desired format of C
const GrB_BinaryOp op, // multiply operator
const GrB_Matrix A, // input matrix
const GrB_Matrix B, // input matrix
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (Chandle != NULL) ;
ASSERT_OK (GB_check (A, "A for kron (A,B)", GB0)) ;
ASSERT_OK (GB_check (B, "B for kron (A,B)", GB0)) ;
ASSERT_OK (GB_check (op, "op for kron (A,B)", GB0)) ;
ASSERT (!GB_PENDING (A)) ; ASSERT (!GB_ZOMBIES (A)) ;
ASSERT (!GB_PENDING (B)) ; ASSERT (!GB_ZOMBIES (B)) ;
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
GrB_Info info ;
(*Chandle) = NULL ;
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ah = A->h ;
const int64_t *restrict Ai = A->i ;
const GB_void *restrict Ax = A->x ;
const int64_t asize = A->type->size ;
const int64_t avlen = A->vlen ;
const int64_t avdim = A->vdim ;
int64_t anvec = A->nvec ;
int64_t anz = GB_NNZ (A) ;
const int64_t *restrict Bp = B->p ;
const int64_t *restrict Bh = B->h ;
const int64_t *restrict Bi = B->i ;
const GB_void *restrict Bx = B->x ;
const int64_t bsize = B->type->size ;
const int64_t bvlen = B->vlen ;
const int64_t bvdim = B->vdim ;
int64_t bnvec = B->nvec ;
int64_t bnz = GB_NNZ (B) ;
//--------------------------------------------------------------------------
// determine the number of threads to use
//--------------------------------------------------------------------------
double work = ((double) anz) * ((double) bnz)
+ (((double) anvec) * ((double) bnvec)) ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int nthreads = GB_nthreads (work, chunk, nthreads_max) ;
//--------------------------------------------------------------------------
// allocate the output matrix C
//--------------------------------------------------------------------------
// C has the same type as z for the multiply operator, z=op(x,y)
GrB_Index cvlen, cvdim, cnzmax, cnvec ;
bool ok = GB_Index_multiply (&cvlen, avlen, bvlen) ;
ok = ok & GB_Index_multiply (&cvdim, avdim, bvdim) ;
ok = ok & GB_Index_multiply (&cnzmax, anz, bnz) ;
ok = ok & GB_Index_multiply (&cnvec, anvec, bnvec) ;
ASSERT (ok) ;
// C is hypersparse if either A or B are hypersparse
bool C_is_hyper = (cvdim > 1) && (A->is_hyper || B->is_hyper) ;
GrB_Matrix C = NULL ; // allocate a new header for C
GB_CREATE (&C, op->ztype, (int64_t) cvlen, (int64_t) cvdim, GB_Ap_malloc,
C_is_csc, GB_SAME_HYPER_AS (C_is_hyper), B->hyper_ratio, cnvec,
cnzmax, true, Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
return (info) ;
}
//--------------------------------------------------------------------------
// get C
//--------------------------------------------------------------------------
int64_t *restrict Cp = C->p ;
int64_t *restrict Ch = C->h ;
int64_t *restrict Ci = C->i ;
GB_void *restrict Cx = C->x ;
const int64_t csize = C->type->size ;
GxB_binary_function fmult = op->function ;
GB_cast_function
cast_A = GB_cast_factory (op->xtype->code, A->type->code),
cast_B = GB_cast_factory (op->ytype->code, B->type->code) ;
//--------------------------------------------------------------------------
// compute the column counts of C, and C->h if C is hypersparse
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(guided) collapse(2)
for (int64_t kA = 0 ; kA < anvec ; kA++)
{
for (int64_t kB = 0 ; kB < bnvec ; kB++)
{
// get A(:,jA), the (kA)th vector of A
int64_t jA = (Ah == NULL) ? kA : Ah [kA] ;
int64_t aknz = Ap [kA+1] - Ap [kA] ;
// get B(:,jB), the (kB)th vector of B
int64_t jB = (Bh == NULL) ? kB : Bh [kB] ;
int64_t bknz = Bp [kB+1] - Bp [kB] ;
// determine # entries in C(:,jC), the (kC)th vector of C
int64_t kC = kA * bnvec + kB ;
Cp [kC] = aknz * bknz ;
if (C_is_hyper)
{
Ch [kC] = jA * bvdim + jB ;
}
}
}
//--------------------------------------------------------------------------
// replace Cp with its cumulative sum
//--------------------------------------------------------------------------
GB_cumsum (Cp, cnvec, &(C->nvec_nonempty), nthreads) ;
if (C_is_hyper) C->nvec = cnvec ;
C->magic = GB_MAGIC ;
//--------------------------------------------------------------------------
// C = kron (A,B)
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(guided) collapse(2)
for (int64_t kA = 0 ; kA < anvec ; kA++)
{
for (int64_t kB = 0 ; kB < bnvec ; kB++)
{
// get B(:,jB), the (kB)th vector of B
int64_t pB_start = Bp [kB] ;
int64_t pB_end = Bp [kB+1] ;
int64_t bknz = pB_end - pB_start ;
if (bknz == 0) continue ;
GB_void bwork [GB_PGI(bsize)] ;
// get C(:,jC), the (kC)th vector of C
int64_t kC = kA * bnvec + kB ;
int64_t pC = Cp [kC] ;
// get A(:,jA), the (kA)th vector of A
int64_t pA_start = Ap [kA] ;
int64_t pA_end = Ap [kA+1] ;
GB_void awork [GB_PGI(asize)] ;
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
// awork = A(iA,jA), typecasted to op->xtype
int64_t iA = Ai [pA] ;
int64_t iAblock = iA * bvlen ;
cast_A (awork, Ax +(pA*asize), asize) ;
for (int64_t pB = pB_start ; pB < pB_end ; pB++)
{
// bwork = B(iB,jB), typecasted to op->ytype
int64_t iB = Bi [pB] ;
cast_B (bwork, Bx +(pB*bsize), bsize) ;
// C(iC,jC) = A(iA,jA) * B(iB,jB)
int64_t iC = iAblock + iB ;
Ci [pC] = iC ;
fmult (Cx +(pC*csize), awork, bwork) ;
pC++ ;
}
}
}
}
//--------------------------------------------------------------------------
// remove empty vectors from C, if hypersparse
//--------------------------------------------------------------------------
if (C_is_hyper && C->nvec_nonempty < cnvec)
{
// create new Cp_new and Ch_new arrays, with no empty vectors
int64_t *restrict Cp_new = NULL ;
int64_t *restrict Ch_new = NULL ;
int64_t nvec_new ;
info = GB_hyper_prune (&Cp_new, &Ch_new, &nvec_new, C->p, C->h, cnvec,
Context) ;
if (info != GrB_SUCCESS)
{
// out of memory
GB_MATRIX_FREE (&C) ;
return (info) ;
}
// transplant the new hyperlist into C
GB_FREE_MEMORY (C->p, cnvec+1, sizeof (int64_t)) ;
GB_FREE_MEMORY (C->h, cnvec, sizeof (int64_t)) ;
C->p = Cp_new ;
C->h = Ch_new ;
C->nvec = nvec_new ;
C->plen = nvec_new ;
ASSERT (C->nvec == C->nvec_nonempty) ;
}
ASSERT (C->nvec_nonempty == GB_nvec_nonempty (C, Context)) ;
//--------------------------------------------------------------------------
// return result
//--------------------------------------------------------------------------
ASSERT_OK (GB_check (C, "C=kron(A,B)", GB0)) ;
(*Chandle) = C ;
return (GrB_SUCCESS) ;
}
|
pi-v15.c | /*
* Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x)
* between 0 and 1.
*
* parallel version using OpenMP
*/
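/*
 * The task bodies below implement the midpoint rule for this integral:
 *
 *   pi = integral_0^1 4/(1+x^2) dx  ~=  step * sum_{i=0}^{N-1} 4/(1 + x_i^2),
 *   with x_i = (i + 0.5)*step and step = 1/N.
 */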
#include <stdio.h>
#include <stdlib.h>
#include <omp.h> /* OpenMP */
#ifndef _DEBUG_
#define _DEBUG_ 0
#endif
int main(int argc, char *argv[]) {
double x, sum=0.0, pi=0.0;
#if !_DEBUG_
double start,end;
#endif
int i;
const char Usage[] = "Usage: pi <num_steps> (try 1000000000)\n";
if (argc < 2) {
fprintf(stderr, Usage);
exit(1);
}
int num_steps = atoi(argv[1]);
double step = 1.0/(double) num_steps;
#if !_DEBUG_
start= omp_get_wtime();
#endif
/* do computation */
// WARNING : correct code
#pragma omp parallel
#pragma omp single
{
#pragma omp task private(i,x) shared(sum)
for (i=0; i < num_steps/2; i++) {
x = (i+0.5)*step;
#pragma omp atomic
sum += 4.0/(1.0+x*x);
#if _DEBUG_
int id = omp_get_thread_num();
printf("thread id:%d it:%d\n",id,i);
#endif
}
#pragma omp task private(i,x) shared(sum)
for (i=num_steps/2; i < num_steps; i++) {
x = (i+0.5)*step;
#pragma omp atomic
sum += 4.0/(1.0+x*x);
#if _DEBUG_
int id = omp_get_thread_num();
printf("thread id:%d it:%d\n",id,i);
#endif
}
#pragma omp taskwait
#pragma omp task
pi = step * sum;
}
#if !_DEBUG_
end = omp_get_wtime();
printf("Wall clock execution time = %.9f seconds\n", end-start);
#endif
/* print results */
printf("Value of pi = %12.10f\n", pi);
return EXIT_SUCCESS;
}
|
task-taskwait-nested.c | /*
Copyright (c) 2015-2019, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Simone Atzeni (simone@cs.utah.edu), Joachim Protze
(joachim.protze@tu-dresden.de), Jonas Hahnfeld
(hahnfeld@itc.rwth-aachen.de), Ganesh Gopalakrishnan, Zvonimir
Rakamaric, Dong H. Ahn, Gregory L. Lee, Ignacio Laguna, and Martin
Schulz.
LLNL-CODE-773957
All rights reserved.
This file is part of Archer. For details, see
https://pruners.github.io/archer. Please also read
https://github.com/PRUNERS/archer/blob/master/LICENSE.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// RUN: %libarcher-compile-and-run-race | FileCheck %s
#include <omp.h>
#include <stdio.h>
#include <unistd.h>
int main(int argc, char* argv[])
{
int var = 0;
#pragma omp parallel num_threads(2) shared(var)
#pragma omp master
{
#pragma omp task shared(var)
{
#pragma omp task shared(var)
{
var++;
}
}
// Give other thread time to steal the task and execute its child.
sleep(1);
// Only directly generated children are guaranteed to be executed.
#pragma omp taskwait
var++;
}
int error = (var != 2);
fprintf(stderr, "DONE\n");
return error;
}
// CHECK: WARNING: ThreadSanitizer: data race
// CHECK: Write of size 4
// CHECK: #0 .omp_outlined.
// CHECK: Previous write of size 4
// CHECK: #0 .omp_outlined.
// CHECK: DONE
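// Note (assumption: illustrative sketch, not part of this race test):
// taskwait only waits for directly generated children, which is why the
// var++ after it races with the nested task. Waiting for all descendants
// would use a taskgroup instead:
//
//   #pragma omp taskgroup
//   {
//     #pragma omp task shared(var)
//     {
//       #pragma omp task shared(var)
//       { var++; }
//     }
//   } // the taskgroup also waits for the nested task
//   var++; // no race in this variant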
|
GB_binop__isle_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isle_fp32)
// A.*B function (eWiseMult): GB (_AemultB_01__isle_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__isle_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__isle_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_fp32)
// A*D function (colscale): GB (_AxD__isle_fp32)
// D*A function (rowscale): GB (_DxB__isle_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__isle_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__isle_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_fp32)
// C=scalar+B GB (_bind1st__isle_fp32)
// C=scalar+B' GB (_bind1st_tran__isle_fp32)
// C=A+scalar GB (_bind2nd__isle_fp32)
// C=A'+scalar GB (_bind2nd_tran__isle_fp32)
// C type: float
// A type: float
// B,b type: float
// BinaryOp: cij = (aij <= bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x <= y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLE || GxB_NO_FP32 || GxB_NO_ISLE_FP32)
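// Expansion example (assumption: illustrative only): inside the templates
// included below, a statement such as
//      GB_GETA (aij, Ax, p) ; GB_GETB (bij, Bx, p) ; GB_BINOP (GB_CX (p), aij, bij, i, j) ;
// expands with the macros above to
//      float aij = Ax [p] ; float bij = Bx [p] ; Cx [p] = (aij <= bij) ;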
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__isle_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__isle_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__isle_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__isle_fp32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__isle_fp32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__isle_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__isle_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__isle_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__isle_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__isle_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__isle_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = Bx [p] ;
Cx [p] = (x <= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__isle_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = Ax [p] ;
Cx [p] = (aij <= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (x <= aij) ; \
}
GrB_Info GB (_bind1st_tran__isle_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (aij <= y) ; \
}
GrB_Info GB (_bind2nd_tran__isle_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
base.c | /*
* `Pattern detection in large temporal graphs using algebraic fingerprints`
*
* This experimental source code is supplied to accompany the
* aforementioned paper.
*
* The source code is configured for a gcc build to a native
* microarchitecture that must support the AVX2 and PCLMULQDQ
* instruction set extensions. Other builds are possible but
* require manual configuration of 'Makefile' and 'builds.h'.
*
* The source code is subject to the following license.
*
* The MIT License (MIT)
*
* Copyright (c) 2019 S. Thejaswi, A. Gionis
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*
*/
#include<stdio.h>
#include<stdlib.h>
#include<assert.h>
#include<time.h>
#include<sys/utsname.h>
#include<string.h>
#include<stdarg.h>
#include<assert.h>
#include<ctype.h>
#include<omp.h>
/************************************************************* Configuration. */
#define MAX_K 32
#define MAX_SHADES 32
#define PREFETCH_PAD 32
#define MAX_THREADS 128
#define UNDEFINED -1
#define MATH_INF ((index_t)0x3FFFFFFF)
typedef long int index_t; // default to 64-bit indexing
typedef unsigned int shade_map_t;
#include"ffprng.h" // fast-forward pseudorandom number generator
typedef unsigned long scalar_t;
/********************************************************************* Flags. */
index_t flag_bin_input = 0; // default to ASCII input
/************************************************************* Common macros. */
/* Linked list navigation macros. */
#define pnlinknext(to,el) { (el)->next = (to)->next; (el)->prev = (to); (to)->next->prev = (el); (to)->next = (el); }
#define pnlinkprev(to,el) { (el)->prev = (to)->prev; (el)->next = (to); (to)->prev->next = (el); (to)->prev = (el); }
#define pnunlink(el) { (el)->next->prev = (el)->prev; (el)->prev->next = (el)->next; }
#define pnrelink(el) { (el)->next->prev = (el); (el)->prev->next = (el); }
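/* Usage sketch (assumption: illustrative only). The macros expect a circular
doubly-linked list built around a sentinel node with `prev`/`next` fields,
exactly as the malloc tracker below uses them:
malloc_track_t root;
root.prev = root.next = &root; // empty circular list
pnlinkprev(&root, node); // append node at the tail (just before the sentinel)
pnunlink(node); // remove it again
*/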
/*********************************************************** Error reporting. */
#define ERROR(...) error(__FILE__,__LINE__,__func__,__VA_ARGS__);
static void error(const char *fn, int line, const char *func,
const char *format, ...)
{
va_list args;
va_start(args, format);
fprintf(stderr,
"ERROR [file = %s, line = %d]\n"
"%s: ",
fn,
line,
func);
vfprintf(stderr, format, args);
fprintf(stderr, "\n");
va_end(args);
abort();
}
/********************************************************* Get the host name. */
#define MAX_HOSTNAME 256
const char *sysdep_hostname(void)
{
static char hn[MAX_HOSTNAME];
struct utsname undata;
uname(&undata);
strcpy(hn, undata.nodename);
return hn;
}
/********************************************************* Available threads. */
index_t num_threads(void)
{
#ifdef BUILD_PARALLEL
return omp_get_max_threads();
#else
return 1;
#endif
}
/********************************************** Memory allocation & tracking. */
#define MALLOC(x) malloc_wrapper(x)
#define FREE(x) free_wrapper(x)
index_t malloc_balance = 0;
struct malloc_track_struct
{
void *p;
size_t size;
struct malloc_track_struct *prev;
struct malloc_track_struct *next;
};
typedef struct malloc_track_struct malloc_track_t;
malloc_track_t malloc_track_root;
size_t malloc_total = 0;
#define MEMTRACK_STACK_CAPACITY 256
size_t memtrack_stack[MEMTRACK_STACK_CAPACITY];
index_t memtrack_stack_top = -1;
void *malloc_wrapper(size_t size)
{
if(malloc_balance == 0) {
malloc_track_root.prev = &malloc_track_root;
malloc_track_root.next = &malloc_track_root;
}
void *p = malloc(size);
if(p == NULL)
ERROR("malloc fails");
malloc_balance++;
malloc_track_t *t = (malloc_track_t *) malloc(sizeof(malloc_track_t));
t->p = p;
t->size = size;
pnlinkprev(&malloc_track_root, t);
malloc_total += size;
for(index_t i = 0; i <= memtrack_stack_top; i++)
if(memtrack_stack[i] < malloc_total)
memtrack_stack[i] = malloc_total;
return p;
}
void free_wrapper(void *p)
{
malloc_track_t *t = malloc_track_root.next;
for(;
t != &malloc_track_root;
t = t->next) {
if(t->p == p)
break;
}
if(t == &malloc_track_root)
ERROR("FREE issued on a non-tracked pointer %p", p);
malloc_total -= t->size;
pnunlink(t);
free(t);
free(p);
malloc_balance--;
}
index_t *alloc_idxtab(index_t n)
{
index_t *t = (index_t *) MALLOC(sizeof(index_t)*n);
return t;
}
void push_memtrack(void)
{
assert(memtrack_stack_top + 1 < MEMTRACK_STACK_CAPACITY);
memtrack_stack[++memtrack_stack_top] = malloc_total;
}
size_t pop_memtrack(void)
{
assert(memtrack_stack_top >= 0);
return memtrack_stack[memtrack_stack_top--];
}
size_t current_mem(void)
{
return malloc_total;
}
double inGiB(size_t s)
{
return (double) s / (1 << 30);
}
void print_current_mem(void)
{
fprintf(stdout, "{curr: %.2lfGiB}", inGiB(current_mem()));
fflush(stdout);
}
void print_pop_memtrack(void)
{
fprintf(stdout, "{peak: %.2lfGiB}", inGiB(pop_memtrack()));
fflush(stdout);
}
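/* Usage sketch (assumption: illustrative only): bracketing a phase with the
tracker above to report its peak allocation.
push_memtrack();
index_t *a = alloc_idxtab(1000000);
// ... work ...
FREE(a);
print_pop_memtrack(); // prints the peak reached since push_memtrack()
print_current_mem(); // prints what is still allocated now
*/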
/******************************************************** Timing subroutines. */
#define TIME_STACK_CAPACITY 256
double start_stack[TIME_STACK_CAPACITY];
index_t start_stack_top = -1;
void push_time(void)
{
assert(start_stack_top + 1 < TIME_STACK_CAPACITY);
start_stack[++start_stack_top] = omp_get_wtime();
}
double pop_time(void)
{
double wstop = omp_get_wtime();
assert(start_stack_top >= 0);
double wstart = start_stack[start_stack_top--];
return (double) (1000.0*(wstop-wstart));
}
/******************************************************************* Sorting. */
void shellsort(index_t n, index_t *a)
{
index_t h = 1;
index_t i;
for(i = n/3; h < i; h = 3*h+1)
;
do {
for(i = h; i < n; i++) {
index_t v = a[i];
index_t j = i;
do {
index_t t = a[j-h];
if(t <= v)
break;
a[j] = t;
j -= h;
} while(j >= h);
a[j] = v;
}
h /= 3;
} while(h > 0);
}
#define LEFT(x) (x<<1)
#define RIGHT(x) ((x<<1)+1)
#define PARENT(x) (x>>1)
void heapsort_indext(index_t n, index_t *a)
{
/* Shift index origin from 0 to 1 for convenience. */
a--;
/* Build heap */
for(index_t i = 2; i <= n; i++) {
index_t x = i;
while(x > 1) {
index_t y = PARENT(x);
if(a[x] <= a[y]) {
/* heap property ok */
break;
}
/* Exchange a[x] and a[y] to enforce heap property */
index_t t = a[x];
a[x] = a[y];
a[y] = t;
x = y;
}
}
/* Repeat delete max and insert */
for(index_t i = n; i > 1; i--) {
index_t t = a[i];
/* Delete max */
a[i] = a[1];
/* Insert t */
index_t x = 1;
index_t y, z;
while((y = LEFT(x)) < i) {
z = RIGHT(x);
if(z < i && a[y] < a[z]) {
index_t s = z;
z = y;
y = s;
}
/* Invariant: a[y] >= a[z] */
if(t >= a[y]) {
/* ok to insert here without violating heap property */
break;
}
/* Move a[y] up the heap */
a[x] = a[y];
x = y;
}
/* Insert here */
a[x] = t;
}
}
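/* Worked example (assumption: illustrative only): both routines sort
ascending, in place.
index_t a[5] = {4, 1, 3, 5, 2};
heapsort_indext(5, a); // a becomes {1, 2, 3, 4, 5}
shellsort(5, a); // already sorted; same result
*/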
/******************************************************* Bitmap manipulation. */
void bitset(index_t *map, index_t j, index_t value)
{
assert((value & (~1UL)) == 0);
map[j/64] = (map[j/64] & ~(1UL << (j%64))) | ((value&1) << (j%64));
}
index_t bitget(index_t *map, index_t j)
{
return (map[j/64]>>(j%64))&1UL;
}
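/* Usage sketch (assumption: illustrative only): a bitmap of n bits occupies
(n+63)/64 index_t words, 64 bits per word.
index_t n = 1000;
index_t *map = alloc_idxtab((n+63)/64);
for(index_t j = 0; j < (n+63)/64; j++) map[j] = 0;
bitset(map, 42, 1);
assert(bitget(map, 42) == 1);
FREE(map);
*/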
/******************************************************************** Stack. */
typedef struct stack_node {
index_t u;
//index_t l;
index_t t;
} stack_node_t;
typedef struct stack {
index_t size; // size of stack
index_t n; // number of elements
stack_node_t *a;
}stk_t;
stk_t * stack_alloc(index_t size)
{
stk_t *s = (stk_t *) malloc(sizeof(stk_t));
s->size = size;
s->n = 0;
s->a = (stack_node_t *) malloc(s->size*sizeof(stack_node_t));
return s;
}
void stack_free(stk_t *s)
{
free(s->a);
free(s);
}
void stack_push(stk_t *s, stack_node_t *e_in)
{
assert(s->n < s->size);
stack_node_t *e = s->a + s->n;
e->u = e_in->u;
//e->l = e_in->l;
e->t = e_in->t;
s->n++;
}
void stack_pop(stk_t *s, stack_node_t *e_out)
{
assert(s->n > 0);
s->n--;
stack_node_t *e = s->a + s->n;
e_out->u = e->u;
//e_out->l = e->l;
e_out->t = e->t;
}
void stack_top(stk_t *s, stack_node_t *e_out)
{
assert(s->n >= 0);
stack_node_t *e = s->a + s->n-1;
e_out->u = e->u;
//e_out->l = e->l;
e_out->t = e->t;
}
void stack_empty(stk_t *s)
{
#ifdef DEBUG
for(index_t i = 0; i < s->n; i++) {
stack_node_t *e = s->a + i;
e->u = UNDEFINED;
//e.l = UNDEFINED;
e->t = UNDEFINED;
}
#endif
s->n = 0;
}
void stack_get_vertices(stk_t *s, index_t *uu)
{
for(index_t i = 0; i < s->n; i++) {
stack_node_t *e = s->a + i;
uu[i] = e->u;
}
}
void stack_get_timestamps(stk_t *s, index_t *tt)
{
for(index_t i = 0; i < s->n; i++) {
stack_node_t *e = s->a + i;
tt[i] = e->t;
}
}
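/* Usage sketch (assumption: illustrative only):
stk_t *s = stack_alloc(16);
stack_node_t e = { .u = 3, .t = 7 };
stack_push(s, &e);
stack_node_t top;
stack_top(s, &top); // top.u == 3, top.t == 7
stack_pop(s, &top);
stack_free(s);
*/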
#ifdef DEBUG
void print_stack(stk_t *s)
{
fprintf(stdout, "-----------------------------------------------\n");
fprintf(stdout, "print stack\n");
fprintf(stdout, "-----------------------------------------------\n");
fprintf(stdout, "size: %ld\n", s->size);
fprintf(stdout, "n: %ld\n", s->n);
fprintf(stdout, "a: ");
for(index_t i = 0; i < s->n; i++) {
stack_node_t *e = s->a + i;
fprintf(stdout, "[%ld, %ld, %ld]%s",
e->u==UNDEFINED ? UNDEFINED : e->u+1,
e->l, e->t, (i==s->n-1)?"\n":" ");
}
fprintf(stdout, "-----------------------------------------------\n");
}
void print_stacknode(stack_node_t *e)
{
fprintf(stdout, "print stack-node: [%ld, %ld, %ld]\n", e->u, e->l, e->t);
}
#endif
/*************************************************** Random numbers and such. */
index_t irand(void)
{
return (((index_t) rand())<<31)^((index_t) rand());
}
index_t randnum(index_t range)
{
return ((((index_t) rand())<<31)^((index_t) rand())) % range;
}
void randseq(index_t n, index_t range, index_t seed, index_t *a)
{
ffprng_t base;
FFPRNG_INIT(base, seed);
index_t nt = num_threads();
index_t block_size = n/nt;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t t = 0; t < nt; t++) {
ffprng_t gen;
index_t start = t*block_size;
index_t stop = (t == nt-1) ? n-1 : (start+block_size-1);
FFPRNG_FWD(gen, start, base);
for(index_t i = start; i <= stop; i++) {
ffprng_scalar_t rnd;
FFPRNG_RAND(rnd, gen);
index_t rs = (index_t) (rnd&0X7FFFFFFFFFFFFFFF);
a[i] = rs%range;
}
}
}
// returns a number in the range [min, max]
index_t randrange(index_t min, index_t max)
{
return irand()%(max + 1 - min) + min;
}
void randshuffle_seq(index_t n, index_t *p, ffprng_t gen)
{
for(index_t i = 0; i < n-1; i++) {
ffprng_scalar_t rnd;
FFPRNG_RAND(rnd, gen);
index_t x = i+(rnd%(n-i));
index_t t = p[x];
p[x] = p[i];
p[i] = t;
}
}
void randperm(index_t n, index_t seed, index_t *p)
{
#ifdef BUILD_PARALLEL
index_t nt = 64;
#else
index_t nt = 1;
#endif
index_t block_size = n/nt;
index_t f[128][128];
assert(nt < 128);
ffprng_t base;
FFPRNG_INIT(base, seed);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t t = 0; t < nt; t++) {
for(index_t j = 0; j < nt; j++)
f[t][j] = 0;
index_t start = t*block_size;
index_t stop = (t == nt-1) ? n-1 : (start+block_size-1);
ffprng_t gen;
FFPRNG_FWD(gen, start, base);
for(index_t i = start; i <= stop; i++) {
ffprng_scalar_t rnd;
FFPRNG_RAND(rnd, gen);
index_t bin = (index_t) ((unsigned long) rnd)%((unsigned long)nt);
f[t][bin]++;
}
}
for(index_t bin = 0; bin < nt; bin++) {
for(index_t t = 1; t < nt; t++) {
f[0][bin] += f[t][bin];
}
}
index_t run = 0;
for(index_t j = 1; j <= nt; j++) {
index_t fp = f[0][j-1];
f[0][j-1] = run;
run += fp;
}
f[0][nt] = run;
FFPRNG_INIT(base, seed);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t t = 0; t < nt; t++) {
ffprng_t gen;
index_t start = 0;
index_t stop = n-1;
index_t pos = f[0][t];
FFPRNG_FWD(gen, start, base);
for(index_t i = start; i <= stop; i++) {
ffprng_scalar_t rnd;
FFPRNG_RAND(rnd, gen);
index_t bin = (index_t) ((unsigned long) rnd)%((unsigned long)nt);
if(bin == t)
p[pos++] = i;
}
assert(pos == f[0][t+1]);
}
FFPRNG_INIT(base, (seed^0x9078563412EFDCABL));
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t t = 0; t < nt; t++) {
ffprng_t fwd, gen;
index_t start = f[0][t];
index_t stop = f[0][t+1]-1;
index_t u;
FFPRNG_FWD(fwd, (1234567890123456L*t), base);
FFPRNG_RAND(u, fwd);
FFPRNG_INIT(gen, u);
randshuffle_seq(stop-start+1, p + start, gen);
}
}
void rand_nums(index_t seed, index_t n, index_t *p)
{
#ifdef BUILD_PARALLEL
index_t nt = num_threads();
index_t block_size = n/nt;
#pragma omp parallel for
for(index_t th = 0; th < nt; th++) {
index_t start = th*block_size;
index_t stop = (th == nt-1) ? n-1 : (start+block_size-1);
srand(seed+th);
for(index_t i = start; i <= stop; i++)
p[i] = rand();
}
#else
srand(seed);
for(index_t i = 0; i < n; i++)
p[i] = rand();
#endif
}
/***************************************************** (Parallel) prefix sum. */
index_t prefixsum(index_t n, index_t *a, index_t k)
{
#ifdef BUILD_PARALLEL
index_t s[MAX_THREADS];
index_t nt = num_threads();
assert(nt < MAX_THREADS);
index_t length = n;
index_t block_size = length/nt;
#pragma omp parallel for
for(index_t t = 0; t < nt; t++) {
index_t start = t*block_size;
index_t stop = (t == nt-1) ? length-1 : (start+block_size-1);
index_t tsum = (stop-start+1)*k;
for(index_t u = start; u <= stop; u++)
tsum += a[u];
s[t] = tsum;
}
index_t run = 0;
for(index_t t = 1; t <= nt; t++) {
index_t v = s[t-1];
s[t-1] = run;
run += v;
}
s[nt] = run;
#pragma omp parallel for
for(index_t t = 0; t < nt; t++) {
index_t start = t*block_size;
index_t stop = (t == nt-1) ? length-1 : (start+block_size-1);
index_t trun = s[t];
for(index_t u = start; u <= stop; u++) {
index_t tv = a[u];
a[u] = trun;
trun += tv + k;
}
assert(trun == s[t+1]);
}
#else
index_t run = 0;
for(index_t u = 0; u < n; u++) {
index_t tv = a[u];
a[u] = run;
run += tv + k;
}
#endif
return run;
}
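/* Worked example (assumption: illustrative only): prefixsum() turns per-item
counts into exclusive offsets and returns the grand total, reserving k extra
slots per item.
index_t a[3] = {2, 0, 3};
index_t total = prefixsum(3, a, 0); // a becomes {0, 2, 2}, total == 5
// with k == 1: {2, 0, 3} -> {0, 3, 4}, total == 8
*/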
/************************************************************* Parallel sum. */
index_t parallelsum(index_t n, index_t *a)
{
index_t sum = 0;
#ifdef BUILD_PARALLEL
index_t s[MAX_THREADS];
index_t nt = num_threads();
assert(nt < MAX_THREADS);
index_t length = n;
index_t block_size = length/nt;
#pragma omp parallel for
for(index_t t = 0; t < nt; t++) {
index_t start = t*block_size;
index_t stop = (t == nt-1) ? length-1 : (start+block_size-1);
index_t tsum = 0;
for(index_t u = start; u <= stop; u++)
tsum += a[u];
s[t] = tsum;
}
for(index_t t = 0; t < nt; t++)
sum += s[t];
#else
for(index_t i = 0; i < n; i++) {
sum += a[i];
}
#endif
return sum;
}
/********************************** Initialize an array with random scalars. */
void randinits_scalar(scalar_t *a, index_t s, ffprng_scalar_t seed)
{
ffprng_t base;
FFPRNG_INIT(base, seed);
index_t nt = num_threads();
index_t block_size = s/nt;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t t = 0; t < nt; t++) {
ffprng_t gen;
index_t start = t*block_size;
index_t stop = (t == nt-1) ? s-1 : (start+block_size-1);
FFPRNG_FWD(gen, start, base);
for(index_t i = start; i <= stop; i++) {
ffprng_scalar_t rnd;
FFPRNG_RAND(rnd, gen);
scalar_t rs = (scalar_t) rnd;
a[i] = rs;
}
}
}
/************************************************* Rudimentary graph builder. */
typedef struct
{
index_t is_directed;
index_t num_vertices;
index_t num_edges;
index_t max_time;
index_t edge_capacity;
index_t *edges;
index_t *colors;
} graph_t;
static index_t *enlarge(index_t m, index_t m_was, index_t *was)
{
assert(m >= 0 && m_was >= 0);
index_t *a = (index_t *) MALLOC(sizeof(index_t)*m);
index_t i;
if(was != (void *) 0) {
for(i = 0; i < m_was; i++) {
a[i] = was[i];
}
FREE(was);
}
return a;
}
graph_t *graph_alloc(index_t n)
{
assert(n >= 0);
index_t i;
graph_t *g = (graph_t *) MALLOC(sizeof(graph_t));
g->is_directed = 0; // default: undirected graph
g->num_vertices = n;
g->num_edges = 0;
g->edge_capacity = 100;
g->edges = enlarge(3*g->edge_capacity, 0, (index_t *) 0);
g->colors = (index_t *) MALLOC(sizeof(index_t)*n);
for(i = 0; i < n; i++)
g->colors[i] = UNDEFINED;
return g;
}
void graph_free(graph_t *g)
{
FREE(g->edges);
FREE(g->colors);
FREE(g);
}
void graph_add_edge(graph_t *g, index_t u, index_t v, index_t t)
{
assert(u >= 0 &&
v >= 0 &&
u < g->num_vertices &&
v < g->num_vertices);
assert(t>=0);
//assert(t>=0 && t < g->max_time);
if(g->num_edges == g->edge_capacity) {
g->edges = enlarge(6*g->edge_capacity, 3*g->edge_capacity, g->edges);
g->edge_capacity *= 2;
}
assert(g->num_edges < g->edge_capacity);
index_t *e = g->edges + 3*g->num_edges;
e[0] = u;
e[1] = v;
e[2] = t;
g->num_edges++;
}
index_t *graph_edgebuf(graph_t *g, index_t cap)
{
g->edges = enlarge(3*g->edge_capacity+3*cap, 3*g->edge_capacity, g->edges);
index_t *e = g->edges + 3*g->num_edges;
g->edge_capacity += cap;
g->num_edges += cap;
return e;
}
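/* Illustrative usage sketch (added commentary, not part of the original
 * program; the EXAMPLE_EDGEBUF guard and the fill_star() helper are
 * hypothetical). graph_edgebuf() bulk-reserves `cap` edge records and
 * returns a pointer to the first unfilled (u, v, t) triple, so a caller
 * can write edges directly instead of invoking graph_add_edge() once per
 * edge; all reserved triples must be filled before the graph is used. */
#ifdef EXAMPLE_EDGEBUF
static void fill_star(graph_t *g, index_t centre, index_t cap)
{
    index_t *e = graph_edgebuf(g, cap);              /* reserve cap slots  */
    for(index_t i = 0; i < cap; i++) {
        e[3*i + 0] = centre;                             /* source vertex  */
        e[3*i + 1] = (centre + i + 1) % g->num_vertices; /* target vertex  */
        e[3*i + 2] = i;                                  /* timestamp      */
    }
}
#endif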
void graph_set_color(graph_t *g, index_t u, index_t c)
{
assert(u >= 0 && u < g->num_vertices && c >= 0);
g->colors[u] = c;
}
void graph_set_is_directed(graph_t *g, index_t is_dir)
{
assert(is_dir == 0 || is_dir == 1);
g->is_directed = is_dir;
}
void graph_set_max_time(graph_t *g, index_t tmax)
{
assert(tmax > 0);
g->max_time = tmax;
}
#ifdef DEBUG
void print_graph(graph_t *g)
{
index_t n = g->num_vertices;
index_t m = g->num_edges;
index_t tmax = g->max_time;
fprintf(stdout, "p motif %ld %ld %ld\n", n, m, tmax);
index_t *e = g->edges;
for(index_t i = 0; i < 3*m; i+=3) {
fprintf(stdout, "e %ld %ld %ld\n",
e[i]+1, e[i+1]+1, e[i+2]+1);
}
index_t *c = g->colors;
for(index_t i = 0; i < n; i++)
fprintf(stdout, "n %ld %ld\n", i+1, c[i]+1);
}
#endif
/************************************* Basic motif query processing routines. */
struct temppathq_struct
{
index_t is_stub;
index_t n;
index_t k;
index_t tmax;
index_t *pos;
index_t *adj;
index_t nl;
index_t *l;
index_t ns;
shade_map_t *shade;
index_t *color;
};
typedef struct temppathq_struct temppathq_t;
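/* Layout note and traversal sketch (added commentary, not part of the
 * original program; the EXAMPLE_ADJ_TRAVERSAL guard is hypothetical).
 * The pos/adj pair encodes one adjacency list per (vertex, timestamp):
 * pos[n*t + u] is an offset into adj where adj[pos[n*t + u]] holds the
 * degree of u at time t, immediately followed by that many neighbour
 * indices. A scan over all edges active at time t therefore looks like
 * the helper below. */
#ifdef EXAMPLE_ADJ_TRAVERSAL
static index_t degree_sum_at_time(index_t n, index_t t,
                                  const index_t *pos, const index_t *adj)
{
    index_t total = 0;
    const index_t *pos_t = pos + n*t;          /* offsets for timestamp t  */
    for(index_t u = 0; u < n; u++) {
        index_t pu = pos_t[u];                 /* slot holding deg(u) at t */
        index_t nu = adj[pu];                  /* number of neighbours     */
        const index_t *adj_u = adj + pu + 1;   /* neighbour indices follow */
        for(index_t i = 0; i < nu; i++)
            total += (adj_u[i] >= 0);          /* visit neighbour adj_u[i] */
    }
    return total;
}
#endif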
void adjsort(index_t n, index_t *pos, index_t *adj)
{
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < n; u++) {
index_t pu = pos[u];
index_t deg = adj[pu];
heapsort_indext(deg, adj + pu + 1);
}
}
void temppathq_free(temppathq_t *q)
{
if(!q->is_stub) {
FREE(q->pos);
FREE(q->adj);
FREE(q->l);
FREE(q->shade);
FREE(q->color);
}
FREE(q);
}
#ifdef DEBUG
void print_temppathq(temppathq_t *root)
{
index_t n = root->n;
index_t k = root->k;
index_t tmax = root->tmax;
index_t *pos = root->pos;
index_t *adj = root->adj;
fprintf(stdout, "-----------------------------------------------\n");
fprintf(stdout, "printing temppathq\n");
fprintf(stdout, "is_stub = %ld\n", root->is_stub);
fprintf(stdout, "n = %ld\n", n);
fprintf(stdout, "k = %ld\n", k);
fprintf(stdout, "tmax = %ld\n", tmax);
fprintf(stdout, "pos\n");
fprintf(stdout, "----\n ");
for(index_t i = 0; i < n*tmax; i++) {
fprintf(stdout, "%ld%s", pos[i], i%n==n-1 ? "\n ":" ");
}
fprintf(stdout, "adjacency list:\n");
fprintf(stdout, "---------------\n");
for(index_t t = 0; t < tmax; t++) {
fprintf(stdout, "t: %ld\n", t+1);
fprintf(stdout, "---------------\n");
index_t *pos_t = pos + n*t;
for(index_t u = 0; u < n; u++) {
index_t pu = pos_t[u];
index_t nu = adj[pu];
index_t *adj_u = adj + pu + 1;
fprintf(stdout, "%4ld:", u+1);
for(index_t i = 0; i < nu; i++) {
fprintf(stdout, " %4ld", adj_u[i]+1);
}
fprintf(stdout, "\n");
}
}
index_t nl = root->nl;
index_t *l = root->l;
fprintf(stdout, "nl = %ld\n", nl);
fprintf(stdout, "l:\n");
for(index_t i = 0; i < nl; i++)
fprintf(stdout, "%8ld : %8ld\n", nl, l[i]);
index_t ns = root ->ns;
shade_map_t *shade = root->shade;
fprintf(stdout, "ns : %ld\n", ns);
fprintf(stdout, "shades:\n");
for(index_t u = 0; u < n; u++)
fprintf(stdout, "%10ld : 0x%08X\n", u+1, shade[u]);
index_t *color = root->color;
fprintf(stdout, "color:\n");
for(index_t u = 0; u < n; u++)
fprintf(stdout, "%10ld: %4ld\n", u+1, color[u]);
fprintf(stdout, "-----------------------------------------------\n");
}
void print_array(const char *name, index_t n, index_t *a, index_t offset)
{
fprintf(stdout, "%s (%ld):", name, n);
for(index_t i = 0; i < n; i++) {
fprintf(stdout, " %ld", a[i] == -1 ? -1 : a[i]+offset);
}
fprintf(stdout, "\n");
}
#endif
/******************************************************** Root query builder. */
// Query builder for directed graphs
//
temppathq_t *build_temppathq_dir(graph_t *g, index_t k, index_t *kk)
{
push_memtrack();
index_t n = g->num_vertices;
index_t m = g->num_edges;
index_t tmax = g->max_time;
index_t *pos = alloc_idxtab(n*tmax);
index_t *adj = alloc_idxtab(n*tmax+2*m);
index_t ns = k;
shade_map_t *shade = (shade_map_t *) MALLOC(sizeof(shade_map_t)*n);
index_t *color = alloc_idxtab(n);
temppathq_t *root = (temppathq_t *) MALLOC(sizeof(temppathq_t));
root->is_stub = 0;
root->n = g->num_vertices;
root->k = k;
root->tmax = tmax;
root->pos = pos;
root->adj = adj;
root->nl = 0;
root->l = (index_t *) MALLOC(sizeof(index_t)*root->nl);
root->ns = ns;
root->shade = shade;
root->color = color;
assert(tmax >= k-1);
push_time();
fprintf(stdout, "build query: ");
fflush(stdout);
push_time();
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < n*tmax; u++)
pos[u] = 0;
double time = pop_time();
fprintf(stdout, "[zero: %.2lf ms] ", time);
fflush(stdout);
push_time();
index_t *e = g->edges;
#ifdef BUILD_PARALLEL
// Parallel occurrence count
// -- each thread is responsible for a group of bins,
// all threads scan the entire list of edges
index_t nt = num_threads();
index_t block_size = n/nt;
#pragma omp parallel for
for(index_t th = 0; th < nt; th++) {
index_t start = th*block_size;
index_t stop = (th == nt-1) ? n-1 : (start+block_size-1);
for(index_t j = 0; j < 3*m; j+=3) {
//index_t u = e[j];
index_t v = e[j+1];
index_t t = e[j+2];
index_t *pos_t = (pos + (n*t));
//if(start <= u && u <= stop) {
// // I am responsible for u, record adjacency to u
// pos_t[u]++;
//}
if(start <= v && v <= stop) {
// I am responsible for v, record adjacency to v
pos_t[v]++;
}
}
}
#else
for(index_t j = 0; j < 3*m; j+=3) {
//index_t u = e[j];
index_t v = e[j+1];
index_t t = e[j+2];
index_t *pos_t = pos + n*t;
//pos_t[u]++;
pos_t[v]++;
}
#endif
index_t run = prefixsum(n*tmax, pos, 1);
assert(run == (n*tmax+m));
time = pop_time();
fprintf(stdout, "[pos: %.2lf ms] ", time);
fflush(stdout);
push_time();
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < n*tmax; u++)
adj[pos[u]] = 0;
e = g->edges;
#ifdef BUILD_PARALLEL
// Parallel aggregation to bins
// -- each thread is responsible for a group of bins,
// all threads scan the entire list of edges
nt = num_threads();
block_size = n/nt;
#pragma omp parallel for
for(index_t th = 0; th < nt; th++) {
index_t start = th*block_size;
index_t stop = (th == nt-1) ? n-1 : (start+block_size-1);
for(index_t j = 0; j < 3*m; j+=3) {
index_t u = e[j+0];
index_t v = e[j+1];
index_t t = e[j+2];
//if(start <= u && u <= stop) {
// // I am responsible for u, record adjacency to u
// index_t pu = pos[n*t+u];
// adj[pu + 1 + adj[pu]++] = v;
//}
if(start <= v && v <= stop) {
// I am responsible for v, record adjacency to v
index_t pv = pos[n*t+v];
adj[pv + 1 + adj[pv]++] = u;
}
}
}
#else
for(index_t j = 0; j < 3*m; j+=3) {
index_t u = e[j+0];
index_t v = e[j+1];
index_t t = e[j+2];
//index_t pu = pos[n*t+u];
index_t pv = pos[n*t+v];
//adj[pu + 1 + adj[pu]++] = v;
adj[pv + 1 + adj[pv]++] = u;
}
#endif
time = pop_time();
fprintf(stdout, "[adj: %.2lf ms] ", time);
fflush(stdout);
//print_temppathq(root);
push_time();
adjsort(n*tmax, pos, adj);
time = pop_time();
fprintf(stdout, "[adjsort: %.2lf ms] ", time);
fflush(stdout);
push_time();
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < n; u++) {
shade_map_t s = 0;
for(index_t j = 0; j < k; j++)
if(g->colors[u] == kk[j])
s |= 1UL << j;
shade[u] = s;
//fprintf(stdout, "%4ld: 0x%08X\n", u, shade[u]);
}
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < n; u++)
color[u] = g->colors[u];
time = pop_time();
fprintf(stdout, "[shade: %.2lf ms] ", time);
fflush(stdout);
time = pop_time();
fprintf(stdout, "done. [%.2lf ms] ", time);
print_pop_memtrack();
fprintf(stdout, " ");
print_current_mem();
fprintf(stdout, "\n");
fflush(stdout);
return root;
}
// Query builder for undirected graphs
//
temppathq_t *build_temppathq(graph_t *g, index_t k, index_t *kk)
{
push_memtrack();
index_t n = g->num_vertices;
index_t m = g->num_edges;
index_t tmax = g->max_time;
index_t *pos = alloc_idxtab(n*tmax);
index_t *adj = alloc_idxtab(n*tmax+2*m);
index_t ns = k;
shade_map_t *shade = (shade_map_t *) MALLOC(sizeof(shade_map_t)*n);
index_t *color = alloc_idxtab(n);
temppathq_t *root = (temppathq_t *) MALLOC(sizeof(temppathq_t));
root->is_stub = 0;
root->n = g->num_vertices;
root->k = k;
root->tmax = tmax;
root->pos = pos;
root->adj = adj;
root->nl = 0;
root->l = (index_t *) MALLOC(sizeof(index_t)*root->nl);
root->ns = ns;
root->shade = shade;
root->color = color;
assert(tmax >= k-1);
push_time();
fprintf(stdout, "build query: ");
fflush(stdout);
push_time();
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < n*tmax; u++)
pos[u] = 0;
double time = pop_time();
fprintf(stdout, "[zero: %.2lf ms] ", time);
fflush(stdout);
push_time();
index_t *e = g->edges;
#ifdef BUILD_PARALLEL
// Parallel occurrence count
// -- each thread is responsible for a group of bins,
// all threads scan the entire list of edges
index_t nt = num_threads();
index_t block_size = n/nt;
#pragma omp parallel for
for(index_t th = 0; th < nt; th++) {
index_t start = th*block_size;
index_t stop = (th == nt-1) ? n-1 : (start+block_size-1);
for(index_t j = 0; j < 3*m; j+=3) {
index_t u = e[j];
index_t v = e[j+1];
index_t t = e[j+2];
index_t *pos_t = (pos + (n*t));
if(start <= u && u <= stop) {
// I am responsible for u, record adjacency to u
pos_t[u]++;
}
if(start <= v && v <= stop) {
// I am responsible for v, record adjacency to v
pos_t[v]++;
}
}
}
#else
for(index_t j = 0; j < 3*m; j+=3) {
index_t u = e[j];
index_t v = e[j+1];
index_t t = e[j+2];
index_t *pos_t = pos + n*t;
pos_t[u]++;
pos_t[v]++;
}
#endif
index_t run = prefixsum(n*tmax, pos, 1);
assert(run == (n*tmax+2*m));
time = pop_time();
fprintf(stdout, "[pos: %.2lf ms] ", time);
fflush(stdout);
push_time();
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < n*tmax; u++) {
adj[pos[u]] = 0;
}
e = g->edges;
#ifdef BUILD_PARALLEL
// Parallel aggregation to bins
// -- each thread is responsible for a group of bins,
// all threads scan the entire list of edges
nt = num_threads();
block_size = n/nt;
#pragma omp parallel for
for(index_t th = 0; th < nt; th++) {
index_t start = th*block_size;
index_t stop = (th == nt-1) ? n-1 : (start+block_size-1);
for(index_t j = 0; j < 3*m; j+=3) {
index_t u = e[j+0];
index_t v = e[j+1];
index_t t = e[j+2];
if(start <= u && u <= stop) {
// I am responsible for u, record adjacency to u
index_t pu = pos[n*t+u];
adj[pu + 1 + adj[pu]++] = v;
}
if(start <= v && v <= stop) {
// I am responsible for v, record adjacency to v
index_t pv = pos[n*t+v];
adj[pv + 1 + adj[pv]++] = u;
}
}
}
#else
for(index_t j = 0; j < 3*m; j+=3) {
index_t u = e[j+0];
index_t v = e[j+1];
index_t t = e[j+2];
index_t pu = pos[n*t+u];
index_t pv = pos[n*t+v];
adj[pu + 1 + adj[pu]++] = v;
adj[pv + 1 + adj[pv]++] = u;
}
#endif
time = pop_time();
fprintf(stdout, "[adj: %.2lf ms] ", time);
fflush(stdout);
//print_temppathq(root);
push_time();
adjsort(n*tmax, pos, adj);
time = pop_time();
fprintf(stdout, "[adjsort: %.2lf ms] ", time);
fflush(stdout);
push_time();
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < n; u++) {
shade_map_t s = 0;
for(index_t j = 0; j < k; j++)
if(g->colors[u] == kk[j])
s |= 1UL << j;
shade[u] = s;
// fprintf(stdout, "%4ld: 0x%08X\n", u, shade[u]);
}
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < n; u++)
color[u] = g->colors[u];
time = pop_time();
fprintf(stdout, "[shade: %.2lf ms] ", time);
fflush(stdout);
time = pop_time();
fprintf(stdout, "done. [%.2lf ms] ", time);
print_pop_memtrack();
fprintf(stdout, " ");
print_current_mem();
fprintf(stdout, "\n");
fflush(stdout);
return root;
}
void query_pre_mk1(temppathq_t *in, temppathq_t **out_q, index_t **out_map)
{
push_memtrack();
index_t nt = num_threads();
index_t i_n = in->n;
index_t k = in->k;
index_t tmax = in->tmax;
index_t *i_pos = in->pos;
index_t *i_adj = in->adj;
index_t ns = in->ns;
shade_map_t *i_shade = in->shade;
index_t *i_color = in->color;
push_time();
fprintf(stdout, "query pre [1]: ");
fflush(stdout);
push_time();
// input-to-output vertex map
index_t *v_map_i2o = (index_t *) MALLOC(sizeof(index_t)*i_n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < i_n; u++)
v_map_i2o[u] = UNDEFINED;
index_t v_cnt = 0;
#ifdef BUILD_PARALLEL
// construct input-to-output vertex map in parallel
index_t block_size = i_n/nt;
index_t t_vcnt[nt];
#pragma omp parallel for
for(index_t th = 0; th < nt; th++) {
index_t start = th*block_size;
index_t stop = (th == nt-1) ? i_n-1 : (start+block_size-1);
t_vcnt[th] = 0;
for(index_t u = start; u <= stop; u++) {
if(i_shade[u])
v_map_i2o[u] = t_vcnt[th]++;
}
}
// prefix sum
for(index_t th = 1; th < nt; th++)
t_vcnt[th] += t_vcnt[th-1];
#pragma omp parallel for
for(index_t th = 0; th < nt; th++) {
index_t start = th*block_size;
index_t stop = (th == nt-1) ? i_n-1 : (start+block_size-1);
index_t tsum = (th==0 ? 0 : t_vcnt[th-1]);
for(index_t u = start; u <= stop; u++) {
if(i_shade[u])
v_map_i2o[u] += tsum;
}
}
v_cnt = t_vcnt[nt-1];
#else
// serially construct input-to-output vertex map
for(index_t u = 0; u < i_n; u++) {
if(i_shade[u])
v_map_i2o[u] = v_cnt++;
}
#endif
// output-to-input vertex map
// required to reconstruct solution in original graph
index_t o_n = v_cnt;
index_t *v_map_o2i = (index_t *) MALLOC(sizeof(index_t)*o_n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < i_n; u++) {
index_t o_u = v_map_i2o[u];
if(o_u != UNDEFINED)
v_map_o2i[o_u] = u;
}
fprintf(stdout, "[map: %.2lf ms] ", pop_time());
fflush(stdout);
push_time();
// output position list
index_t *o_pos = alloc_idxtab(o_n*tmax);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < o_n*tmax; u++)
o_pos[u] = 0;
for(index_t t = 0; t < tmax; t++) {
index_t *o_pos_t = o_pos + o_n*t;
index_t *i_pos_t = i_pos + i_n*t;
index_t block_size = i_n/nt;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t th = 0; th < nt; th++) {
index_t start = th*block_size;
index_t stop = (th == nt-1) ? i_n-1 : (start+block_size-1);
for(index_t u = start; u <= stop; u++) {
index_t o_u = v_map_i2o[u];
if(o_u == UNDEFINED) continue;
index_t i_pu = i_pos_t[u];
index_t i_nu = i_adj[i_pu];
index_t *i_adj_u = i_adj + i_pu;
for(index_t j = 1; j <= i_nu; j++) {
index_t v = i_adj_u[j];
index_t o_v = v_map_i2o[v];
if(o_v == UNDEFINED) continue;
o_pos_t[o_u]++;
}
}
}
}
index_t o_m = parallelsum(o_n*tmax, o_pos);
index_t run = prefixsum(o_n*tmax, o_pos, 1);
assert(run == (o_n*tmax+o_m));
fprintf(stdout, "[pos: %.2lf ms] ", pop_time());
fflush(stdout);
push_time();
// output adjacency list
index_t *o_adj = alloc_idxtab(o_n*tmax + o_m);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < o_n*tmax; u++)
o_adj[o_pos[u]] = 0;
for(index_t t = 0; t < tmax; t++) {
index_t *o_pos_t = o_pos + o_n*t;
index_t *i_pos_t = i_pos + i_n*t;
index_t block_size = i_n/nt;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t th = 0; th < nt; th++) {
index_t start = th*block_size;
index_t stop = (th == nt-1) ? i_n-1 : (start+block_size-1);
for(index_t u = start; u <= stop; u++) {
index_t o_u = v_map_i2o[u];
if(o_u == UNDEFINED) continue;
index_t i_pu = i_pos_t[u];
index_t i_nu = i_adj[i_pu];
index_t *i_adj_u = i_adj + i_pu;
index_t o_pu = o_pos_t[o_u];
for(index_t j = 1; j <= i_nu; j++) {
index_t v = i_adj_u[j];
index_t o_v = v_map_i2o[v];
if(o_v == UNDEFINED) continue;
o_adj[o_pu + 1 + o_adj[o_pu]++] = o_v;
}
}
}
}
fprintf(stdout, "[adj: %.2lf ms] ", pop_time());
fflush(stdout);
push_time();
// output shade map
shade_map_t *o_shade = (shade_map_t *) MALLOC(sizeof(shade_map_t)*o_n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < i_n; u++) {
index_t o_u = v_map_i2o[u];
if(o_u != UNDEFINED)
o_shade[o_u] = i_shade[u];
}
// output color
index_t *o_color = alloc_idxtab(o_n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < i_n; u++) {
index_t o_u = v_map_i2o[u];
if(o_u != UNDEFINED)
o_color[o_u] = i_color[u];
}
fprintf(stdout, "[shade: %.2lf ms] ", pop_time());
fflush(stdout);
temppathq_t *out = (temppathq_t *) MALLOC(sizeof(temppathq_t));
out->is_stub = 0;
out->n = o_n;
out->k = k;
out->tmax = tmax;
out->pos = o_pos;
out->adj = o_adj;
out->nl = 0;
out->l = (index_t *) MALLOC(sizeof(index_t)*out->nl);
out->ns = ns;
out->shade = o_shade;
out->color = o_color;
*out_q = out;
*out_map = v_map_o2i;
FREE(v_map_i2o);
fprintf(stdout, "done. [%.2lf ms] ", pop_time());
print_pop_memtrack();
fprintf(stdout, " ");
print_current_mem();
fprintf(stdout, "\n");
fflush(stdout);
}
/****************************************************** Input reader (ASCII). */
void skipws(FILE *in)
{
int c;
do {
c = fgetc(in);
if(c == '#') {
do {
c = fgetc(in);
} while(c != EOF && c != '\n');
}
} while(c != EOF && isspace(c));
if(c != EOF)
ungetc(c, in);
}
#define CMD_NOP 0
#define CMD_TEST_UNIQUE 1
#define CMD_TEST_COUNT 2
#define CMD_RUN_ORACLE 3
#define CMD_LIST_FIRST 4
#define CMD_LIST_ALL 5
#define CMD_BASE_TEMPPATH 6
#define CMD_BASE_PATHMOTIF 7
#define CMD_BASE_DFS 8
const char *cmd_legend[] = { "no operation",
"test unique",
"test count",
"run oracle",
"list first",
"list all",
"baseline temppath",
"baseline pathmotif",
"baseline dfs"};
void reader_ascii(FILE *in,
graph_t **g_out, index_t *k_out, index_t **kk_out,
index_t *cmd_out, index_t **cmd_args_out)
{
push_time();
push_memtrack();
index_t n = 0;
index_t m = 0;
index_t tmax = 0;
index_t is_dir = 0;
graph_t *g = (graph_t *) 0;
index_t *kk = (index_t *) 0;
index_t cmd = CMD_NOP;
index_t *cmd_args = (index_t *) 0;
index_t i, j, d, k, t;
skipws(in);
while(!feof(in)) {
skipws(in);
int c = fgetc(in);
switch(c) {
case 'p':
if(g != (graph_t *) 0)
ERROR("duplicate parameter line");
skipws(in);
if(fscanf(in, "motif %ld %ld %ld %ld", &n, &m, &tmax, &is_dir) != 4)
ERROR("invalid parameter line");
if(n <= 0 || m < 0 ) {
ERROR("invalid input parameters (n = %ld, m = %ld, tmax = %ld)",
n, m, tmax);
}
g = graph_alloc(n);
graph_set_is_directed(g, is_dir);
graph_set_max_time(g, tmax);
break;
case 'e':
if(g == (graph_t *) 0)
ERROR("parameter line must be given before edges");
skipws(in);
if(fscanf(in, "%ld %ld %ld", &i, &j, &t) != 3)
ERROR("invalid edge line");
//if(i < 1 || i > n || j < 1 || j > n || t < 1 || t > tmax) {
// ERROR("invalid edge (i = %ld, j = %ld t = %ld with n = %ld, tmax = %ld)",
// i, j, t, n, tmax);
//}
graph_add_edge(g, i-1, j-1, t-1);
break;
case 'n':
if(g == (graph_t *) 0)
ERROR("parameter line must be given before vertex colors");
skipws(in);
if(fscanf(in, "%ld %ld", &i, &d) != 2)
ERROR("invalid color line");
if(i < 1 || i > n || d < 1)
ERROR("invalid color line (i = %ld, d = %ld with n = %ld)",
i, d, n);
graph_set_color(g, i-1, d-1);
break;
case 'k':
if(g == (graph_t *) 0)
ERROR("parameter line must be given before motif");
skipws(in);
if(fscanf(in, "%ld", &k) != 1)
ERROR("invalid motif line");
if(k < 1 || k > n)
ERROR("invalid motif line (k = %ld with n = %d)", k, n);
kk = alloc_idxtab(k);
for(index_t u = 0; u < k; u++) {
skipws(in);
if(fscanf(in, "%ld", &i) != 1)
ERROR("error parsing motif line");
if(i < 1)
ERROR("invalid color on motif line (i = %ld)", i);
kk[u] = i-1;
}
break;
case 't':
if(g == (graph_t *) 0 || kk == (index_t *) 0)
ERROR("parameter and motif lines must be given before test");
skipws(in);
{
char cmdstr[128];
if(fscanf(in, "%100s", cmdstr) != 1)
ERROR("invalid test command");
if(!strcmp(cmdstr, "unique")) {
cmd_args = alloc_idxtab(k);
for(index_t u = 0; u < k; u++) {
skipws(in);
if(fscanf(in, "%ld", &i) != 1)
ERROR("error parsing test line");
if(i < 1 || i > n)
ERROR("invalid test line entry (i = %ld)", i);
cmd_args[u] = i-1;
}
heapsort_indext(k, cmd_args);
for(index_t u = 1; u < k; u++)
if(cmd_args[u-1] >= cmd_args[u])
ERROR("test line contains duplicate entries");
cmd = CMD_TEST_UNIQUE;
} else {
if(!strcmp(cmdstr, "count")) {
cmd_args = alloc_idxtab(1);
skipws(in);
if(fscanf(in, "%ld", &i) != 1)
ERROR("error parsing test line");
if(i < 0)
ERROR("count on test line cannot be negative");
cmd = CMD_TEST_COUNT;
cmd_args[0] = i;
} else {
ERROR("unrecognized test command \"%s\"", cmdstr);
}
}
}
break;
case EOF:
break;
default:
ERROR("parse error");
}
}
if(g == (graph_t *) 0)
ERROR("no graph given in input");
if(kk == (index_t *) 0)
ERROR("no motif given in input");
for(index_t i = 0; i < n; i++) {
if(g->colors[i] == -1)
ERROR("no color assigned to vertex i = %ld", i);
}
double time = pop_time();
fprintf(stdout,
"input: n = %ld, m = %ld, k = %ld, t = %ld [%.2lf ms] ",
g->num_vertices,
g->num_edges,
k,
g->max_time,
time);
print_pop_memtrack();
fprintf(stdout, " ");
print_current_mem();
fprintf(stdout, "\n");
*g_out = g;
*k_out = k;
*kk_out = kk;
*cmd_out = cmd;
*cmd_args_out = cmd_args;
}
/******************************************************************************/
void get_subgraph(index_t *kk, temppathq_t *in, temppathq_t **out_q,
index_t **out_map)
{
push_memtrack();
index_t nt = num_threads();
index_t i_n = in->n;
index_t k = in->k;
index_t tmax = in->tmax;
index_t *i_pos = in->pos;
index_t *i_adj = in->adj;
index_t ns = in->ns;
shade_map_t *i_shade = in->shade;
index_t *i_color = in->color;
// output graph
index_t o_n = k;
push_time();
fprintf(stdout, "get_subgraph: ");
fflush(stdout);
//shellsort(n, kk);
push_time();
// input-to-output vertex map
index_t *v_map_i2o = (index_t *) MALLOC(sizeof(index_t)*i_n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < i_n; u++)
v_map_i2o[u] = UNDEFINED;
// serially construct input-to-output vertex map
for(index_t i = 0; i < k; i++)
v_map_i2o[kk[i]] = i;
// output-to-input vertex map
// required to reconstruct solution in original graph
index_t *v_map_o2i = (index_t *) MALLOC(sizeof(index_t)*o_n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t i = 0; i < o_n; i++) {
v_map_o2i[i] = kk[i];
}
fprintf(stdout, "[map: %.2lf ms] ", pop_time());
fflush(stdout);
push_time();
// output position list
index_t *o_pos = alloc_idxtab(o_n*tmax);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < o_n*tmax; u++)
o_pos[u] = 0;
for(index_t t = 0; t < tmax; t++) {
index_t *o_pos_t = o_pos + o_n*t;
index_t *i_pos_t = i_pos + i_n*t;
index_t block_size = i_n/nt;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t th = 0; th < nt; th++) {
index_t start = th*block_size;
index_t stop = (th == nt-1) ? i_n-1 : (start+block_size-1);
for(index_t u = start; u <= stop; u++) {
index_t o_u = v_map_i2o[u];
if(o_u == UNDEFINED) continue;
index_t i_pu = i_pos_t[u];
index_t i_nu = i_adj[i_pu];
index_t *i_adj_u = i_adj + i_pu;
for(index_t j = 1; j <= i_nu; j++) {
index_t v = i_adj_u[j];
index_t o_v = v_map_i2o[v];
if(o_v == UNDEFINED) continue;
o_pos_t[o_u]++;
}
}
}
}
index_t o_m = parallelsum(o_n*tmax, o_pos);
index_t run = prefixsum(o_n*tmax, o_pos, 1);
assert(run == (o_n*tmax+o_m));
fprintf(stdout, "[pos: %.2lf ms] ", pop_time());
fflush(stdout);
push_time();
// output adjacency list
index_t *o_adj = alloc_idxtab(o_n*tmax + o_m);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < o_n*tmax; u++)
o_adj[o_pos[u]] = 0;
for(index_t t = 0; t < tmax; t++) {
index_t *o_pos_t = o_pos + o_n*t;
index_t *i_pos_t = i_pos + i_n*t;
index_t block_size = i_n/nt;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t th = 0; th < nt; th++) {
index_t start = th*block_size;
index_t stop = (th == nt-1) ? i_n-1 : (start+block_size-1);
for(index_t u = start; u <= stop; u++) {
index_t o_u = v_map_i2o[u];
if(o_u == UNDEFINED) continue;
index_t i_pu = i_pos_t[u];
index_t i_nu = i_adj[i_pu];
index_t *i_adj_u = i_adj + i_pu;
index_t o_pu = o_pos_t[o_u];
for(index_t j = 1; j <= i_nu; j++) {
index_t v = i_adj_u[j];
index_t o_v = v_map_i2o[v];
if(o_v == UNDEFINED) continue;
o_adj[o_pu + 1 + o_adj[o_pu]++] = o_v;
}
}
}
}
fprintf(stdout, "[adj: %.2lf ms] ", pop_time());
fflush(stdout);
push_time();
// output shade map
shade_map_t *o_shade = (shade_map_t *) MALLOC(sizeof(shade_map_t)*o_n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < i_n; u++) {
index_t o_u = v_map_i2o[u];
if(o_u != UNDEFINED)
o_shade[o_u] = i_shade[u];
}
index_t *o_color = alloc_idxtab(o_n);
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t u = 0; u < i_n; u++) {
index_t o_u = v_map_i2o[u];
if(o_u != UNDEFINED)
o_color[o_u] = i_color[u];
}
fprintf(stdout, "[shade: %.2lf ms] ", pop_time());
fflush(stdout);
temppathq_t *out = (temppathq_t *) MALLOC(sizeof(temppathq_t));
out->is_stub = 0;
out->n = o_n;
out->k = k;
out->tmax = tmax;
out->pos = o_pos;
out->adj = o_adj;
out->nl = 0;
out->l = (index_t *) MALLOC(sizeof(index_t)*out->nl);
out->ns = ns;
out->shade = o_shade;
out->color = o_color;
*out_q = out;
*out_map = v_map_o2i;
FREE(v_map_i2o);
fprintf(stdout, "done. [%.2lf ms] ", pop_time());
print_pop_memtrack();
fprintf(stdout, " ");
print_current_mem();
fprintf(stdout, "\n");
fflush(stdout);
}
/*****************************************************************************/
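// temp_dfs: recursive depth-first search over temporal edges. The stack s
// holds the current partial walk; each recursive step extends it only with
// edges whose timestamp is at least the time recorded at the stack top
// (one past the previous edge's timestamp), so timestamps along a completed
// depth-k walk are strictly increasing, and the in_stack[] flags keep the
// walk vertex-disjoint. Once depth k is reached, the sorted vertex colors
// are compared against the sorted motif kk_in, and the match with the
// smallest arrival time seen so far is copied to uu_out/tt_out/t_opt.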
index_t temp_dfs(index_t n, index_t k, index_t t_max, index_t *pos,
index_t *adj, index_t *color, index_t *kk_in,
index_t *in_stack, stk_t *s, index_t *uu_out,
index_t *tt_out, index_t *t_opt)
{
if(s->n >= k) {
// reached depth k
assert(s->n <= k);
// allocate memory
index_t *uu_sol = (index_t *) malloc(k*sizeof(index_t));
index_t *kk_sol = (index_t *) malloc(k*sizeof(index_t));
index_t *tt_sol = (index_t *) malloc(k*sizeof(index_t));
// get vertices in stack
stack_get_vertices(s, uu_sol);
stack_get_timestamps(s, tt_sol);
// get vertex colors
for(index_t i = 0; i < k; i++)
kk_sol[i] = color[uu_sol[i]];
shellsort(k, kk_sol);
// check if colors match
index_t is_motif = 1;
for(index_t i = 0; i < k; i++) {
if(kk_sol[i] != kk_in[i]) {
is_motif = 0;
break;
}
}
// match found
if(is_motif) {
stack_node_t e;
stack_top(s, &e);
if(*t_opt > e.t) {
// copy solution vertices
for(index_t i = 0; i < k; i++)
uu_out[i] = uu_sol[i];
// copy solution timestamps
for(index_t i = 0; i < k; i++)
tt_out[i] = tt_sol[i];
*t_opt = e.t;
}
}
// free memory
free(uu_sol);
free(kk_sol);
free(tt_sol);
return 1;
} else {
// get stack-top
stack_node_t e;
stack_top(s, &e);
index_t u = e.u;
//index_t l = e.l;
index_t t_min = e.t;
// proceed with temporal DFS
for(index_t t = t_min; t < t_max; t++) {
index_t *pos_t = pos + t*n;
index_t pu = pos_t[u];
index_t nu = adj[pu];
if(nu == 0) continue;
index_t *adj_u = adj + pu;
for(index_t i = 1; i <= nu; i++) {
index_t v = adj_u[i];
if(in_stack[v]) continue;
stack_node_t e;
e.u = v;
//e.l = l+1;
e.t = t+1;
stack_push(s, &e);
in_stack[v] = 1;
// recursive call to depth k
temp_dfs(n, k, t_max, pos, adj, color, kk_in, in_stack, s,
uu_out, tt_out, t_opt);
stack_pop(s, &e);
in_stack[v] = 0;
}
}
}
return 1; // result, if any, is reported through uu_out/tt_out/t_opt
}
index_t baseline_dfs(index_t seed, temppathq_t *root, index_t *kk)
{
push_time();
// thread count
index_t nt = num_threads();
// init
index_t n = root->n;
index_t k = root->k;
index_t t_max = root->tmax;
index_t *pos = root->pos;
index_t *adj = root->adj;
index_t *color = root->color;
// allocate memory
index_t *uu_sol_nt = alloc_idxtab(nt*k);
index_t *tt_sol_nt = alloc_idxtab(nt*k);
index_t *in_stack_nt = alloc_idxtab(nt*n);
index_t *t_opt_nt = alloc_idxtab(nt);
// initialise and time-it
push_time();
shellsort(k, kk);
index_t *v_seq = alloc_idxtab(n);
randperm(n, seed, v_seq);
double init_time = pop_time();
push_time();
index_t block_size = n/nt;
#ifdef BUILD_PARALLEL
#pragma omp parallel for
#endif
for(index_t th = 0; th < nt; th++) {
index_t start = th*block_size;
index_t stop = (th == nt-1) ? n-1 : (start+block_size-1);
stk_t *s = stack_alloc(k); // allocate stack
// memory for each thread
index_t *in_stack = in_stack_nt + th*n;
index_t *uu_sol = uu_sol_nt + th*k;
index_t *tt_sol = tt_sol_nt + th*k;
index_t *t_opt = t_opt_nt + th;
// initialise optimal-time to infinity
*t_opt = MATH_INF;
// each thread handles a set of vertices
for(index_t j = start; j <= stop; j++) {
// initialise stack to empty
for(index_t i = 0; i < n; i++)
in_stack[i] = 0;
index_t u = v_seq[j];
stack_node_t e;
e.u = u;
//e.l = 1;
e.t = 0;
stack_push(s, &e);
in_stack[u] = 1;
// perform temporal-DFS
temp_dfs(n, k, t_max, pos, adj, color, kk, in_stack, s, uu_sol,
tt_sol, t_opt);
// empty stack
stack_empty(s);
}
stack_free(s);
}
// obtain global optimum using optimal values from each thread
index_t *uu_opt = (index_t *) MALLOC(k*sizeof(index_t));
index_t *tt_opt = (index_t *) MALLOC(k*sizeof(index_t));
index_t t_opt = MATH_INF;
for(index_t th = 0; th < nt; th++) {
index_t *uu_sol = uu_sol_nt + th*k;
index_t *tt_sol = tt_sol_nt + th*k;
if(t_opt > t_opt_nt[th]) {
t_opt = t_opt_nt[th];
for(index_t i = 0; i < k; i++)
uu_opt[i] = uu_sol[i];
for(index_t i = 0; i < k; i++)
tt_opt[i] = tt_sol[i];
}
}
double dfs_time = pop_time();
// output solution
index_t found = 0;
if(t_opt != MATH_INF) {
found = 1;
fprintf(stdout, "solution [%ld, %.2lfms]: ", t_opt, dfs_time);
for(index_t i = 0; i < k-1; i++) {
index_t u = uu_opt[i];
index_t v = uu_opt[i+1];
index_t t = tt_opt[i+1];
fprintf(stdout, "[%ld, %ld, %ld]%s", u+1, v+1, t, i==k-2?"\n":" ");
}
}
FREE(in_stack_nt);
FREE(uu_sol_nt);
FREE(tt_sol_nt);
FREE(t_opt_nt);
FREE(tt_opt);
FREE(uu_opt);
FREE(v_seq);
fprintf(stdout, "baseline [dfs]: [init: %.2lf ms] [dfs: %.2lf ms] done."
" [%.2lf ms] -- %s\n"
,init_time, dfs_time, pop_time(), found?"true":"false");
return found;
}
/*****************************************************************************/
static void random_tempwalk(index_t seed, index_t n, index_t k, index_t tmax,
index_t *pos, index_t *adj, index_t *uu_sol,
index_t *tt_sol)
{
for(index_t i = 0; i < k; i++)
uu_sol[i] = UNDEFINED;
for(index_t i = 0; i < k; i++)
tt_sol[i] = UNDEFINED;
srand(seed);
index_t s = irand()%n; // pick a random start vertex
uu_sol[0] = s; // initialise walk with start vertex
tt_sol[0] = 0; // always start with 0 timestamp
for(index_t l = 0; l < k-1; l++) {
index_t u = uu_sol[l];
index_t tu = tt_sol[l];
if(tu >= tmax-1) break;
index_t t = randrange(tu+1, tmax-1);
//assert(t>tu && t<tmax);
index_t *pos_t = pos + t*n;
index_t pu = pos_t[u];
index_t nu = adj[pu];
if(nu == 0) break;
index_t *adj_u = adj + pu + 1;
index_t i = randrange(0, nu-1);
index_t v = adj_u[i];
uu_sol[l+1] = v;
tt_sol[l+1] = t;
}
}
index_t baseline_randwalk_path(index_t max_itr, temppathq_t *root, index_t *kk,
index_t **uu_out, index_t **tt_out)
{
push_time();
push_memtrack();
index_t n = root->n;
index_t k = root->k;
index_t tmax = root->tmax;
index_t *pos = root->pos;
index_t *adj = root->adj;
index_t *uu_sol = alloc_idxtab(k);
index_t *tt_sol = alloc_idxtab(k);
index_t path_found = 0;
for(index_t itr = 0; itr < max_itr; itr++) {
push_time();
index_t seed = irand();
random_tempwalk(seed, n, k, tmax, pos, adj, uu_sol, tt_sol);
// check if the walk is temporal
index_t is_temp = 1;
for(index_t i = 0; i < k-1; i++) {
if(tt_sol[i] >= tt_sol[i+1]) {
is_temp = 0;
break;
}
}
if(!is_temp) {
fprintf(stdout, "%10ld : [%7ld %.4lfms] [not-path]\n",
itr+1, uu_sol[0]+1, pop_time());
fflush(stdout);
continue;
}
// check if the walk is a path
// sort and check if any two consecutive vertices are same
index_t sol_temp[k];
for(index_t i = 0; i < k; i++)
sol_temp[i] = uu_sol[i];
shellsort(k, sol_temp);
index_t is_path = 1;
for(index_t i = 0; i < k-1; i++) {
if(sol_temp[i] == UNDEFINED || (sol_temp[i] == sol_temp[i+1])) {
is_path = 0;
break;
}
}
if(!is_path) {
fprintf(stdout, "%10ld : [%7ld %.4lfms] [not-path]\n",
itr+1, uu_sol[0]+1, pop_time());
fflush(stdout);
continue;
} else {
path_found = 1;
fprintf(stdout, "%10ld : [%7ld %.4lfms] [path-found]\n",
itr+1, uu_sol[0]+1, pop_time());
break;
}
}
*uu_out = uu_sol;
*tt_out = tt_sol;
fprintf(stdout, "baseline-temppath: [%.4lfms] ", pop_time());
print_pop_memtrack();
fprintf(stdout, " ");
print_current_mem();
fprintf(stdout, "\n");
fflush(stdout);
return path_found;
}
index_t baseline_randwalk_motif(index_t seed, index_t max_itr,
temppathq_t *root, index_t *kk,
index_t *uu_out, index_t *tt_out)
{
push_time();
push_memtrack();
index_t nt = num_threads();
index_t n = root->n;
index_t k = root->k;
index_t tmax = root->tmax;
index_t *pos = root->pos;
index_t *adj = root->adj;
index_t *color = root->color;
index_t *uu_tmp_nt = alloc_idxtab(k*nt);
index_t *tt_tmp_nt = alloc_idxtab(k*nt);
index_t *kk_tmp_nt = alloc_idxtab(k*nt);
index_t *rand_seeds = alloc_idxtab(max_itr);
// memory to store one solution per thread
index_t *uu_sol_nt = alloc_idxtab(k*nt);
index_t *tt_sol_nt = alloc_idxtab(k*nt);
index_t *kk_sol_nt = alloc_idxtab(k*nt);
index_t *t_opt_nt = alloc_idxtab(nt);
shellsort(k, kk);
// random seed for each iteration
rand_nums(seed, max_itr, rand_seeds);
index_t block_size = max_itr/nt;
index_t th_id = UNDEFINED;
volatile index_t found = 0;
#ifdef BUILD_PARALLEL
#pragma omp parallel for shared(found)
#endif
for(index_t th = 0; th < nt; th++) {
index_t start = th*block_size;
index_t stop = (th == nt-1) ? max_itr-1 : (start+block_size-1);
index_t *uu_tmp = uu_tmp_nt + th*k;
index_t *tt_tmp = tt_tmp_nt + th*k;
index_t *kk_tmp = kk_tmp_nt + th*k;
index_t *uu_sol = uu_sol_nt + th*k;
index_t *tt_sol = tt_sol_nt + th*k;
index_t *kk_sol = kk_sol_nt + th*k;
index_t *t_opt = t_opt_nt + th;
*t_opt = MATH_INF; // initialise to MAX TIME
for(index_t itr = start; itr <= stop; itr++) {
//push_time();
if(found) break;
index_t seed_itr = rand_seeds[itr];
random_tempwalk(seed_itr, n, k, tmax, pos, adj, uu_tmp, tt_tmp);
// check if the walk is temporal
index_t is_temp = 1;
for(index_t i = 0; i < k-1; i++) {
if(tt_tmp[i] >= tt_tmp[i+1]) {
is_temp = 0;
break;
}
}
// continue, if walk is not temporal
if(!is_temp) continue;
// check if the walk is a path
// sort and check if any two consecutive vertices are same
index_t sol_temp[k];
for(index_t i = 0; i < k; i++)
sol_temp[i] = uu_tmp[i];
shellsort(k, sol_temp);
index_t is_path = 1;
for(index_t i = 0; i < k-1; i++) {
if(sol_temp[i] == UNDEFINED || (sol_temp[i] == sol_temp[i+1])) {
is_path = 0;
break;
}
}
// continue, if walk is not a path
if(!is_path) continue;
// check if the colors in multiset match with path
for(index_t i = 0; i < k; i++) {
kk_tmp[i] = color[uu_tmp[i]];
//kk_tmp[i] = __builtin_ffs(shade[uu_tmp[i]]);
}
// sort colors of the current walk
shellsort(k, kk_tmp);
// check if the colors match
index_t kk_match = 1;
for(index_t i = 0; i < k; i++) {
if(kk[i] != kk_tmp[i]) {
kk_match = 0;
break;
}
}
// continue, if colors do not match
if(!kk_match) continue;
// found a motif match
if(is_path && kk_match) {
found = 1;
th_id = th;
continue;
// TODO: update solution if the max-time is less than the
// current solution
}
}
}
if(found) {
assert(th_id != UNDEFINED);
index_t *uu_tmp = uu_tmp_nt + k*th_id;
index_t *tt_tmp = tt_tmp_nt + k*th_id;
for(index_t i = 0; i < k; i++)
uu_out[i] = uu_tmp[i];
for(index_t i = 0; i < k; i++)
tt_out[i] = tt_tmp[i];
}
FREE(uu_tmp_nt);
FREE(tt_tmp_nt);
FREE(kk_tmp_nt);
FREE(uu_sol_nt);
FREE(tt_sol_nt);
FREE(kk_sol_nt);
FREE(t_opt_nt);
FREE(rand_seeds);
fprintf(stdout, "baseline-temppath: [%.4lfms] ", pop_time());
print_pop_memtrack();
fprintf(stdout, " ");
print_current_mem();
fprintf(stdout, "\n");
fflush(stdout);
return found;
}
/******************************************************* Program entry point. */
#define PRE_NOP 0
#define PRE_MK1 1
int main(int argc, char **argv)
{
push_time();
push_memtrack();
index_t precomp = PRE_NOP;
index_t arg_cmd = CMD_NOP;
index_t max_itr = UNDEFINED;
index_t have_seed = 0;
index_t have_input = 0;
index_t seed = 123456789;
char *filename = (char *) 0;
for(index_t f = 1; f < argc; f++) {
if(argv[f][0] == '-') {
if(!strcmp(argv[f], "-bin")) {
flag_bin_input = 1;
}
if(!strcmp(argv[f], "-ascii")) {
flag_bin_input = 0;
}
if(!strcmp(argv[f], "-temppath")) {
arg_cmd = CMD_BASE_TEMPPATH;
}
if(!strcmp(argv[f], "-pathmotif")) {
arg_cmd = CMD_BASE_PATHMOTIF;
}
if(!strcmp(argv[f], "-dfs")) {
arg_cmd = CMD_BASE_DFS;
}
if(!strcmp(argv[f], "-all")) {
arg_cmd = CMD_LIST_ALL;
}
if(!strcmp(argv[f], "-max-itr")) {
if(f == argc-1)
ERROR("maximum iterations argument missing from command line");
max_itr = atol(argv[++f]);
}
if(!strcmp(argv[f], "-pre")) {
if(f == argc -1)
ERROR("preprocessing argument missing from command line");
precomp = atol(argv[++f]);
}
if(!strcmp(argv[f], "-seed")) {
if(f == argc - 1)
ERROR("random seed missing from command line");
seed = atol(argv[++f]);
have_seed = 1;
}
if(!strcmp(argv[f], "-in")) {
if(f == argc - 1)
ERROR("input file missing from command line");
filename = argv[++f];
have_input = 1;
}
}
}
fprintf(stdout, "invoked as:");
for(index_t f = 0; f < argc; f++)
fprintf(stdout, " %s", argv[f]);
fprintf(stdout, "\n");
if(have_seed == 0) {
fprintf(stdout,
"no random seed given, defaulting to %ld\n", seed);
}
fprintf(stdout, "random seed = %ld\n", seed);
if(max_itr == UNDEFINED) {
max_itr = 10000;
fprintf(stdout,
"no max iterations give, defaulting to %ld\n", max_itr);
}
fprintf(stdout, "max iterations = %ld\n", max_itr);
FILE *in = stdin;
if(have_input) {
in = fopen(filename, "r");
if(in == NULL)
ERROR("unable to open file '%s'", filename);
} else {
fprintf(stdout, "no input file specified, defaulting to stdin\n");
}
fflush(stdout);
srand(seed);
graph_t *g;
index_t k;
index_t *kk;
index_t input_cmd;
index_t *cmd_args;
// read graph
reader_ascii(in, &g, &k, &kk, &input_cmd, &cmd_args);
index_t cmd = input_cmd; // by default execute command in input stream
if(arg_cmd != CMD_NOP)
cmd = arg_cmd; // override command in input stream
// build root query
//index_t is_dir = 0;
temppathq_t *root = (temppathq_t *) 0;
if(g->is_directed) {
//is_dir = 1;
root = build_temppathq_dir(g, k, kk);
} else {
root = build_temppathq(g, k, kk);
}
graph_free(g); // free graph
push_time();
// preprocess query and time it
push_time();
index_t *v_map1;
switch(precomp) {
case PRE_NOP:
{
// no precomputation
fprintf(stdout, "no preprocessing, default execution\n");
break;
}
case PRE_MK1:
{
// preprocess: remove vertices with no matching colors
temppathq_t *root_pre;
query_pre_mk1(root, &root_pre, &v_map1);
temppathq_free(root);
root = root_pre;
//FREE(v_map1);
// preprocessed graph statistics
index_t o_n = root->n;
index_t tmax = root->tmax;
index_t *o_pos = root->pos;
index_t *o_adj = root->adj;
index_t o_m = (o_pos[o_n*(tmax-1) + o_n-1] +
o_adj[o_pos[o_n*(tmax-1) + o_n-1]] - (o_n*tmax) + 1)/2;
fprintf(stdout, "output pre [1]: n = %ld, m = %ld, k = %ld \n",
o_n, o_m, k);
fflush(stdout);
break;
}
default:
break;
}
double precomp_time = pop_time();
push_time();
fprintf(stdout, "command: %s\n", cmd_legend[cmd]);
fflush(stdout);
// execute command
switch(cmd) {
case CMD_NOP:
{
temppathq_free(root);
break;
}
case CMD_BASE_TEMPPATH:
{
index_t *uu;
index_t *tt;
baseline_randwalk_path(max_itr, root, kk, &uu, &tt);
fprintf(stdout, "found [%ld]: ", k);
for(index_t i = 0; i < k-1; i++) {
fprintf(stdout, "[%ld %ld %ld]%s",
uu[i]+1, uu[i+1]+1, tt[i+1]+1, i == k-2 ? "\n" : " ");
}
fprintf(stdout, "max-time: %ld\n", tt[k-1]+1);
FREE(uu);
FREE(tt);
temppathq_free(root);
break;
}
case CMD_BASE_PATHMOTIF:
{
index_t *uu = alloc_idxtab(k);
index_t *tt = alloc_idxtab(k);
if(baseline_randwalk_motif(seed, max_itr, root, kk, uu, tt)) {
fprintf(stdout, "solution [found, %ld]: ", k);
for(index_t i = 0; i < k-1; i++) {
fprintf(stdout, "[%ld %ld %ld]%s",
uu[i]+1, uu[i+1]+1, tt[i+1]+1, i == k-2 ? "\n" : " ");
}
fprintf(stdout, "max-time: %ld\n", tt[k-1]+1);
} else {
fprintf(stdout, "solution [not-found, %ld]: \n", k);
}
FREE(uu);
FREE(tt);
temppathq_free(root);
break;
}
case CMD_BASE_DFS:
{
baseline_dfs(seed, root, kk);
temppathq_free(root);
break;
}
default:
assert(0);
break;
}
// free vertex map
if(precomp == PRE_MK1)
FREE(v_map1);
FREE(kk);
double cmd_time = pop_time();
double time = pop_time();
fprintf(stdout, "command done [%.2lf ms %.2lf ms %.2lf ms %.2lf ms]\n",
precomp_time, cmd_time, time, time);
if(input_cmd != CMD_NOP)
FREE(cmd_args);
time = pop_time();
fprintf(stdout, "grand total [%.2lf ms] ", time);
print_pop_memtrack();
fprintf(stdout, "\n");
fprintf(stdout, "host: %s\n", sysdep_hostname());
fprintf(stdout,
"build: %s\n",
#ifdef BUILD_PARALLEL
"multithreaded"
#else
"single thread"
#endif
);
fprintf(stdout,
"compiler: gcc %d.%d.%d\n",
__GNUC__,
__GNUC_MINOR__,
__GNUC_PATCHLEVEL__);
fflush(stdout);
assert(malloc_balance == 0);
assert(memtrack_stack_top < 0);
return 0;
}
|
convolutiondepthwise_5x5_pack4.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
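// convdw5x5s1_pack4_msa: 5x5 depthwise convolution, stride 1, for ncnn's
// pack-4 layout (each element stores 4 consecutive channels), implemented
// with MIPS MSA vector intrinsics. The outer loop emits two output rows
// per iteration so that each block of kernel values loaded into vector
// registers is reused against two overlapping windows of input rows.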
static void convdw5x5s1_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
v4f32 _bias0 = bias ? (v4f32)__msa_ld_w(bias + g * 4, 0) : (v4f32)__msa_fill_w(0);
const float* k0 = kernel.row(g);
float* outptr0 = out.row(0);
float* outptr1 = out.row(1);
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
const float* r4 = img0.row(4);
const float* r5 = img0.row(5);
int i = 0;
for (; i + 1 < outh; i += 2)
{
int j = 0;
for (; j < outw; j++)
{
__builtin_prefetch(r0 + 160);
__builtin_prefetch(r1 + 160);
__builtin_prefetch(r2 + 160);
__builtin_prefetch(r3 + 160);
__builtin_prefetch(r4 + 160);
__builtin_prefetch(r5 + 160);
__builtin_prefetch(k0 + 800);
v4f32 _sum0 = _bias0;
v4f32 _sum1 = _bias0;
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k03 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k04 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
k0 += 4 * 5;
_sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
_sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
_sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
_sum0 = __msa_fmadd_w(_sum0, _k03, _r03);
_sum0 = __msa_fmadd_w(_sum0, _k04, _r04);
v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
v4f32 _r14 = (v4f32)__msa_ld_w(r1 + 4 * 4, 0);
_sum1 = __msa_fmadd_w(_sum1, _k00, _r10);
_sum1 = __msa_fmadd_w(_sum1, _k01, _r11);
_sum1 = __msa_fmadd_w(_sum1, _k02, _r12);
_sum1 = __msa_fmadd_w(_sum1, _k03, _r13);
_sum1 = __msa_fmadd_w(_sum1, _k04, _r14);
v4f32 _k10 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k13 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k14 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
k0 += 4 * 5;
_sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
_sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
_sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
_sum0 = __msa_fmadd_w(_sum0, _k13, _r13);
_sum0 = __msa_fmadd_w(_sum0, _k14, _r14);
v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
v4f32 _r24 = (v4f32)__msa_ld_w(r2 + 4 * 4, 0);
_sum1 = __msa_fmadd_w(_sum1, _k10, _r20);
_sum1 = __msa_fmadd_w(_sum1, _k11, _r21);
_sum1 = __msa_fmadd_w(_sum1, _k12, _r22);
_sum1 = __msa_fmadd_w(_sum1, _k13, _r23);
_sum1 = __msa_fmadd_w(_sum1, _k14, _r24);
v4f32 _k20 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k23 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k24 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
k0 += 4 * 5;
_sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
_sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
_sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
_sum0 = __msa_fmadd_w(_sum0, _k23, _r23);
_sum0 = __msa_fmadd_w(_sum0, _k24, _r24);
v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0);
v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0);
v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0);
v4f32 _r33 = (v4f32)__msa_ld_w(r3 + 4 * 3, 0);
v4f32 _r34 = (v4f32)__msa_ld_w(r3 + 4 * 4, 0);
_sum1 = __msa_fmadd_w(_sum1, _k20, _r30);
_sum1 = __msa_fmadd_w(_sum1, _k21, _r31);
_sum1 = __msa_fmadd_w(_sum1, _k22, _r32);
_sum1 = __msa_fmadd_w(_sum1, _k23, _r33);
_sum1 = __msa_fmadd_w(_sum1, _k24, _r34);
v4f32 _k30 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k31 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k32 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k33 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k34 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
k0 += 4 * 5;
_sum0 = __msa_fmadd_w(_sum0, _k30, _r30);
_sum0 = __msa_fmadd_w(_sum0, _k31, _r31);
_sum0 = __msa_fmadd_w(_sum0, _k32, _r32);
_sum0 = __msa_fmadd_w(_sum0, _k33, _r33);
_sum0 = __msa_fmadd_w(_sum0, _k34, _r34);
v4f32 _r40 = (v4f32)__msa_ld_w(r4, 0);
v4f32 _r41 = (v4f32)__msa_ld_w(r4 + 4, 0);
v4f32 _r42 = (v4f32)__msa_ld_w(r4 + 4 * 2, 0);
v4f32 _r43 = (v4f32)__msa_ld_w(r4 + 4 * 3, 0);
v4f32 _r44 = (v4f32)__msa_ld_w(r4 + 4 * 4, 0);
_sum1 = __msa_fmadd_w(_sum1, _k30, _r40);
_sum1 = __msa_fmadd_w(_sum1, _k31, _r41);
_sum1 = __msa_fmadd_w(_sum1, _k32, _r42);
_sum1 = __msa_fmadd_w(_sum1, _k33, _r43);
_sum1 = __msa_fmadd_w(_sum1, _k34, _r44);
v4f32 _k40 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k41 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k42 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k43 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k44 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
k0 -= 4 * 20;
_sum0 = __msa_fmadd_w(_sum0, _k40, _r40);
_sum0 = __msa_fmadd_w(_sum0, _k41, _r41);
_sum0 = __msa_fmadd_w(_sum0, _k42, _r42);
_sum0 = __msa_fmadd_w(_sum0, _k43, _r43);
_sum0 = __msa_fmadd_w(_sum0, _k44, _r44);
v4f32 _r50 = (v4f32)__msa_ld_w(r5, 0);
v4f32 _r51 = (v4f32)__msa_ld_w(r5 + 4, 0);
v4f32 _r52 = (v4f32)__msa_ld_w(r5 + 4 * 2, 0);
v4f32 _r53 = (v4f32)__msa_ld_w(r5 + 4 * 3, 0);
v4f32 _r54 = (v4f32)__msa_ld_w(r5 + 4 * 4, 0);
_sum1 = __msa_fmadd_w(_sum1, _k40, _r50);
_sum1 = __msa_fmadd_w(_sum1, _k41, _r51);
_sum1 = __msa_fmadd_w(_sum1, _k42, _r52);
_sum1 = __msa_fmadd_w(_sum1, _k43, _r53);
_sum1 = __msa_fmadd_w(_sum1, _k44, _r54);
__msa_st_w((v4i32)_sum0, outptr0, 0);
__msa_st_w((v4i32)_sum1, outptr1, 0);
outptr0 += 4;
outptr1 += 4;
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
}
r0 += 4 * 4 + w * 4;
r1 += 4 * 4 + w * 4;
r2 += 4 * 4 + w * 4;
r3 += 4 * 4 + w * 4;
r4 += 4 * 4 + w * 4;
r5 += 4 * 4 + w * 4;
outptr0 += outw * 4;
outptr1 += outw * 4;
}
for (; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
__builtin_prefetch(r0 + 160);
__builtin_prefetch(r1 + 160);
__builtin_prefetch(r2 + 160);
__builtin_prefetch(r3 + 160);
__builtin_prefetch(r4 + 160);
__builtin_prefetch(k0 + 800);
v4f32 _sum0 = _bias0;
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k03 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k04 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
k0 += 4 * 5;
_sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
_sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
_sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
_sum0 = __msa_fmadd_w(_sum0, _k03, _r03);
_sum0 = __msa_fmadd_w(_sum0, _k04, _r04);
v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
v4f32 _r14 = (v4f32)__msa_ld_w(r1 + 4 * 4, 0);
v4f32 _k10 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k13 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k14 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
k0 += 4 * 5;
_sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
_sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
_sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
_sum0 = __msa_fmadd_w(_sum0, _k13, _r13);
_sum0 = __msa_fmadd_w(_sum0, _k14, _r14);
v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
v4f32 _r24 = (v4f32)__msa_ld_w(r2 + 4 * 4, 0);
v4f32 _k20 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k23 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k24 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
k0 += 4 * 5;
_sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
_sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
_sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
_sum0 = __msa_fmadd_w(_sum0, _k23, _r23);
_sum0 = __msa_fmadd_w(_sum0, _k24, _r24);
v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0);
v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0);
v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0);
v4f32 _r33 = (v4f32)__msa_ld_w(r3 + 4 * 3, 0);
v4f32 _r34 = (v4f32)__msa_ld_w(r3 + 4 * 4, 0);
v4f32 _k30 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k31 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k32 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k33 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k34 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
k0 += 4 * 5;
_sum0 = __msa_fmadd_w(_sum0, _k30, _r30);
_sum0 = __msa_fmadd_w(_sum0, _k31, _r31);
_sum0 = __msa_fmadd_w(_sum0, _k32, _r32);
_sum0 = __msa_fmadd_w(_sum0, _k33, _r33);
_sum0 = __msa_fmadd_w(_sum0, _k34, _r34);
v4f32 _r40 = (v4f32)__msa_ld_w(r4, 0);
v4f32 _r41 = (v4f32)__msa_ld_w(r4 + 4, 0);
v4f32 _r42 = (v4f32)__msa_ld_w(r4 + 4 * 2, 0);
v4f32 _r43 = (v4f32)__msa_ld_w(r4 + 4 * 3, 0);
v4f32 _r44 = (v4f32)__msa_ld_w(r4 + 4 * 4, 0);
v4f32 _k40 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k41 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k42 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k43 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k44 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
k0 -= 4 * 20;
_sum0 = __msa_fmadd_w(_sum0, _k40, _r40);
_sum0 = __msa_fmadd_w(_sum0, _k41, _r41);
_sum0 = __msa_fmadd_w(_sum0, _k42, _r42);
_sum0 = __msa_fmadd_w(_sum0, _k43, _r43);
_sum0 = __msa_fmadd_w(_sum0, _k44, _r44);
__msa_st_w((v4i32)_sum0, outptr0, 0);
outptr0 += 4;
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
}
r0 += 4 * 4;
r1 += 4 * 4;
r2 += 4 * 4;
r3 += 4 * 4;
r4 += 4 * 4;
}
}
}
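// convdw5x5s2_pack4_msa: the stride-2 variant of the same 5x5 depthwise
// kernel in pack-4 layout. Input row pointers advance by two pack-4
// elements (8 floats) per output column, and by `tailstep` floats at the
// end of each output row to skip the remaining columns plus one full input
// row for the vertical stride.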
static void convdw5x5s2_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
const int group = bottom_blob.c;
const int tailstep = (w - 2 * outw + w) * 4;
const float* bias = _bias;
#pragma omp parallel for num_threads(opt.num_threads)
for (int g = 0; g < group; g++)
{
Mat out = top_blob.channel(g);
v4f32 _bias0 = bias ? (v4f32)__msa_ld_w(bias + g * 4, 0) : (v4f32)__msa_fill_w(0);
const float* k0 = kernel.row(g);
float* outptr0 = out;
const Mat img0 = bottom_blob.channel(g);
const float* r0 = img0.row(0);
const float* r1 = img0.row(1);
const float* r2 = img0.row(2);
const float* r3 = img0.row(3);
const float* r4 = img0.row(4);
int i = 0;
for (; i < outh; i++)
{
int j = 0;
for (; j < outw; j++)
{
__builtin_prefetch(r0 + 160);
__builtin_prefetch(r1 + 160);
__builtin_prefetch(r2 + 160);
__builtin_prefetch(r3 + 160);
__builtin_prefetch(r4 + 160);
__builtin_prefetch(k0 + 800);
v4f32 _sum0 = _bias0;
v4f32 _r00 = (v4f32)__msa_ld_w(r0, 0);
v4f32 _r01 = (v4f32)__msa_ld_w(r0 + 4, 0);
v4f32 _r02 = (v4f32)__msa_ld_w(r0 + 4 * 2, 0);
v4f32 _r03 = (v4f32)__msa_ld_w(r0 + 4 * 3, 0);
v4f32 _r04 = (v4f32)__msa_ld_w(r0 + 4 * 4, 0);
v4f32 _k00 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k01 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k02 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k03 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k04 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
k0 += 4 * 5;
_sum0 = __msa_fmadd_w(_sum0, _k00, _r00);
_sum0 = __msa_fmadd_w(_sum0, _k01, _r01);
_sum0 = __msa_fmadd_w(_sum0, _k02, _r02);
_sum0 = __msa_fmadd_w(_sum0, _k03, _r03);
_sum0 = __msa_fmadd_w(_sum0, _k04, _r04);
v4f32 _r10 = (v4f32)__msa_ld_w(r1, 0);
v4f32 _r11 = (v4f32)__msa_ld_w(r1 + 4, 0);
v4f32 _r12 = (v4f32)__msa_ld_w(r1 + 4 * 2, 0);
v4f32 _r13 = (v4f32)__msa_ld_w(r1 + 4 * 3, 0);
v4f32 _r14 = (v4f32)__msa_ld_w(r1 + 4 * 4, 0);
v4f32 _k10 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k11 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k12 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k13 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k14 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
k0 += 4 * 5;
_sum0 = __msa_fmadd_w(_sum0, _k10, _r10);
_sum0 = __msa_fmadd_w(_sum0, _k11, _r11);
_sum0 = __msa_fmadd_w(_sum0, _k12, _r12);
_sum0 = __msa_fmadd_w(_sum0, _k13, _r13);
_sum0 = __msa_fmadd_w(_sum0, _k14, _r14);
v4f32 _r20 = (v4f32)__msa_ld_w(r2, 0);
v4f32 _r21 = (v4f32)__msa_ld_w(r2 + 4, 0);
v4f32 _r22 = (v4f32)__msa_ld_w(r2 + 4 * 2, 0);
v4f32 _r23 = (v4f32)__msa_ld_w(r2 + 4 * 3, 0);
v4f32 _r24 = (v4f32)__msa_ld_w(r2 + 4 * 4, 0);
v4f32 _k20 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k21 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k22 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k23 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k24 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
k0 += 4 * 5;
_sum0 = __msa_fmadd_w(_sum0, _k20, _r20);
_sum0 = __msa_fmadd_w(_sum0, _k21, _r21);
_sum0 = __msa_fmadd_w(_sum0, _k22, _r22);
_sum0 = __msa_fmadd_w(_sum0, _k23, _r23);
_sum0 = __msa_fmadd_w(_sum0, _k24, _r24);
v4f32 _r30 = (v4f32)__msa_ld_w(r3, 0);
v4f32 _r31 = (v4f32)__msa_ld_w(r3 + 4, 0);
v4f32 _r32 = (v4f32)__msa_ld_w(r3 + 4 * 2, 0);
v4f32 _r33 = (v4f32)__msa_ld_w(r3 + 4 * 3, 0);
v4f32 _r34 = (v4f32)__msa_ld_w(r3 + 4 * 4, 0);
v4f32 _k30 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k31 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k32 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k33 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k34 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
k0 += 4 * 5;
_sum0 = __msa_fmadd_w(_sum0, _k30, _r30);
_sum0 = __msa_fmadd_w(_sum0, _k31, _r31);
_sum0 = __msa_fmadd_w(_sum0, _k32, _r32);
_sum0 = __msa_fmadd_w(_sum0, _k33, _r33);
_sum0 = __msa_fmadd_w(_sum0, _k34, _r34);
v4f32 _r40 = (v4f32)__msa_ld_w(r4, 0);
v4f32 _r41 = (v4f32)__msa_ld_w(r4 + 4, 0);
v4f32 _r42 = (v4f32)__msa_ld_w(r4 + 4 * 2, 0);
v4f32 _r43 = (v4f32)__msa_ld_w(r4 + 4 * 3, 0);
v4f32 _r44 = (v4f32)__msa_ld_w(r4 + 4 * 4, 0);
v4f32 _k40 = (v4f32)__msa_ld_w(k0, 0);
v4f32 _k41 = (v4f32)__msa_ld_w(k0 + 4, 0);
v4f32 _k42 = (v4f32)__msa_ld_w(k0 + 4 * 2, 0);
v4f32 _k43 = (v4f32)__msa_ld_w(k0 + 4 * 3, 0);
v4f32 _k44 = (v4f32)__msa_ld_w(k0 + 4 * 4, 0);
k0 -= 4 * 20;
_sum0 = __msa_fmadd_w(_sum0, _k40, _r40);
_sum0 = __msa_fmadd_w(_sum0, _k41, _r41);
_sum0 = __msa_fmadd_w(_sum0, _k42, _r42);
_sum0 = __msa_fmadd_w(_sum0, _k43, _r43);
_sum0 = __msa_fmadd_w(_sum0, _k44, _r44);
__msa_st_w((v4i32)_sum0, outptr0, 0);
outptr0 += 4;
r0 += 4 * 2;
r1 += 4 * 2;
r2 += 4 * 2;
r3 += 4 * 2;
r4 += 4 * 2;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
r3 += tailstep;
r4 += tailstep;
}
}
}
|
GB_subassign_07.c | //------------------------------------------------------------------------------
// GB_subassign_07: C(I,J)<M> += scalar ; no S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Method 07: C(I,J)<M> += scalar ; no S
// M: present
// Mask_comp: false
// C_replace: false
// accum: present
// A: scalar
// S: none
// C: not bitmap
// M: any sparsity
#include "GB_subassign_methods.h"
GrB_Info GB_subassign_07
(
GrB_Matrix C,
// input:
const GrB_Index *I,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J,
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
const GrB_Matrix M,
const bool Mask_struct,
const GrB_BinaryOp accum,
const void *scalar,
const GrB_Type atype,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (!GB_IS_BITMAP (C)) ;
ASSERT (!GB_aliased (C, M)) ; // NO ALIAS of C==M
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
GB_EMPTY_TASKLIST ;
GB_MATRIX_WAIT_IF_JUMBLED (C) ;
GB_MATRIX_WAIT_IF_JUMBLED (M) ;
GB_GET_C ; // C must not be bitmap
int64_t zorig = C->nzombies ;
const int64_t *restrict Ch = C->h ;
const int64_t *restrict Cp = C->p ;
const bool C_is_hyper = (Ch != NULL) ;
const int64_t Cnvec = C->nvec ;
GB_GET_MASK ;
GB_GET_ACCUM_SCALAR ;
//--------------------------------------------------------------------------
// Method 07: C(I,J)<M> += scalar ; no S
//--------------------------------------------------------------------------
// Time: Close to Optimal: same as Method 05.
// Method 05 and Method 07 are very similar. Also compare with Method 06n.
//--------------------------------------------------------------------------
// Parallel: slice M into coarse/fine tasks (Method 05, 06n, 07)
//--------------------------------------------------------------------------
GB_SUBASSIGN_ONE_SLICE (M) ; // M cannot be jumbled
//--------------------------------------------------------------------------
// phase 1: undelete zombies, update entries, and count pending tuples
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE1 ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get j, the kth vector of M
//------------------------------------------------------------------
int64_t j = GBH (Mh, k) ;
GB_GET_VECTOR (pM, pM_end, pA, pA_end, Mp, k, Mvlen) ;
int64_t mjnz = pM_end - pM ;
if (mjnz == 0) continue ;
//------------------------------------------------------------------
// get jC, the corresponding vector of C
//------------------------------------------------------------------
GB_GET_jC ;
int64_t cjnz = pC_end - pC_start ;
bool cjdense = (cjnz == Cvlen) ;
//------------------------------------------------------------------
// C(I,jC)<M(:,j)> += scalar ; no S
//------------------------------------------------------------------
if (cjdense)
{
//--------------------------------------------------------------
// C(:,jC) is dense so the binary search of C is not needed
//--------------------------------------------------------------
for ( ; pM < pM_end ; pM++)
{
//----------------------------------------------------------
// update C(iC,jC), but only if M(iA,j) allows it
//----------------------------------------------------------
bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
if (mij)
{
int64_t iA = GBI (Mi, pM, Mvlen) ;
GB_iC_DENSE_LOOKUP ;
// ----[C A 1] or [X A 1]-------------------------------
// [C A 1]: action: ( =C+A ): apply accum
// [X A 1]: action: ( undelete ): zombie lives
GB_withaccum_C_A_1_scalar ;
}
}
}
else
{
//--------------------------------------------------------------
// C(:,jC) is sparse; use binary search for C
//--------------------------------------------------------------
for ( ; pM < pM_end ; pM++)
{
//----------------------------------------------------------
// update C(iC,jC), but only if M(iA,j) allows it
//----------------------------------------------------------
bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
if (mij)
{
int64_t iA = GBI (Mi, pM, Mvlen) ;
// find C(iC,jC) in C(:,jC)
GB_iC_BINARY_SEARCH ;
if (cij_found)
{
// ----[C A 1] or [X A 1]---------------------------
// [C A 1]: action: ( =C+A ): apply accum
// [X A 1]: action: ( undelete ): zombie lives
GB_withaccum_C_A_1_scalar ;
}
else
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
task_pending++ ;
}
}
}
}
}
GB_PHASE1_TASK_WRAPUP ;
}
//--------------------------------------------------------------------------
// phase 2: insert pending tuples
//--------------------------------------------------------------------------
GB_PENDING_CUMSUM ;
zorig = C->nzombies ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE2 ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//------------------------------------------------------------------
// get j, the kth vector of M
//------------------------------------------------------------------
int64_t j = GBH (Mh, k) ;
GB_GET_VECTOR (pM, pM_end, pA, pA_end, Mp, k, Mvlen) ;
int64_t mjnz = pM_end - pM ;
if (mjnz == 0) continue ;
//------------------------------------------------------------------
// get jC, the corresponding vector of C
//------------------------------------------------------------------
GB_GET_jC ;
bool cjdense = ((pC_end - pC_start) == Cvlen) ;
//------------------------------------------------------------------
// C(I,jC)<M(:,j)> += scalar ; no S
//------------------------------------------------------------------
if (!cjdense)
{
//--------------------------------------------------------------
// C(:,jC) is sparse; use binary search for C
//--------------------------------------------------------------
for ( ; pM < pM_end ; pM++)
{
//----------------------------------------------------------
// update C(iC,jC), but only if M(iA,j) allows it
//----------------------------------------------------------
bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
if (mij)
{
int64_t iA = GBI (Mi, pM, Mvlen) ;
// find C(iC,jC) in C(:,jC)
GB_iC_BINARY_SEARCH ;
if (!cij_found)
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
GB_PENDING_INSERT (scalar) ;
}
}
}
}
}
GB_PHASE2_TASK_WRAPUP ;
}
//--------------------------------------------------------------------------
// finalize the matrix and return result
//--------------------------------------------------------------------------
GB_SUBASSIGN_WRAPUP ;
}
|
laplace2d-04s.c | /*
* Copyright 2012 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include <string.h>
#include <stdio.h>
#include <omp.h>
#define NN 4096
#define NM 4096
double A[NN][NM];
double Anew[NN][NM];
int main(int argc, char** argv)
{
const int n = NN;
const int m = NM;
const int iter_max = 200;
const double tol = 1.0e-6;
double error = 1.0;
memset(A, 0, n * m * sizeof(double));
memset(Anew, 0, n * m * sizeof(double));
for (int j = 0; j < n; j++)
{
A[j][0] = 1.0;
Anew[j][0] = 1.0;
}
printf("Jacobi relaxation Calculation: %d x %d mesh\n", n, m);
double st = omp_get_wtime();
int iter = 0;
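// Device data strategy: A is mapped tofrom the device (boundary values copied
// in, the final solution copied out), while Anew only needs to exist on the
// device, so it is mapped with alloc and never transferred.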
#pragma omp target data map(alloc:Anew) map(A)
while ( error > tol && iter < iter_max )
{
error = 0.0;
#pragma omp target teams distribute
for( int j = 1; j < n-1; j++)
{
#pragma omp parallel for reduction(max:error)
for( int i = 1; i < m-1; i++ )
{
Anew[j][i] = 0.25 * ( A[j][i+1] + A[j][i-1]
+ A[j-1][i] + A[j+1][i]);
error = fmax( error, fabs(Anew[j][i] - A[j][i]));
}
}
#pragma omp target teams distribute
for( int j = 1; j < n-1; j++)
{
#pragma omp parallel for
for( int i = 1; i < m-1; i++ )
{
A[j][i] = Anew[j][i];
}
}
if(iter % 100 == 0) printf("%5d, %0.6f\n", iter, error);
iter++;
}
double et = omp_get_wtime();
printf(" total: %f s\n", (et - st));
return 0;
}
|
clang-262701.c | #include <stdio.h>
#include <string.h>
#include <omp.h>
#define THREADS 2
#define TEAMS 2
int main(){
int gpu_results[THREADS];
int correct_results[THREADS] = {2,2};
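// Each team runs THREADS threads; every thread zeroes its dist slot, adds 1,
// and thread 0 sums the slots, so each team should report THREADS (= 2).
// With TEAMS = 2 the expected result is {2, 2}.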
#pragma omp target teams thread_limit(THREADS) num_teams(TEAMS) map(from:gpu_results)
{
int dist[THREADS];
// Uncomment line below to trigger generic kernel before fix was in place
//dist[0] = 0;
#pragma omp parallel
{
int thread = omp_get_thread_num();
int team = omp_get_team_num();
dist[thread] = 0;
#pragma omp barrier
dist[thread] += 1;
#pragma omp barrier
if(thread == 0) {
for(int i = 1; i < THREADS; i++)
dist[0] += dist[i];
gpu_results[team] = dist[0];
}
}
}
int status = memcmp(correct_results, gpu_results, THREADS * sizeof(int));
if (status != 0){
printf("FAIL\n");
return 1;
}
printf("PASS\n");
return 0;
}
|
declare-variant-4.c | double f1 (int, long, float);
double f2 (int, long, float);
double f3 (int, long, float);
double f4 (int, long, float);
double f5 (int, long, float);
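/* Illustrative note (not in the original test): the pragmas below register
   f1..f5 as variants of f6. At a call site, the variants whose context
   selectors match are ranked by score and the highest-scoring match replaces
   the call to f6; f3 carries the highest explicit score (3), and f5 is only
   eligible when the implementation reports vendor(gnu). */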
#pragma omp declare variant (f1) match (user={condition(1)})
#pragma omp declare variant (f2) match (user={condition(score(1):1)})
#pragma omp declare variant (f3) match (user={condition(score(3):1)})
#pragma omp declare variant (f4) match (user={condition(score(2):1)})
#pragma omp declare variant (f5) match (implementation={vendor(gnu)})
double
f6 (int x, long y, float z)
{
return z + x + y;
}
double
test (int x)
{
return f6 (x, x, 3.5f);
}
|
sample_shared_private.c | /* Andre Augusto Giannotti Scota (https://sites.google.com/view/a2gs/) */
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "openmp_util.h"
int main(int argc, char *argv[])
{
int factor, i, resultSum;
long resultMul;
factor = i = resultSum = 0;
resultMul = 1;
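/* factor and i are private, so each thread iterates with its own copies;
   the shared accumulators resultSum and resultMul are only updated inside
   the critical section, which keeps the concurrent += and *= updates from
   racing. */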
#pragma omp parallel shared(resultSum, resultMul) private(factor, i)
{
for(i = 1; i < 5; i++){
factor = omp_get_thread_num() + i;
#pragma omp critical
{
resultSum += factor;
resultMul *= factor;
printf("Thread Id: [%d] - Factor: [%d] - Sum: [%d] - Mult: [%ld]\n", omp_get_thread_num(), factor, resultSum, resultMul);
}
}
}
printf("\nThread Id: [%d] - Sum: [%d] - Mult: [%ld]\n", omp_get_thread_num(), resultSum, resultMul);
return(0);
}
|
NLmean_propag2dirs_sspacing4_tspacing4_sim12_acc12_neighbor5_tau0100.c | /*
* compile: gcc -O3 -std=c99 -o [filename_out] -fopenmp [filename].c -lm -I/usr/include/netcdf-3/ -L/usr/lib64/ -lnetcdf -lnetcdf_c++
* in the terminal: export OMP_NUM_THREADS=3
*/
#include<stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <netcdf.h>
#include <omp.h>
/* This is the name of the data file we will read. */
#define FILENAME_RD "/data/PhDworks/isotropic/NLM/Udiff_spacespacing4.nc"
#define FILENAME_WR "/data/PhDworks/isotropic/NLM/NLmean_propag2dirs_sspacing4_tspacing4_sim12_acc12_neighbor5_tau0100.nc"
/* all constants */
#define N_HR 96
#define SCALE_FACTOR_SPACE 4
#define SCALE_FACTOR_TIME 4
#define SIM_HAFTSIZE 12
#define ACC_HAFTSIZE 12
#define NEIGHBOR_HAFTSIZE 5
#define SIM_FULLSIZE (2 * SIM_HAFTSIZE + 1)
#define ACC_FULLSIZE (2 * ACC_HAFTSIZE + 1)
#define NEIGHBOR_FULLSIZE (2 * NEIGHBOR_HAFTSIZE + 1)
#define TAU 0.1
#define NUM_VARS 1
#define NUM_SCALES 2
#define NUM_3DSNAPS 37 /* #3D snapshots */
#define NUM_BLOCKS (N_HR/SCALE_FACTOR_TIME - 1) /* #(1:SCALE_FACTOR_TIME:N_HR) - 1; parenthesized so NUM_2DSNAPS below evaluates to SCALE_FACTOR_TIME*NUM_BLOCKS + 1 = 93 rather than 96 */
#define NUM_2DSNAPS (SCALE_FACTOR_TIME * NUM_BLOCKS + 1) /* #2D snapshots in each 3D block */
#define NDIMS 4
/* Handle errors by printing an error message and exiting with a non-zero status. */
#define ERRCODE 2
#define ERR(e) {printf("Error: %s\n", nc_strerror(e)); exit(ERRCODE);}
/* **********************************************************************************/
/* ****************************** USEFUL FUNCTIONS **********************************/
/* **********************************************************************************/
/*
* get_onesnap: copy part of a big array (arr1) into a smaller one (arr2): arr2 = arr1[id_start:id_end]
*/
void get_onesnap(double *arr1,double *arr2, int id_start, int id_end)
{
for (int i = id_start; i < id_end + 1; i++)
arr2[i - id_start] = arr1[i];
}
/*
* put_onesnap: assign a small array (arr2) into a bigger one (arr1): arr1[id_start:id_end] = arr2
*/
void put_onesnap(double *arr1,double *arr2, int id_start, int id_end)
{
for (int i = id_start; i < id_end + 1; i++)
arr1[i] = arr2[i - id_start];
}
/*
* norm_by_weight: normalize x[dim] by weight W[dim]
*/
void norm_by_weight(int dim, double *x, double *W)
{
for (int k = 0; k < dim; k++)
x[k] = x[k]/W[k];
}
void add_mat(int dim, double *sum, double *x1, double *x2)
{
for (int k = 0; k < dim; k++)
sum[k] = x1[k] + x2[k];
}
void initialize(int dim, double *x, double val)
{
for (int k = 0; k < dim; k++)
x[k] = val;
}
/* **********************************************************************************/
/* ****************************** NETCDF UTILS **************************************/
/* **********************************************************************************/
/*
* creat_netcdf: create the netcdf file [filename] contain [num_vars] variables
* variable names are [varname]
*/
void create_netcdf(char *filename, int num_vars, char *varname[num_vars])
{
int ncid_wr, retval_wr;
int vel_varid_wr;
int Nt, Nx, Ny, Nz;
int dimids[NDIMS];
/* Create the file. */
if ((retval_wr = nc_create(filename, NC_CLOBBER, &ncid_wr)))
ERR(retval_wr);
/* Define the dimensions. The record dimension is defined to have
* unlimited length - it can grow as needed.*/
if ((retval_wr = nc_def_dim(ncid_wr, "Ny", N_HR, &Ny)))
ERR(retval_wr);
if ((retval_wr = nc_def_dim(ncid_wr, "Nz", N_HR, &Nz)))
ERR(retval_wr);
if ((retval_wr = nc_def_dim(ncid_wr, "Nt", NC_UNLIMITED, &Nt)))
ERR(retval_wr);
/* Define the netCDF variables for the data. */
dimids[0] = Nt;
dimids[1] = Nx;
dimids[2] = Ny;
dimids[3] = Nz;
for (int i = 0; i<num_vars; i++)
{
if ((retval_wr = nc_def_var(ncid_wr, varname[i], NC_FLOAT, NDIMS, dimids, &vel_varid_wr)))
ERR(retval_wr);
}
/* End define mode (SHOULD NOT FORGET THIS!). */
if ((retval_wr = nc_enddef(ncid_wr)))
ERR(retval_wr);
/* Close the file. */
if ((retval_wr = nc_close(ncid_wr)))
ERR(retval_wr);
printf("\n *** SUCCESS creating file: %s!\n", filename);
}
/*
* write_netcdf:
* write into [filename], variable [varname] [snap_end - snap_start + 1 ] snapshots [snaps] started at [snap_start]
*/
void write_netcdf(char *filename, char *varname, size_t *start, size_t *count, double *snaps)
{
int ncid_wr, retval_wr;
int vel_varid_wr;
/* Open the file. NC_WRITE tells netCDF we want read-only access to the file.*/
if ((retval_wr = nc_open(filename, NC_WRITE, &ncid_wr)))
ERR(retval_wr);
/* Get variable*/
if ((retval_wr = nc_inq_varid(ncid_wr, varname, &vel_varid_wr)))
ERR(retval_wr);
/* Put variable*/
if ((retval_wr = nc_put_vara_double(ncid_wr, vel_varid_wr, start, count, &snaps[0])))
ERR(retval_wr);
/* Close the file. */
if ((retval_wr = nc_close(ncid_wr)))
ERR(retval_wr);
printf("\n *** SUCCESS writing variables \"%s\" to \"%s\"!\n", varname, filename);
}
/*
* read_netcdf: read from [filename], variable [varname] [snap_end - snap_start + 1 ] snapshots [snaps]
* started at [snap_start]
*/
void read_netcdf(char *filename, char *varname, size_t *start, size_t *count, double *snaps)
{
int ncid_rd, retval_rd;
int vel_varid_rd;
/* ******** PREPARE TO READ ************* */
/* Open the file. NC_NOWRITE tells netCDF we want read-only access to the file.*/
if ((retval_rd = nc_open(filename, NC_NOWRITE, &ncid_rd)))
ERR(retval_rd);
/* Get the varids of the velocity in netCDF */
if ((retval_rd = nc_inq_varid(ncid_rd, varname, &vel_varid_rd)))
ERR(retval_rd);
if ((retval_rd = nc_get_vara_double(ncid_rd, vel_varid_rd, start, count, &snaps[0])))
ERR(retval_rd);
/* Close the file, freeing all resources. */
if ((retval_rd = nc_close(ncid_rd)))
ERR(retval_rd);
printf("\n *** SUCCESS reading variables \"%s\" from \"%s\" \n", varname, filename);
}
/* **********************************************************************************/
/* ****************************** ESTIMATE_DISTANCE *********************************/
/* **********************************************************************************/
/*
 * generate_grids: precompute the index lookup tables used by NLmean
 * gridpatches_y(z): for every estimation point (i,j), every offset in the
 * search neighborhood and every offset in the similarity patch, the y(z)
 * index into the N_HR x N_HR plane, with out-of-range indices folded back
 * inside the plane
 * acc_ids: flattened indices, within the similarity patch, of the points
 * belonging to the smaller accumulation patch centered on the same point
 */
void generate_grids(int *gridpatches_y, int *gridpatches_z, int * acc_ids)
{
int neighbor_id, sim_id;
int gridyoffset_neighbor[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE], gridzoffset_neighbor[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE];
for (int m = 0; m < NEIGHBOR_FULLSIZE; m++)
{
for (int n = 0; n < NEIGHBOR_FULLSIZE; n++)
{
gridyoffset_neighbor[m * NEIGHBOR_FULLSIZE + n] = m - NEIGHBOR_HAFTSIZE;
gridzoffset_neighbor[m * NEIGHBOR_FULLSIZE + n] = n - NEIGHBOR_HAFTSIZE;
}
}
int gridyoffset_sim[SIM_FULLSIZE * SIM_FULLSIZE], gridzoffset_sim[SIM_FULLSIZE * SIM_FULLSIZE];
for (int p = 0; p < SIM_FULLSIZE; p++)
{
for (int q = 0; q < SIM_FULLSIZE; q++)
{
gridyoffset_sim[p * SIM_FULLSIZE + q] = p - SIM_HAFTSIZE;
gridzoffset_sim[p * SIM_FULLSIZE + q] = q - SIM_HAFTSIZE;
}
}
int grid_sim[SIM_FULLSIZE][SIM_FULLSIZE];
for (int p = 0; p < SIM_FULLSIZE; p++)
for (int q = 0; q < SIM_FULLSIZE; q++)
grid_sim[p][q] = p * SIM_FULLSIZE + q;
for (int p = 0; p < ACC_FULLSIZE; p++)
for (int q = 0; q < ACC_FULLSIZE; q++)
acc_ids[p * ACC_FULLSIZE + q] = grid_sim[SIM_HAFTSIZE - ACC_HAFTSIZE + p][SIM_HAFTSIZE - ACC_HAFTSIZE + q];
int valy, valz;
long int grid_id;
for (int i = 0; i < N_HR; i++)
{
for (int j = 0; j < N_HR; j++)
{
for (int neighbor_id = 0; neighbor_id < NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE; neighbor_id++)
{
for (int sim_id = 0; sim_id < SIM_FULLSIZE * SIM_FULLSIZE; sim_id++)
{
grid_id = i * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
+ j * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
+ neighbor_id * SIM_FULLSIZE * SIM_FULLSIZE + sim_id;
valy = i + gridyoffset_neighbor[neighbor_id] + gridyoffset_sim[sim_id];
valz = j + gridzoffset_neighbor[neighbor_id] + gridzoffset_sim[sim_id];
if (valy < 0)
gridpatches_y[grid_id] = (N_HR - 1) + valy;
else if (valy > (N_HR - 1))
gridpatches_y[grid_id] = valy - (N_HR - 1);
else
gridpatches_y[grid_id] = valy;
if (valz < 0)
gridpatches_z[grid_id] = (N_HR - 1) + valz;
else if (valz > (N_HR - 1))
gridpatches_z[grid_id] = valz - (N_HR - 1);
else
gridpatches_z[grid_id] = valz;
}
}
}
}
//printf("\n gridpatches_z: %i \n", gridpatches_y[0]);
}
/* **********************************************************************************/
/* ****************************** NLMEAN *********************************/
/* **********************************************************************************/
/*
 * NLmean: non-local means propagation of high-frequency content
 * for each estimation point and each candidate patch in its search neighborhood,
 * the normalized squared distance d between the reference low-frequency patch
 * (x_ref) and the moving low-frequency patch (x_moving) is computed over the
 * similarity patch and converted into a weight w = exp(-d/(2*TAU*TAU));
 * the weighted high-frequency values from x_fusion are then accumulated into
 * x_NLM, and the weights into weight_NLM, over the accumulation patch
 * gridy, gridz, accids: index tables precomputed by generate_grids
 */
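/* Worked example of the weighting (illustrative values, not from the data):
   with TAU = 0.1 the denominator 2*TAU*TAU = 0.02, so a normalized patch
   distance d = 0.02 gives w = exp(-1) ~ 0.37, while d = 0.2 gives
   w = exp(-10) ~ 4.5e-5, i.e. dissimilar patches contribute almost nothing. */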
/*void fusion(double *x_NLM, double *weight_NLM, double *x_ref, double *x_moving, double *x_fusion,
int gridpatches_y[N_HR][N_HR][NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE][SIM_FULLSIZE * SIM_FULLSIZE],
int gridpatches_z[N_HR][N_HR][NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE][SIM_FULLSIZE * SIM_FULLSIZE],
int acc_ids[NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE], int est_idy, int est_idz)*/
void NLmean(double *x_NLM, double *weight_NLM, double *x_ref, double *x_moving, double *x_fusion, int *gridy, int *gridz, int *accids)
{
double norm_fact = 1.0/((double) (SIM_FULLSIZE * SIM_FULLSIZE));
int ri = NEIGHBOR_HAFTSIZE * NEIGHBOR_FULLSIZE + NEIGHBOR_HAFTSIZE;
int est_idy;
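// The row loop over the estimated plane is parallelized; est_idy is declared
// before the pragma and listed as private so each thread sweeps its own rows.
// Note that the accumulation window of a row (+/-ACC_HAFTSIZE) overlaps the
// windows of nearby rows, so the unsynchronized += updates to x_NLM and
// weight_NLM can overlap between threads at chunk boundaries.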
#pragma omp parallel for private (est_idy)
for (est_idy = 0; est_idy < N_HR; est_idy++)
for (int est_idz = 0; est_idz < N_HR; est_idz++)
for (int ni = 0; ni < NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE; ni++)
{
int ref_idy, ref_idz, moving_idy, moving_idz;
double du;
double d = 0.0;
long int grid_rid, grid_nid;
for (int si = 0; si < SIM_FULLSIZE * SIM_FULLSIZE; si++)
{
grid_rid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
+ est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ri * SIM_FULLSIZE * SIM_FULLSIZE + si ;
grid_nid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
+ est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ni * SIM_FULLSIZE * SIM_FULLSIZE + si;
ref_idy = gridy[grid_rid];
moving_idy = gridy[grid_nid];
ref_idz = gridz[grid_rid];
moving_idz = gridz[grid_nid];
//compute distance btw reference patch and fusion patch
du = x_ref[ref_idy * N_HR + ref_idz] - x_moving[moving_idy * N_HR + moving_idz];
d = d + norm_fact*du*du;
}
double w = exp(-d/(2.0*TAU*TAU));
for(int k = 0; k < ACC_FULLSIZE * ACC_FULLSIZE; k++)
{
int ai = accids[k];
grid_rid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
+ est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ri * SIM_FULLSIZE * SIM_FULLSIZE + ai ;
grid_nid = est_idy * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE
+ est_idz * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE + ni * SIM_FULLSIZE * SIM_FULLSIZE + ai;
ref_idy = gridy[grid_rid];
moving_idy = gridy[grid_nid];
ref_idz = gridz[grid_rid];
moving_idz = gridz[grid_nid];
x_NLM[ref_idy * N_HR + ref_idz] = x_NLM[ref_idy * N_HR + ref_idz] + w*x_fusion[moving_idy * N_HR + moving_idz];
weight_NLM[ref_idy * N_HR + ref_idz] = weight_NLM[ref_idy * N_HR + ref_idz] + w;
}
//printf("\n w=%f\n ",w);
}
}
void propag_forward(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_first, int t_bound1, int t_offset)
{
for (int t_est = t_first + 1; t_est <= t_bound1; t_est++)
{
int t_prev = t_est - 1;
double xref_lf[N_HR * N_HR], xref_hf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR], w[N_HR * N_HR];
get_onesnap(Xlf, xref_lf, t_offset + t_est * N_HR * N_HR, t_offset + (t_est + 1) * N_HR * N_HR - 1);
get_onesnap(Xlf, xmov_lf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1);
//Initialize with zeros
initialize(N_HR * N_HR, xref_hf, 0.0);
initialize(N_HR * N_HR, w, 0.0);
// Propagation from previous planes
NLmean(xref_hf, w, xref_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
// Normalize and put back
norm_by_weight(N_HR*N_HR, xref_hf, w);
put_onesnap(Xrec, xref_hf, t_offset + t_est * N_HR * N_HR, t_offset + (t_est + 1) * N_HR * N_HR - 1);
}
}
void propag_backward(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_last, int t_bound2, int t_offset)
{
for (int t_est = t_last - 1; t_est >= t_bound2; --t_est)
{
int t_prev = t_est + 1;
double xref_lf[N_HR * N_HR], xref_hf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR], w[N_HR * N_HR];
get_onesnap(Xlf, xref_lf, t_offset + t_est * N_HR * N_HR, t_offset + (t_est + 1) * N_HR * N_HR - 1);
get_onesnap(Xlf, xmov_lf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1);
//Initialize with zeros
initialize(N_HR * N_HR, xref_hf, 0.0);
initialize(N_HR * N_HR, w, 0.0);
// Propagation from previous planes
NLmean(xref_hf, w, xref_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
// Normalize and put back
norm_by_weight(N_HR*N_HR, xref_hf, w);
put_onesnap(Xrec, xref_hf, t_offset + t_est * N_HR * N_HR, t_offset + (t_est + 1) * N_HR * N_HR - 1);
}
}
void propag_2planes(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_mid, int t_offset)
{
double xref_lf[N_HR * N_HR], xref_hf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR], w[N_HR * N_HR];
int t_prev = t_mid - 1;
int t_after = t_mid + 1;
//Initialize with zeros
initialize(N_HR * N_HR, xref_hf, 0.0);
initialize(N_HR * N_HR, w, 0.0);
get_onesnap(Xlf, xref_lf, t_offset + t_mid * N_HR * N_HR, t_offset + (t_mid + 1) * N_HR * N_HR - 1);
get_onesnap(Xlf, xmov_lf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + t_prev * N_HR * N_HR, t_offset + (t_prev + 1) * N_HR * N_HR - 1);
NLmean(xref_hf, w, xref_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
get_onesnap(Xlf, xmov_lf, t_offset + t_after * N_HR * N_HR, t_offset + (t_after + 1) * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + t_after * N_HR * N_HR, t_offset + (t_after + 1) * N_HR * N_HR - 1);
NLmean(xref_hf, w, xref_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
// Normalize and put back
norm_by_weight(N_HR*N_HR, xref_hf, w);
put_onesnap(Xrec, xref_hf, t_offset + t_mid * N_HR * N_HR, t_offset + (t_mid + 1) * N_HR * N_HR - 1);
}
void propag_towardcenter(double *Xrec, double *Xlf, int *gridy, int *gridz, int *accids, int t_first, int t_offset)
{
double xref1_lf[N_HR * N_HR], xref2_lf[N_HR * N_HR], xmov_lf[N_HR * N_HR], xmov_hf[N_HR * N_HR];
double xref1_hf[N_HR * N_HR], w1[N_HR * N_HR], xref2_hf[N_HR * N_HR], w2[N_HR * N_HR];
int tc = (int)SCALE_FACTOR_TIME/2;
if (SCALE_FACTOR_TIME % 2) { tc = (int)SCALE_FACTOR_TIME/2 + 1; }
for (int td = 1; td < tc; td++)
{
int t1 = t_first + td; // bound on left side
int t2 = t_first + SCALE_FACTOR_TIME - td; // bound on right side
// Initialize with zeros
initialize(N_HR * N_HR, xref1_hf, 0.0);
initialize(N_HR * N_HR, w1, 0.0);
initialize(N_HR * N_HR, xref2_hf, 0.0);
initialize(N_HR * N_HR, w2, 0.0);
get_onesnap(Xlf, xref1_lf, t_offset + t1 * N_HR * N_HR, t_offset + (t1 + 1) * N_HR * N_HR - 1);
get_onesnap(Xlf, xref2_lf, t_offset + t2 * N_HR * N_HR, t_offset + (t2 + 1) * N_HR * N_HR - 1);
//Propagate from left bound
get_onesnap(Xlf, xmov_lf, t_offset + (t1 - 1) * N_HR * N_HR, t_offset + t1 * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + (t1 - 1) * N_HR * N_HR, t_offset + t1 * N_HR * N_HR - 1);
NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
NLmean(xref2_hf, w2, xref2_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
//Propagate from right bound
get_onesnap(Xlf, xmov_lf, t_offset + (t2 + 1) * N_HR * N_HR, t_offset + (t2 + 2) * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + (t2 + 1) * N_HR * N_HR, t_offset + (t2 + 2) * N_HR * N_HR - 1);
NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
NLmean(xref2_hf, w2, xref2_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
// Normalize and put back
norm_by_weight(N_HR*N_HR, xref1_hf, w1);
put_onesnap(Xrec, xref1_hf, t_offset + t1 * N_HR * N_HR, t_offset + (t1 + 1) * N_HR * N_HR - 1);
norm_by_weight(N_HR*N_HR, xref2_hf, w2);
put_onesnap(Xrec, xref2_hf, t_offset + t2 * N_HR * N_HR, t_offset + (t2 + 1) * N_HR * N_HR - 1);
}
// Last plane in the center
if (SCALE_FACTOR_TIME % 2 == 0)
{
initialize(N_HR * N_HR, xref1_hf, 0.0);
initialize(N_HR * N_HR, w1, 0.0);
get_onesnap(Xlf, xref1_lf, t_offset + (t_first + tc) * N_HR * N_HR, t_offset + (t_first + tc + 1) * N_HR * N_HR - 1);
get_onesnap(Xlf, xmov_lf, t_offset + (t_first + tc - 1) * N_HR * N_HR, t_offset + (t_first + tc) * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + (t_first + tc - 1) * N_HR * N_HR, t_offset + (t_first + tc) * N_HR * N_HR - 1);
NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
get_onesnap(Xlf, xmov_lf, t_offset + (t_first + tc + 1) * N_HR * N_HR, t_offset + (t_first + tc + 2) * N_HR * N_HR - 1);
get_onesnap(Xrec, xmov_hf, t_offset + (t_first + tc + 1) * N_HR * N_HR, t_offset + (t_first + tc + 2) * N_HR * N_HR - 1);
NLmean(xref1_hf, w1, xref1_lf, xmov_lf, xmov_hf, gridy, gridz, accids);
norm_by_weight(N_HR*N_HR, xref1_hf, w1);
put_onesnap(Xrec, xref1_hf, t_offset + (t_first + tc) * N_HR * N_HR, t_offset + (t_first + tc + 1) * N_HR * N_HR - 1);
}
}
/* **********************************************************************************/
/* ********************************** MAIN FUNCTION *********************************/
/* **********************************************************************************/
int main()
{
/* Create the file to save results */
char *varnames[NUM_VARS] = {"x_rec_all"};
create_netcdf(FILENAME_WR, NUM_VARS, varnames);
/* Allocate memory */
double *x_fusion_lf_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double));
double *x_fusion_hf_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double));
double *x_rec_all = (double*)malloc(NUM_3DSNAPS * NUM_2DSNAPS * N_HR * N_HR * sizeof(double));
/* read all snapshots */
size_t start_ids[4] = {0, 0, 0, 0};
size_t count_ids[4] = {NUM_3DSNAPS, NUM_2DSNAPS, N_HR, N_HR };
read_netcdf(FILENAME_RD, "Uinterp_all", start_ids, count_ids, x_fusion_lf_all);
read_netcdf(FILENAME_RD, "Udiff_all", start_ids, count_ids, x_fusion_hf_all);
double time_all_start = omp_get_wtime();
double *x_current_lf = (double*)malloc(N_HR * N_HR * sizeof(double));
double *x_current_hf = (double*)malloc(N_HR * N_HR * sizeof(double));
double *x_rec = (double*)malloc(N_HR * N_HR * sizeof(double));
long int grid_size = N_HR * N_HR * NEIGHBOR_FULLSIZE * NEIGHBOR_FULLSIZE * SIM_FULLSIZE * SIM_FULLSIZE;
int *gridpatches_y = (int*)malloc(grid_size * sizeof(int));
int *gridpatches_z = (int*)malloc(grid_size * sizeof(int));
int *acc_ids = (int*)malloc(ACC_FULLSIZE * ACC_FULLSIZE * sizeof(int));
generate_grids(gridpatches_y, gridpatches_z, acc_ids);
for(int snap3d_id = 0; snap3d_id < NUM_3DSNAPS; snap3d_id++)
{
int t_offset = snap3d_id * NUM_2DSNAPS * N_HR*N_HR;
// put first PIV
get_onesnap(x_fusion_hf_all, x_current_hf, t_offset + 0 * N_HR * N_HR, t_offset + 1 * N_HR * N_HR - 1);
put_onesnap(x_rec_all, x_current_hf, t_offset + 0 * N_HR * N_HR, t_offset + 1 * N_HR * N_HR - 1);
int block_id;
for(block_id = 0; block_id < NUM_BLOCKS; block_id++)
{
double time_start = omp_get_wtime();
int t_first = SCALE_FACTOR_TIME*block_id;
int t_last = SCALE_FACTOR_TIME*(block_id+1);
// Put last PIV of the block
get_onesnap(x_fusion_hf_all, x_current_hf, t_offset + t_last * N_HR * N_HR, t_offset + (t_last + 1) * N_HR * N_HR - 1);
put_onesnap(x_rec_all, x_current_hf, t_offset + t_last * N_HR * N_HR, t_offset + (t_last + 1) * N_HR * N_HR - 1);
propag_towardcenter(x_rec_all, x_fusion_lf_all, gridpatches_y, gridpatches_z, acc_ids, t_first, t_offset);
printf("\n Estimated block %i (total 23) in 3D snapshot %i (total 37) in %f seconds \n", block_id, snap3d_id, (double)omp_get_wtime() - time_start);
}
}
// Write to file
write_netcdf(FILENAME_WR, "x_rec_all", start_ids, count_ids, x_rec_all);
/* free memory */
free(x_rec); free(x_current_lf); free(x_current_hf);
free(x_rec_all); free(x_fusion_lf_all); free(x_fusion_hf_all);
free(gridpatches_y); free(gridpatches_z); free(acc_ids);
printf("\n FINISH ALL COMPUTATION IN %f SECONDS \n", (double)omp_get_wtime() - time_all_start);
return 0;
}
|
agent_uid_map.h | // -----------------------------------------------------------------------------
//
// Copyright (C) The BioDynaMo Project.
// All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
//
// See the LICENSE file distributed with this work for details.
// See the NOTICE file distributed with this work for additional information
// regarding copyright ownership.
//
// -----------------------------------------------------------------------------
#ifndef CORE_CONTAINER_AGENT_UID_MAP_H_
#define CORE_CONTAINER_AGENT_UID_MAP_H_
#include <limits>
#include <vector>
#include "core/agent/agent_uid.h"
namespace bdm {
/// AgentUidMap is an associative container that exploits the properties of
/// AgentUid to store data in contiguous arrays. Inserting and reading
/// elements at the same time is thread-safe as long as the keys are different.
/// These operations with distinct keys are lock-free and atomic-free, and thus
/// offer high performance.
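/// Usage sketch (illustrative only; `uid` is assumed to be an AgentUid taken
/// from an existing agent rather than constructed here):
///
///   AgentUidMap<int> counts(1024);   // capacity must cover the uid indices
///   counts.Insert(uid, 42);
///   if (counts.Contains(uid)) {
///     int value = counts[uid];       // read back the stored value
///   }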
template <typename TValue>
class AgentUidMap {
struct Iterator {
AgentUidMap* map_;
uint64_t idx_;
};
public:
AgentUidMap() {}
AgentUidMap(const AgentUidMap& other)
: data_(other.data_), agent_uid_reused_(other.agent_uid_reused_) {}
explicit AgentUidMap(uint64_t initial_size) {
data_.resize(initial_size);
agent_uid_reused_.resize(initial_size, AgentUid::kReusedMax);
}
void resize(uint64_t new_size) { // NOLINT
data_.resize(new_size);
agent_uid_reused_.resize(new_size, AgentUid::kReusedMax);
}
void clear() { // NOLINT
for (auto& el : agent_uid_reused_) {
el = AgentUid::kReusedMax;
}
}
void ParallelClear() {
#pragma omp parallel for
for (uint64_t i = 0; i < data_.size(); ++i) {
agent_uid_reused_[i] = AgentUid::kReusedMax;
}
}
uint64_t size() const { // NOLINT
return data_.size();
}
void Remove(const AgentUid& key) {
if (key.GetIndex() >= data_.size()) {
return;
}
agent_uid_reused_[key.GetIndex()] = AgentUid::kReusedMax;
}
bool Contains(const AgentUid& uid) const {
auto idx = uid.GetIndex();
if (idx >= data_.size()) {
return false;
}
return uid.GetReused() == agent_uid_reused_[idx];
}
void Insert(const AgentUid& uid, const TValue& value) {
auto idx = uid.GetIndex();
data_[idx] = value;
agent_uid_reused_[idx] = uid.GetReused();
}
const TValue& operator[](const AgentUid& key) const {
return data_[key.GetIndex()];
}
typename AgentUid::Reused_t GetReused(uint64_t index) const {
return agent_uid_reused_[index];
}
private:
std::vector<TValue> data_;
std::vector<typename AgentUid::Reused_t> agent_uid_reused_;
};
} // namespace bdm
#endif // CORE_CONTAINER_AGENT_UID_MAP_H_
|
absorb.h | #ifndef ABSORB_H
#define ABSORB_H
#include <math.h>
#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <complex.h>
#include <string.h>
#define PI 3.1415926535897
#define MIN(x,y) ((x)<(y)?(x):(y))
/**
* Ported from ModelingQuasiPeriodic2D 2017.09.19
* Log decay of Q from qInterior to qmin with approach to boundary
*
* 2018.05.24
* - dt is now rolled up in this field to save compute in the fused derivs + time update method
* invQ ==> dtOmegaInvQ
*/
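/* In effect each grid point receives dtOmegaInvQ = dt * 2 * PI * freqQ / Q(k),
 * where Q(k) ramps log-linearly from qMin at the boundary to qInterior at
 * nsponge cells inside: Q(k) = exp(log(qMin) + k/(nsponge-1) * (log(qInterior) - log(qMin))). */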
void setupDtOmegaInvQ_2D(
const long freeSurface,
const long nx,
const long nz,
const long nsponge,
const long nthread,
const float dt,
const float freqQ,
const float qMin,
const float qInterior,
float *dtOmegaInvQ) {
if (freqQ < FLT_EPSILON) {
char msg[1000];
sprintf(msg, "Error -- freqQ [%f] is too small!\n", freqQ);
perror(msg);
exit(EXIT_FAILURE);
}
// check for unphysical q values
if ((qMin < FLT_EPSILON) && (qInterior < FLT_EPSILON)) {
printf("Warning -- qMin and qMax unphysical, dtOmegaInvQ set to zero!\n");
memset(dtOmegaInvQ, 0, (size_t)nx * nz * sizeof(float)); /* zero the full nx*nz field */
return;
}
float *qprof = new float[nsponge];
const double lqmin = log(qMin);
const double lqmax = log(qInterior);
for (long ksponge = 0; ksponge < nsponge; ksponge++) {
const double dk = (double) (ksponge) / (double) (nsponge - 1);
const double lq = lqmin + dk * (lqmax - lqmin);
qprof[ksponge] = expf(lq);
}
#pragma omp parallel for num_threads(nthread) schedule(guided)
for (long kx = 0; kx < nx; kx++) {
#pragma omp simd
for (long kz = 0; kz < nz; kz++) {
const long ksx = MIN(kx, (nx - 1 - kx));
const long ksz = (freeSurface) ? (nz - 1 - kz) : MIN(kz, (nz - 1 - kz));
const long ksponge = MIN(ksx, ksz);
dtOmegaInvQ[kx * nz + kz] = dt * 2.0 * PI * freqQ / qInterior;
if (ksponge < nsponge) {
dtOmegaInvQ[kx * nz + kz] = dt * 2.0 * PI * freqQ / qprof[ksponge];
}
}
}
delete[] qprof;
}
/**
* Ported from ModelingQuasiPeriodic2D 2017.09.19
* Log decay of Q from qInterior to qmin with approach to boundary
*
* 2018.05.24
* - dt is now rolled up in this field to save compute in the fused derivs + time update method
* invQ ==> dtOmegaInvQ
*/
void setupDtOmegaInvQ_3D(
const long freeSurface,
const long nx,
const long ny,
const long nz,
const long nsponge,
const long nthread,
const float dt,
const float freqQ,
const float qMin,
const float qInterior,
float *dtOmegaInvQ) {
if (freqQ < FLT_EPSILON) {
char msg[1000];
sprintf(msg, "Error -- freqQ [%f] is too small!\n", freqQ);
perror(msg);
exit(EXIT_FAILURE);
}
// check for unphysical q values
if ((qMin < FLT_EPSILON) && (qInterior < FLT_EPSILON)) {
printf("Warning -- qMin and qMax unphysical, dtOmegaInvQ set to zero!\n");
memset(dtOmegaInvQ, 0, (size_t)nx * ny * nz * sizeof(float)); /* zero the full nx*ny*nz field */
return;
}
const long nynz = ny * nz;
float *qprof = new float[nsponge];
const float qmin = qMin;
const float qmax = qInterior;
const float lqmin = log(qmin);
const float lqmax = log(qmax);
for (long ksponge = 0; ksponge < nsponge; ksponge++){
const float dk = (float)(ksponge) / (float)(nsponge - 1);
const float lq = lqmin + dk * (lqmax - lqmin);
qprof[ksponge] = exp(lq);
}
#pragma omp parallel for num_threads(nthread) schedule(static)
for (long kz = 0; kz < nz; kz++) {
for (long kx = 0; kx < nx; kx++) {
const long kxnynz = kx * nynz;
#pragma omp simd
for (long ky = 0; ky < ny; ky++) {
const long ksx = MIN(kx, (nx - 1 - kx));
const long ksy = MIN(ky, (ny - 1 - ky));
const long ksz = (freeSurface) ? (nz - 1 - kz) : MIN(kz, (nz - 1 - kz));
const long ksponge = MIN(ksx, MIN(ksy, ksz));
const long kynz = ky * nz;
const long kxnynz_kynz = kxnynz + kynz;
dtOmegaInvQ[kxnynz_kynz + kz] = dt * 2 * PI * freqQ / qInterior;
if (ksponge < nsponge) {
dtOmegaInvQ[kxnynz_kynz + kz] = dt * 2 * PI * freqQ / qprof[ksponge];
}
}
}
}
delete [] qprof;
}
#endif
|
SpatialConvolutionMM.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialConvolutionMM.c"
#else
static inline void THNN_(SpatialConvolutionMM_shapeCheck)(
THTensor *input, THTensor *gradOutput,
THTensor *weight, THTensor *bias,
int kH, int kW, int dH, int dW, int padH, int padW) {
THArgCheck(kW > 0 && kH > 0, 9,
"kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
THArgCheck(dW > 0 && dH > 0, 11,
"stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
THNN_ARGCHECK(weight->nDimension == 2 || weight->nDimension == 4, 5, weight,
"2D or 4D weight tensor expected, but got: %s");
if (bias != NULL) {
THNN_CHECK_DIM_SIZE(bias, 1, 0, weight->size[0]);
}
int ndim = input->nDimension;
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
THNN_ARGCHECK(ndim == 3 || ndim == 4, 2, input,
"3D or 4D input tensor expected but got: %s");
long nInputPlane = weight->size[1] / (kH * kW);
long inputHeight = input->size[dimh];
long inputWidth = input->size[dimw];
long nOutputPlane = weight->size[0];
long outputHeight = (inputHeight + 2*padH - kH) / dH + 1;
long outputWidth = (inputWidth + 2*padW - kW) / dW + 1;
if (outputWidth < 1 || outputHeight < 1)
THError("Given input size: (%d x %d x %d). "
"Calculated output size: (%d x %d x %d). Output size is too small",
nInputPlane,inputHeight,inputWidth,nOutputPlane,outputHeight,outputWidth);
THNN_CHECK_DIM_SIZE(input, ndim, dimf, nInputPlane);
if (gradOutput != NULL) {
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth);
}
}
static THTensor* THNN_(view_weight_MM2d)(THTensor *weight) {
weight = THTensor_(newContiguous)(weight);
if (weight->nDimension == 4) {
long s1 = weight->size[0];
long s2 = weight->size[1] * weight->size[2] * weight->size[3];
THTensor *old_weight = weight;
weight = THTensor_(newWithStorage2d)(weight->storage, weight->storageOffset,
s1, -1, s2, -1);
THTensor_(free)(old_weight);
}
return weight;
}
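/* The frame update below follows the classic im2col + GEMM strategy: the input
   frame is unfolded into finput (one column per output location), the bias is
   broadcast into the output (or the output is zeroed), and the convolution then
   reduces to a single matrix multiply output2d = weight * finput via addmm. */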
static void THNN_(SpatialConvolutionMM_updateOutput_frame)(
THTensor *input,
THTensor *output,
THTensor *weight,
THTensor *bias,
THTensor *finput,
int kW,
int kH,
int dW,
int dH,
int padW,
int padH,
long nInputPlane,
long inputWidth,
long inputHeight,
long nOutputPlane,
long outputWidth,
long outputHeight)
{
long i;
THTensor *output2d;
THNN_(unfolded_copy)(finput, input, kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
outputWidth, outputHeight);
output2d = THTensor_(newWithStorage2d)(output->storage, output->storageOffset,
nOutputPlane, -1,
outputHeight*outputWidth, -1);
if (bias) {
for(i = 0; i < nOutputPlane; i++)
THVector_(fill)
(output->storage->data + output->storageOffset + output->stride[0] * i,
THTensor_(get1d)(bias, i), outputHeight*outputWidth);
} else {
THTensor_(zero)(output);
}
THTensor_(addmm)(output2d, 1, output2d, 1, weight, finput);
THTensor_(free)(output2d);
}
void THNN_(SpatialConvolutionMM_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
THTensor *weight,
THTensor *bias,
THTensor *finput,
THTensor *fgradInput,
int kW,
int kH,
int dW,
int dH,
int padW,
int padH)
{
weight = THNN_(view_weight_MM2d)(weight);
THNN_(SpatialConvolutionMM_shapeCheck)
(input, NULL, weight, bias, kH, kW, dH, dW, padH, padW);
input = THTensor_(newContiguous)(input);
int ndim = input->nDimension;
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
long nInputPlane = input->size[dimf];
long inputHeight = input->size[dimh];
long inputWidth = input->size[dimw];
long nOutputPlane = weight->size[0];
long outputHeight = (inputHeight + 2*padH - kH) / dH + 1;
long outputWidth = (inputWidth + 2*padW - kW) / dW + 1;
if(input->nDimension == 3)
{
THTensor_(resize2d)(finput, kW*kH*nInputPlane, outputHeight*outputWidth);
THTensor_(resize3d)(output, nOutputPlane, outputHeight, outputWidth);
THNN_(SpatialConvolutionMM_updateOutput_frame)
(input, output, weight, bias, finput,
kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
nOutputPlane, outputWidth, outputHeight);
}
else
{
long T = input->size[0];
long t;
THTensor_(resize3d)(finput, T, kW*kH*nInputPlane, outputHeight*outputWidth);
THTensor_(resize4d)(output, T, nOutputPlane, outputHeight, outputWidth);
#pragma omp parallel for private(t)
for(t = 0; t < T; t++)
{
THTensor *input_t = THTensor_(newSelect)(input, 0, t);
THTensor *output_t = THTensor_(newSelect)(output, 0, t);
THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
THNN_(SpatialConvolutionMM_updateOutput_frame)
(input_t, output_t, weight, bias, finput_t,
kW, kH, dW, dH, padW, padH,
nInputPlane, inputWidth, inputHeight,
nOutputPlane, outputWidth, outputHeight);
THTensor_(free)(input_t);
THTensor_(free)(output_t);
THTensor_(free)(finput_t);
}
}
THTensor_(free)(input);
THTensor_(free)(weight);
}
static void THNN_(SpatialConvolutionMM_updateGradInput_frame)(
THTensor *gradInput,
THTensor *gradOutput,
THTensor *weight,
THTensor *fgradInput,
int kW,
int kH,
int dW,
int dH,
int padW,
int padH)
{
THTensor *gradOutput2d = THTensor_(newWithStorage2d)
(gradOutput->storage, gradOutput->storageOffset,
gradOutput->size[0], -1,
gradOutput->size[1]*gradOutput->size[2], -1);
THTensor_(addmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput2d);
THTensor_(free)(gradOutput2d);
THTensor_(zero)(gradInput);
THNN_(unfolded_acc)(fgradInput, gradInput, kW, kH, dW, dH,
padW, padH,
gradInput->size[0], gradInput->size[2], gradInput->size[1],
gradOutput->size[2], gradOutput->size[1]);
}
void THNN_(SpatialConvolutionMM_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
THTensor *weight,
THTensor *finput,
THTensor *fgradInput,
int kW,
int kH,
int dW,
int dH,
int padW,
int padH)
{
weight = THNN_(view_weight_MM2d)(weight);
THNN_(SpatialConvolutionMM_shapeCheck)
(input, gradOutput, weight, NULL, kH, kW, dH, dW, padH, padW);
input = THTensor_(newContiguous)(input);
gradOutput = THTensor_(newContiguous)(gradOutput);
THTensor_(resizeAs)(gradInput, input);
THTensor_(resizeAs)(fgradInput, finput);
// depending on the BLAS library, fgradInput (result tensor) might
// be left uninitialized on zero alpha, which might lead to weird behavior
// hence, to be safe, zero it
THTensor_(zero)(fgradInput);
THTensor *tweight = THTensor_(new)();
THTensor_(transpose)(tweight, weight, 0, 1);
if(input->nDimension == 3)
{
THNN_(SpatialConvolutionMM_updateGradInput_frame)(gradInput, gradOutput,
tweight, fgradInput,
kW, kH, dW, dH, padW, padH);
}
else
{
long T = input->size[0];
long t;
#pragma omp parallel for private(t)
for(t = 0; t < T; t++)
{
THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t);
THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t);
THNN_(SpatialConvolutionMM_updateGradInput_frame)(gradInput_t, gradOutput_t,
tweight, fgradInput_t,
kW, kH, dW, dH, padW, padH);
THTensor_(free)(gradInput_t);
THTensor_(free)(gradOutput_t);
THTensor_(free)(fgradInput_t);
}
}
THTensor_(free)(tweight);
THTensor_(free)(input);
THTensor_(free)(gradOutput);
THTensor_(free)(weight);
}
static void THNN_(SpatialConvolutionMM_accGradParameters_frame)(
THTensor *gradOutput,
THTensor *gradWeight,
THTensor *gradBias,
THTensor *finput,
real scale)
{
long i;
THTensor *gradOutput2d = THTensor_(newWithStorage2d)
(gradOutput->storage, gradOutput->storageOffset,
gradOutput->size[0], -1,
gradOutput->size[1]*gradOutput->size[2], -1);
THTensor *tfinput = THTensor_(new)();
THTensor_(transpose)(tfinput, finput, 0, 1);
THTensor_(addmm)(gradWeight, 1, gradWeight, scale, gradOutput2d, tfinput);
THTensor_(free)(tfinput);
if (gradBias) {
for(i = 0; i < gradBias->size[0]; i++)
{
long k;
real sum = 0;
real *data = gradOutput2d->storage->data + gradOutput2d->storageOffset + i*gradOutput2d->stride[0];
for(k = 0; k < gradOutput2d->size[1]; k++)
sum += data[k];
(gradBias->storage->data + gradBias->storageOffset)[i] += scale*sum;
}
}
THTensor_(free)(gradOutput2d);
}
void THNN_(SpatialConvolutionMM_accGradParameters)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradWeight,
THTensor *gradBias,
THTensor *finput,
THTensor *fgradInput,
int kW,
int kH,
int dW,
int dH,
int padW,
int padH,
accreal scale_)
{
THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous");
if (gradBias)
THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous");
real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
gradWeight = THNN_(view_weight_MM2d)(gradWeight);
THNN_(SpatialConvolutionMM_shapeCheck)
(input, gradOutput, gradWeight, gradBias, kH, kW, dH, dW, padH, padW);
input = THTensor_(newContiguous)(input);
gradOutput = THTensor_(newContiguous)(gradOutput);
if(input->nDimension == 3)
{
THNN_(SpatialConvolutionMM_accGradParameters_frame)(gradOutput, gradWeight,
gradBias, finput, scale);
}
else
{
long T = input->size[0];
long t;
for(t = 0; t < T; t++)
{
THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t);
THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
THNN_(SpatialConvolutionMM_accGradParameters_frame)(gradOutput_t, gradWeight,
gradBias, finput_t, scale);
THTensor_(free)(gradOutput_t);
THTensor_(free)(finput_t);
}
}
THTensor_(free)(input);
THTensor_(free)(gradOutput);
THTensor_(free)(gradWeight);
}
#endif
|
GB_unop__identity_fp32_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_fp32_fp64)
// op(A') function: GB (_unop_tran__identity_fp32_fp64)
// C type: float
// A type: double
// cast: float cij = (float) aij
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
float z = (float) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = (float) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_fp32_fp64)
(
float *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
float z = (float) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
float z = (float) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_fp32_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__gt_fp32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__gt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_01__gt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__gt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_03__gt_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_fp32)
// A*D function (colscale): GB (_AxD__gt_fp32)
// D*A function (rowscale): GB (_DxB__gt_fp32)
// C+=B function (dense accum): GB (_Cdense_accumB__gt_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__gt_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_fp32)
// C=scalar+B GB (_bind1st__gt_fp32)
// C=scalar+B' GB (_bind1st_tran__gt_fp32)
// C=A+scalar GB (_bind2nd__gt_fp32)
// C=A'+scalar GB (_bind2nd_tran__gt_fp32)
// C type: bool
// A type: float
// B,b type: float
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
float bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GT || GxB_NO_FP32 || GxB_NO_GT_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__gt_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__gt_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__gt_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__gt_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__gt_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = Bx [p] ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__gt_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = Ax [p] ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__gt_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = Ax [pA] ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__gt_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
fft-cuda.c | /* Copyright 2013, 2015. The Regents of the University of California.
* Copyright 2019. Uecker Lab, University Medical Center Göttingen.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2012-2019 Martin Uecker <martin.uecker@med.uni-goettingen.de>
* Christian Holme <christian.holme@med.uni-goettingen.de>
*
*
* Internal interface to the CUFFT library used in fft.c.
*/
#include <stdbool.h>
#include <complex.h>
#include <assert.h>
#include <limits.h>
#include "misc/misc.h"
#include "num/multind.h"
#include "fft-cuda.h"
#ifdef USE_CUDA
#include <cufft.h>
#include "num/gpuops.h"
#ifndef CFL_SIZE
#define CFL_SIZE sizeof(complex float)
#endif
struct fft_cuda_plan_s {
cufftHandle cufft;
struct fft_cuda_plan_s* chain;
bool backwards;
long batch;
long idist;
long odist;
};
struct iovec {
long n;
long is;
long os;
};
// detect if flags has blocks of 1's separated by 0's
static bool noncontiguous_flags(int D, unsigned long flags)
{
bool o = false;
bool z = false;
for (int i = 0; i < D; i++) {
bool curr_bit = MD_IS_SET(flags, i);
if (curr_bit) // found a block of ones
o = true;
if (o && !curr_bit) // found the end of a block of ones
z = true;
if (o && z && curr_bit) // found a second block of ones
return true;
}
return false;
}
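// Illustrative note (not part of the original source): noncontiguous_flags
// returns true only when the set bits of 'flags' form more than one
// contiguous block within the first D bits, e.g. for D = 4:
//
//	noncontiguous_flags(4, 0xEUL);	// 0b1110 -> one block  -> false
//	noncontiguous_flags(4, 0xBUL);	// 0b1011 -> two blocks -> true
//	noncontiguous_flags(4, 0x8UL);	// 0b1000 -> one block  -> false
//
// fft_cuda_plan0 below refuses such noncontiguous patterns and lets the
// caller split them into chained plans.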
static struct fft_cuda_plan_s* fft_cuda_plan0(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], const long istrides[D], bool backwards)
{
// TODO: This is not optimal, as it will often create separate fft's where they
// are not needed. And since we compute blocks, we could also recurse
// into both blocks...
if (noncontiguous_flags(D, flags))
return NULL;
PTR_ALLOC(struct fft_cuda_plan_s, plan);
unsigned int N = D;
plan->batch = 1;
plan->odist = 0;
plan->idist = 0;
plan->backwards = backwards;
plan->chain = NULL;
struct iovec dims[N];
struct iovec hmdims[N];
assert(0 != flags);
// the cufft interface is strange, but we do our best...
unsigned int k = 0;
unsigned int l = 0;
for (unsigned int i = 0; i < N; i++) {
if (1 == dimensions[i])
continue;
if (MD_IS_SET(flags, i)) {
dims[k].n = dimensions[i];
dims[k].is = istrides[i] / CFL_SIZE;
dims[k].os = ostrides[i] / CFL_SIZE;
k++;
} else {
hmdims[l].n = dimensions[i];
hmdims[l].is = istrides[i] / CFL_SIZE;
hmdims[l].os = ostrides[i] / CFL_SIZE;
l++;
}
}
assert(k > 0);
int cudims[k];
int cuiemb[k];
int cuoemb[k];
long batchdims[l];
long batchistr[l];
long batchostr[l];
int lis = dims[0].is;
int los = dims[0].os;
if (k > 3)
goto errout;
for (unsigned int i = 0; i < k; i++) {
// assert(dims[i].is == lis);
// assert(dims[i].os == los);
cudims[k - 1 - i] = dims[i].n;
cuiemb[k - 1 - i] = dims[i].n;
cuoemb[k - 1 - i] = dims[i].n;
lis = dims[i].n * dims[i].is;
los = dims[i].n * dims[i].os;
}
for (unsigned int i = 0; i < l; i++) {
batchdims[i] = hmdims[i].n;
batchistr[i] = hmdims[i].is;
batchostr[i] = hmdims[i].os;
}
int istride = dims[0].is;
int ostride = dims[0].os;
int idist = lis;
int odist = los;
int cubs = 1;
// check that batch dimensions can be collapsed to one
unsigned int bi = md_calc_blockdim(l, batchdims, batchistr, hmdims[0].is);
unsigned int bo = md_calc_blockdim(l, batchdims, batchostr, hmdims[0].os);
if (bi != bo)
goto errout;
if (bi > 0) {
idist = hmdims[0].is;
odist = hmdims[0].os;
cubs = md_calc_size(bi, batchdims);
}
if (l != bi) {
// check that batch dimensions can be collapsed to one
if (l - bi != md_calc_blockdim(l - bi, batchdims + bi, batchistr + bi, hmdims[bi].is))
goto errout;
if (l - bo != md_calc_blockdim(l - bo, batchdims + bo, batchostr + bo, hmdims[bo].os))
goto errout;
plan->idist = hmdims[bi].is;
plan->odist = hmdims[bo].os;
plan->batch = md_calc_size(l - bi, batchdims + bi);
}
assert(k <= 3);
int err;
#pragma omp critical
err = cufftPlanMany(&plan->cufft, k,
cudims, cuiemb, istride, idist,
cuoemb, ostride, odist, CUFFT_C2C, cubs);
if (CUFFT_SUCCESS != err)
goto errout;
return PTR_PASS(plan);
errout:
PTR_FREE(plan);
return NULL;
}
static unsigned long find_msb(unsigned long flags)
{
for (unsigned int i = 1; i < CHAR_BIT * sizeof(flags); i *= 2)
flags |= flags >> i;
return (flags + 1) / 2;
}
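// Illustrative note (not part of the original source): find_msb smears the
// highest set bit into all lower positions and then isolates it, e.g.
//
//	find_msb(0x68UL) == 0x40UL	// 0b01101000 -> 0b01000000
//	find_msb(0x01UL) == 0x01UL
//
// fft_cuda_plan below uses this to peel off the most significant transform
// dimension and chain a separate plan for the remaining flags.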
struct fft_cuda_plan_s* fft_cuda_plan(unsigned int D, const long dimensions[D], unsigned long flags, const long ostrides[D], const long istrides[D], bool backwards)
{
assert(0u != flags);
assert(0u == (flags & ~md_nontriv_dims(D, dimensions)));
struct fft_cuda_plan_s* plan = fft_cuda_plan0(D, dimensions, flags, ostrides, istrides, backwards);
if (NULL != plan)
return plan;
unsigned long msb = find_msb(flags);
if (flags & msb) {
struct fft_cuda_plan_s* plan = fft_cuda_plan0(D, dimensions, msb, ostrides, istrides, backwards);
if (NULL == plan)
return NULL;
plan->chain = fft_cuda_plan(D, dimensions, flags & ~msb, ostrides, ostrides, backwards);
if (NULL == plan->chain) {
fft_cuda_free_plan(plan);
return NULL;
}
return plan;
}
return NULL;
}
void fft_cuda_free_plan(struct fft_cuda_plan_s* cuplan)
{
if (NULL != cuplan->chain)
fft_cuda_free_plan(cuplan->chain);
cufftDestroy(cuplan->cufft);
xfree(cuplan);
}
void fft_cuda_exec(struct fft_cuda_plan_s* cuplan, complex float* dst, const complex float* src)
{
assert(cuda_ondevice(src));
assert(cuda_ondevice(dst));
assert(NULL != cuplan);
int err;
for (int i = 0; i < cuplan->batch; i++) {
if (CUFFT_SUCCESS != (err = cufftExecC2C(cuplan->cufft,
(cufftComplex*)src + i * cuplan->idist,
(cufftComplex*)dst + i * cuplan->odist,
(!cuplan->backwards) ? CUFFT_FORWARD : CUFFT_INVERSE)))
error("CUFFT: %d\n", err);
}
if (NULL != cuplan->chain)
fft_cuda_exec(cuplan->chain, dst, dst);
}
#endif
|
GB_binop__rdiv_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rdiv_fp64)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__rdiv_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__rdiv_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rdiv_fp64)
// A*D function (colscale): GB (_AxD__rdiv_fp64)
// D*A function (rowscale): GB (_DxB__rdiv_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__rdiv_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__rdiv_fp64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rdiv_fp64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rdiv_fp64)
// C=scalar+B GB (_bind1st__rdiv_fp64)
// C=scalar+B' GB (_bind1st_tran__rdiv_fp64)
// C=A+scalar GB (_bind2nd__rdiv_fp64)
// C=A'+scalar GB (_bind2nd_tran__rdiv_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (bij / aij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (y / x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_FP64 || GxB_NO_RDIV_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rdiv_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__rdiv_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rdiv_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rdiv_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rdiv_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rdiv_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rdiv_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__rdiv_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rdiv_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__rdiv_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rdiv_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rdiv_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = Bx [p] ;
Cx [p] = (bij / x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rdiv_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = Ax [p] ;
Cx [p] = (y / aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (aij / x) ; \
}
GrB_Info GB (_bind1st_tran__rdiv_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (y / aij) ; \
}
GrB_Info GB (_bind2nd_tran__rdiv_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
psimci.c | /*********************************************************************
* Program: psimci.c
* Author: Mauricio Caceres Bravo <caceres@nber.org>
* Created: Sun Feb 12 19:28:43 EST 2017
* Updated: Tue May 30 18:11:50 EDT 2017
* Purpose: Stata plugin to simulate a CI under H0: b = 0 for a
* treatment effect given a regression specification.
* Note: See stata.com/plugins for more on Stata plugins
*********************************************************************/
/**
* @file psimci.c
 * @author Mauricio Caceres Bravo
* @date 30 May 2017
* @brief Stata plugin to simulate a CI for a placebo treatment.
*
* See the documentation for simci.ado (e.g. help simci from Stata)
*
* @see http://www.stata.com/plugins
*/
#include <math.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <gsl/gsl_blas.h>
#include <gsl/gsl_linalg.h>
#include <gsl/gsl_matrix_double.h>
#include <gsl/gsl_permutation.h>
#include <gsl/gsl_randist.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_sort_vector.h>
#include "psimci.h"
#include "stplugin.h"
#include "stutils.c"
/**
* @brief Main function call will execute as Stata plugin
*
 * The function takes the first variable in the Stata variable list as the
 * dependent variable; the next k - 1 variables are covariates. @argv
 * contains the comma options passed by Stata, currently just the proportion
 * randomized and the number of simulations to run. See the documentation
 * for simci.ado or sim_ci below for more.
*
 * @param argc Number of arguments passed by Stata
* @param argv Comma options from Stata
* @return Modified variables in Stata
* @see Documentation for simci.ado
* @warning This is meant to be run from simci.ado and not by itself
*/
STDLL stata_call(int argc, char *argv[])
{
// Simple call to see if the plugin loaded
char buffer[16]; strcpy (buffer, argv[0]);
if ( strcmp(buffer, "check") == 0 ) return (0);
// Initialize the variables to use
ST_int i, j ;
ST_double z ;
ST_retcode rc ;
    // Get P and the number of reps. Note the 0-based indexing! The
    // function assumes P and reps were passed as the 1st and 2nd arguments.
double P = strtod (argv[0], NULL);
int reps = strtod (argv[1], NULL);
const size_t n = SF_in2();
const int k = SF_nvars();
    // If too few variables (we need at least 2 for a regression), exit
if (k < 2) {
return (102) ;
}
// Initialize GSL elements where to store data
gsl_matrix *X = gsl_matrix_alloc (n, k + 1);
gsl_vector *y = gsl_vector_alloc (n);
    // Not sure if there is a better way to read the data than this double loop.
// Note: Careful with the 0-based indexing!
for (i = SF_in1(); i <= SF_in2(); i++) {
if (SF_ifobs(i)) {
// Variables 2 through k are covariates
for (j = 2; j <= k; j++) {
// Note we leave the first column empty
if ( (rc = SF_vdata(j, i, &z)) ) return(rc);
gsl_matrix_set (X, i - 1, j - 1, z);
}
// Note we add the constant
gsl_matrix_set (X, i - 1, k, 1.0);
// Variable 1 is the dependent variable
if ( (rc = SF_vdata(1, i, &z)) ) return(rc);
gsl_vector_set (y, i - 1, z);
}
}
// Now we call the simulation function and output the results into b, mu
gsl_vector *b = gsl_vector_alloc (reps);
gsl_vector *mu = gsl_vector_alloc (reps);
sim_ci (X, y, P, reps, b, mu);
    // Not sure that there is a good way to output this into Stata, so I
// write to a file and read it back.
char outb[64], outmu[64];
strcpy (outb, argv[2]);
strcpy (outmu, argv[2]);
strcat (outb, "b");
strcat (outmu, "mu");
FILE *fb = fopen (outb, "wb");
FILE *fmu = fopen (outmu, "wb");
gsl_vector_fprintf (fb, b, "%15.9f");
gsl_vector_fprintf (fmu, mu, "%15.9f");
fclose (fb);
fclose (fmu);
// Cleanup
gsl_matrix_free (X);
gsl_vector_free (y);
gsl_vector_free (b);
gsl_vector_free (mu);
return (0);
}
/**
* @brief Simulate a confidence interval given X, y
*
* The idea is to simulate a non-parametric CI based on placebo
* assignments of a treatment variable. The program assigns
* treatment at random, hence a null effect, to individuals or
* clusters, optionally stratifying by any number of variables (or
* the means thereof, in the case of clusters). Consider
*
* Y_ij = a + b T_j + g X_ij + e_ij
*
* There are C = J choose PJ ways to treat the clusters (or C =
 * N choose PN in the case of individuals). If we computed b_ols
* for c = 1, ..., C we would know the exact distribution of our
* estimator, conditional on the data being representative of the
* study data. C is typically intractably large, hence we simulate
 * K draws with sum(T_jk) = PJ and run
*
* Y_ij = a + b_k T_jk + g X_ij + e_ij
*
* Let Fhat be the empirical cdf of b_k; a valid 1 - alpha CI for
* b is given by
*
* CI(1 - a) = [Fhat^-1(a / 2), Fhat^-1(1 - a / 2)]
*
* The function takes the @X as the covariate matrix, which
* must have k + 1 columns with the first column free, @y as
* the dependent variable, and outputs the results to @b, @mu
*
* @param X Covariate matrix with first column blank
* @param y Dependent variable
* @param P Proportion in treatment
 * @param reps Number of repetitions
* @param b Vector of length @reps; will output coefficients here
* @param mu Vector of length @reps; will output control means here
* @return Modified @b, @mu with coefficients and means
* @see Documentation for simci.ado
*/
int sim_ci (const gsl_matrix * X,
const gsl_vector * y,
const double P,
const int reps,
gsl_vector * b,
gsl_vector * mu)
{
const size_t n = X->size1;
const int k = X->size2;
const int np = ceil(n * P);
const int nc = n - np;
double *sy = malloc (sizeof(double));
gsl_vector *ones = gsl_vector_alloc (n);
gsl_vector_set_all (ones, 1.0);
gsl_blas_ddot (ones, y, sy);
// Set the random seed based on the time of day (seconds)
srand (time(NULL));
gsl_rng *rng = gsl_rng_alloc (gsl_rng_default);
gsl_rng_set (rng, rand());
// Get vector of 1s and 0s
gsl_vector *T = gsl_vector_alloc (n);
gsl_vector_set_zero (T);
for (int i = 0; i < np; i++) {
gsl_vector_set (T, i, 1.0);
}
// Initialize elements for parallel loop
gsl_vector *Tp ;
gsl_matrix *Xp ;
int nloops ;
double *sty ;
// Get the number of threads available to OMP
sf_printf("Parallelizing simulation; %d threads found:\n",
get_omp_num_threads());
    // Parallelize execution. Note: we need a copy of Xp and Tp for each
    // thread since they will be modified at each iteration; y does not
    // change, so it is shared.
#pragma omp parallel private(Xp, Tp, nloops, sty) shared(y, b, sy)
{
nloops = 0;
        // Allocate each thread its own copy
Tp = gsl_vector_alloc (n);
Xp = gsl_matrix_alloc (n, k);
sty = malloc (sizeof(double));
gsl_vector_memcpy (Tp, T);
gsl_matrix_memcpy (Xp, X);
// Parallel for loop through simulation
#pragma omp for
for (int r = 0; r < reps; r++) {
// 1. Shuffle treatment
// 2. Set as first column of covariate matrix
// 3. Get mean of y over controls
// 4. Store coefficient/mean
// 5. Repeat 1-4
// 6. ...
// 7. Profit?
            // note: the element size must match the vector's element type (double)
            gsl_ran_shuffle (rng, Tp->data, n, sizeof(double));
gsl_matrix_set_col (Xp, 0, Tp);
gsl_vector_set (b, r, sim_ols(Xp, y));
gsl_blas_ddot (Tp, y, sty);
gsl_vector_set (mu, r, (*sy - *sty) / nc);
++nloops;
}
        // Print a message saying how many iterations each thread completed.
        // Threads finish on their own schedule, so these messages appear at
        // disparate times; marking the block "critical" ensures that only
        // one thread executes it at a time, so the messages do not interleave.
#pragma omp critical
{
sf_printf("\tThread %d performed %d simulations.\n",
omp_get_thread_num(), nloops);
}
// Cleanup
gsl_matrix_free (Xp);
gsl_vector_free (Tp);
}
// Cleanup
gsl_vector_free (T);
gsl_rng_free (rng);
return (0);
}
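/*
 * Illustrative sketch (not part of the original plugin): given the vector b
 * filled by sim_ci, a two-sided 95% placebo CI under H0: b = 0 can be read
 * off the empirical distribution with sim_pctile, e.g.
 *
 *     double lo = sim_pctile (b, 0.025);
 *     double hi = sim_pctile (b, 0.975);
 *
 * which corresponds to CI(1 - a) = [Fhat^-1(a / 2), Fhat^-1(1 - a / 2)]
 * described above. Note that sim_pctile sorts its argument in place.
 */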
/**
* @brief Number of threads available to OMP
*
* Short wrapper to get number of threads available to OMP
*
* @return Number of threads available to OMP
*/
int get_omp_num_threads()
{
int thread_id;
int nthreads = 0;
#pragma omp parallel private(thread_id) shared(nthreads)
{
thread_id = omp_get_thread_num();
#pragma omp critical
{
nthreads = thread_id > nthreads? thread_id: nthreads;
}
}
nthreads++;
return (nthreads);
}
/**
* @brief Wrapper to run a linear regression
*
* All I want is the first coefficient of a linear regression. For
*
* Y = X beta
*
* I want (X' X)^-1 X' Y. GSL has solvers for a system of the form
*
* Ax = b
*
* Where A is a symmetric matrix. Take A = X' X and b = X' y, then
* we can use any number of routines to find x (especially since A
* is now symmetric).
*
* @param X A n by k gsl matrix containing covariates.
* @param y A n by 1 gsl vector containing the dependent variable
* @return The first coefficient of a linear regression.
 * @warning This is meant to be run within the simulation loop of sim_ci
*/
double sim_ols(const gsl_matrix * X, const gsl_vector * y)
{
// Allocate memory to express the system as Ax = b
gsl_matrix *A = gsl_matrix_alloc (X->size2, X->size2);
gsl_vector *b = gsl_vector_alloc (X->size2);
gsl_vector *x = gsl_vector_alloc (X->size2);
// Set A = X' X and b = X' y
gsl_blas_dgemm (CblasTrans, CblasNoTrans, 1.0, X, X, 0.0, A);
gsl_blas_dgemv (CblasTrans, 1.0, X, y, 0.0, b);
// Cholesky decomposition
gsl_linalg_cholesky_decomp1 (A);
gsl_linalg_cholesky_solve (A, b, x);
// You don't have to use Cholesky; a number of methods are available
//
// int s;
// gsl_permutation * P = gsl_permutation_alloc (X->size2);
// gsl_vector * tau = gsl_vector_alloc (X->size2);
//
// Householder
// gsl_linalg_HH_solve (A, b, x);
//
// LU decomposition
// gsl_linalg_LU_decomp (A, P, &s);
// gsl_linalg_LU_solve (A, P, b, x);
// gsl_permutation_free (P);
//
// QR decomposition
// gsl_linalg_QR_decomp (A, tau);
// gsl_linalg_QR_solve (A, tau, b, x);
// gsl_vector_free (tau);
// Free up space
gsl_matrix_free (A);
gsl_vector_free (b);
return (gsl_vector_get(x, 0));
}
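/*
 * Illustrative note (not part of the original plugin): when X contains only
 * the 0/1 treatment indicator in its first column and the constant, the
 * first OLS coefficient returned here is
 *
 *     b_T = mean(y | T = 1) - mean(y | T = 0)
 *
 * so each simulation draw in sim_ci then reduces to a placebo difference in
 * means.
 */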
/**
* @brief Get pctile of a function
*
* Basic wrapper to get the @pctile percentile of a function.
*
* @param x n by 1 gsl vector whose percentile we want.
* @param pctile Percentile
* @return @pctile percentile of x
*/
double sim_pctile(gsl_vector * x, double pctile)
{
gsl_sort_vector (x);
int n = x->size;
int i = floor(n * pctile);
double qq = gsl_vector_get (x, i);
    // note: the division must be floating-point, or the exact-percentile
    // case below is never reached
    if ((double) i / n == pctile) {
qq = (qq + gsl_vector_get (x, i + 1)) / 2;
}
return (qq);
}
|
multind.c | /* Copyright 2013-2015 The Regents of the University of California.
* Copyright 2016-2020. Uecker Lab. University Medical Center Göttingen.
* Copyright 2017. Intel Corporation.
* All rights reserved. Use of this source code is governed by
* a BSD-style license which can be found in the LICENSE file.
*
* Authors:
* 2012-2020 Martin Uecker <martin.uecker@med.uni-goettingen.de>
* 2019-2020 Sebastian Rosenzweig
* 2013 Frank Ong <frankong@berkeley.edu>
* 2017 Michael J. Anderson <michael.j.anderson@intel.com>
*
* Generic operations on multi-dimensional arrays. Most functions
* come in two flavours:
*
* 1. A basic version which takes the number of dimensions, an array
 * of long integers specifying the size of each dimension, the pointers
* to the data, and the size of each element and other required parameters.
* The data is assumed to be stored in column-major format.
*
* 2. An extended version which takes an array of long integers which
* specifies the strides for each argument.
*
* All functions should work on CPU and GPU and md_copy can be used
* to copy between CPU and GPU.
*
*/
#define _GNU_SOURCE
#include <string.h>
#include <assert.h>
#include <stdbool.h>
#include <alloca.h>
#include <strings.h>
#include "misc/misc.h"
#include "misc/types.h"
#include "misc/debug.h"
#include "misc/nested.h"
#include "num/optimize.h"
#ifdef USE_CUDA
#include "num/gpuops.h"
#endif
#include "multind.h"
/**
 * Generic function which loops over all dimensions of a set of
* multi-dimensional arrays and calls a given function for each position.
*/
void md_nary(unsigned int C, unsigned int D, const long dim[D], const long* str[C], void* ptr[C], md_nary_fun_t fun)
{
if (0 == D) {
NESTED_CALL(fun, (ptr));
return;
}
for (long i = 0; i < dim[D - 1]; i++) {
void* moving_ptr[C];
for (unsigned int j = 0; j < C; j++)
moving_ptr[j] = ptr[j] + i * str[j][D - 1];
md_nary(C, D - 1, dim, str, moving_ptr, fun);
}
}
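/*
 * Illustrative sketch (not part of the original source), using the NESTED()
 * helper from misc/nested.h as done elsewhere in this file: zeroing two
 * float arrays of the same shape in a single pass. str0/str1 are byte
 * strides, e.g. from md_calc_strides(D, str0, dims, sizeof(float)).
 *
 *	const long* strs[2] = { str0, str1 };
 *	void* ptrs[2] = { dst0, dst1 };
 *
 *	NESTED(void, nary_zero, (void* ptr[]))
 *	{
 *		*(float*)ptr[0] = 0.f;
 *		*(float*)ptr[1] = 0.f;
 *	};
 *
 *	md_nary(2, D, dims, strs, ptrs, nary_zero);
 */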
/**
 * Generic function which loops over all dimensions of a set of
 * multi-dimensional arrays and calls a given function for each position.
 * This function tries to parallelize over the dimensions indicated
 * by flags.
*/
void md_parallel_nary(unsigned int C, unsigned int D, const long dim[D], unsigned long flags, const long* str[C], void* ptr[C], md_nary_fun_t fun)
{
if (0 == flags) {
md_nary(C, D, dim, str, ptr, fun);
return;
}
long dimc[D];
md_select_dims(D, ~flags, dimc, dim);
// Collect all parallel dimensions
int nparallel = 0;
int parallel_b[D];
long parallel_dim[D];
long total_iterations = 1L;
while (0 != flags) {
int b = ffsl(flags & -flags) - 1;
assert(MD_IS_SET(flags, b));
flags = MD_CLEAR(flags, b);
debug_printf(DP_DEBUG4, "Parallelize: %d\n", dim[b]);
parallel_b[nparallel] = b;
parallel_dim[nparallel] = dim[b];
total_iterations *= parallel_dim[nparallel];
nparallel++;
}
#pragma omp parallel for
for (long i = 0; i < total_iterations; i++) {
// Recover place in parallel iteration space
long iter_i[D];
long ii = i;
for (int p = nparallel - 1; p >= 0; p--) {
iter_i[p] = ii % parallel_dim[p];
ii /= parallel_dim[p];
}
void* moving_ptr[C];
for (unsigned int j = 0; j < C; j++) {
moving_ptr[j] = ptr[j];
for(int p = 0; p < nparallel; p++)
moving_ptr[j] += iter_i[p] * str[j][parallel_b[p]];
}
md_nary(C, D, dimc, str, moving_ptr, fun);
}
}
static void md_parallel_loop_r(unsigned int D, unsigned int N, const long dim[static N], unsigned int flags, const long pos[static N], md_loop_fun_t fun)
{
if (0 == D) {
NESTED_CALL(fun, (pos));
return;
}
D--;
// we need to make a copy because firstprivate needs to see
// an array instead of a pointer
long pos_copy[N];
for (unsigned int i = 0; i < N; i++)
pos_copy[i] = pos[i];
#pragma omp parallel for firstprivate(pos_copy) if ((1 < dim[D]) && (flags & (1 << D)))
for (int i = 0; i < dim[D]; i++) {
pos_copy[D] = i;
md_parallel_loop_r(D, N, dim, flags, pos_copy, fun);
}
}
/**
* Generic function which loops over all dimensions and calls a given
* function passing the current indices as argument.
*
 * Runs fun(position) for all positions in dim
*
*/
void md_parallel_loop(unsigned int D, const long dim[static D], unsigned long flags, md_loop_fun_t fun)
{
long pos[D];
md_parallel_loop_r(D, D, dim, flags, pos, fun);
}
static void md_loop_r(unsigned int D, const long dim[D], long pos[D], md_loop_fun_t fun)
{
if (0 == D) {
NESTED_CALL(fun, (pos));
return;
}
D--;
for (pos[D] = 0; pos[D] < dim[D]; pos[D]++)
md_loop_r(D, dim, pos, fun);
}
/**
* Generic function which loops over all dimensions and calls a given
* function passing the current indices as argument.
*
 * Runs fun(position) for all positions in dim
*
*/
void md_loop(unsigned int D, const long dim[D], md_loop_fun_t fun)
{
long pos[D];
md_loop_r(D, dim, pos, fun);
}
/**
* Computes the next position. Returns true until last index.
*/
bool md_next(unsigned int D, const long dims[D], unsigned long flags, long pos[D])
{
if (0 == D--)
return false;
if (md_next(D, dims, flags, pos))
return true;
if (MD_IS_SET(flags, D)) {
assert((0 <= pos[D]) && (pos[D] < dims[D]));
if (++pos[D] < dims[D])
return true;
pos[D] = 0;
}
return false;
}
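/*
 * Illustrative sketch (not part of the original source): md_next can drive a
 * do/while loop over every position of an array; pos must start at zero and
 * ~0UL selects all dimensions.
 *
 *	long pos[3] = { 0, 0, 0 };
 *
 *	do {
 *		// ... use pos[0], pos[1], pos[2] ...
 *	} while (md_next(3, dims, ~0UL, pos));
 */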
/**
* Returns offset for position in a multidimensional array
*
* return pos[0]*strides[0] + ... + pos[D-1]*strides[D-1]
*
* @param D number of dimensions
 * @param strides strides array
 * @param position position array
*/
long md_calc_offset(unsigned int D, const long strides[D], const long position[D])
{
long pos = 0;
for (unsigned int i = 0; i < D; i++)
pos += strides[i] * position[i];
return pos;
}
static long md_calc_size_r(unsigned int D, const long dim[D], size_t size)
{
if (0 == D)
return size;
return md_calc_size_r(D - 1, dim, size * dim[D - 1]);
}
/**
* Returns the number of elements
*
* return dim[0]*dim[1]*...*dim[D-1]
*
* @param D number of dimensions
* @param dim dimensions array
*/
long md_calc_size(unsigned int D, const long dim[D])
{
return md_calc_size_r(D, dim, 1);
}
/**
 * Computes the number of innermost dimensions which are stored
 * contiguously, i.e. can be accessed as one block of memory.
*
*/
unsigned int md_calc_blockdim(unsigned int D, const long dim[D], const long str[D], size_t size)
{
long dist = size;
unsigned int i = 0;
for (i = 0; i < D; i++) {
if (!((str[i] == dist) || (dim[i] == 1)))
break;
dist *= dim[i];
}
return i;
}
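/*
 * Illustrative example (not part of the original source): for
 * dim = { 4, 3, 2 }, str = { 4, 16, 200 } and size = 4 bytes, the first two
 * dimensions are packed contiguously (strides 4 and 4 * 4 = 16), while the
 * third leaves a gap (200 != 48), so md_calc_blockdim returns 2.
 */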
/**
* Copy dimensions specified by flags and set remaining dimensions to 1
*
* odims = [ 1 idims[1] idims[2] 1 1 idims[5] ]
*
* @param D number of dimensions
* @param flags bitmask specifying which dimensions to copy
* @param odims output dimensions
* @param idims input dimensions
*/
void md_select_dims(unsigned int D, unsigned long flags, long odims[D], const long idims[D])
{
md_copy_dims(D, odims, idims);
for (unsigned int i = 0; i < D; i++)
if (!MD_IS_SET(flags, i))
odims[i] = 1;
}
/**
* Copy dimensions
*
* odims[i] = idims[i]
*/
void md_copy_dims(unsigned int D, long odims[D], const long idims[D])
{
memcpy(odims, idims, D * sizeof(long));
}
/**
* Copy strides
*
* ostrs[i] = istrs[i]
*/
void md_copy_strides(unsigned int D, long ostrs[D], const long istrs[D])
{
memcpy(ostrs, istrs, D * sizeof(long));
}
/**
* Set all dimensions to value
*
* dims[i] = val
*/
void md_set_dims(unsigned int D, long dims[D], long val)
{
for (unsigned int i = 0; i < D; i++)
dims[i] = val;
}
/**
* returns whether or not @param pos is a valid index of an array of dimension @param dims
*/
bool md_is_index(unsigned int D, const long pos[D], const long dims[D])
{
if (D == 0)
return true;
return ((pos[0] >= 0) && (pos[0] < dims[0]) && md_is_index(D - 1, pos + 1, dims + 1));
}
/**
 * return whether any dimension not selected by 'flags' is > 1
*/
bool md_check_dimensions(unsigned int N, const long dims[N], unsigned int flags)
{
long d[N];
md_select_dims(N, ~flags, d, dims);
return (1 != md_calc_size(N, d));
}
/**
* Check if dimensions at 'flags' position are equal
*/
bool md_check_equal_dims(unsigned int N, const long dims1[N], const long dims2[N], unsigned int flags)
{
return ( md_check_bounds(N, flags, dims1, dims2)
&& md_check_bounds(N, flags, dims2, dims1));
}
/*
* compute non-trivial (> 1) dims
*/
unsigned long md_nontriv_dims(unsigned int D, const long dims[D])
{
unsigned long flags = 0;
for (unsigned int i = 0; i < D; i++)
if (dims[i] > 1)
flags = MD_SET(flags, i);
return flags;
}
/*
* compute non-trivial (!= 0) strides
*/
unsigned long md_nontriv_strides(unsigned int D, const long strs[D])
{
unsigned long flags = 0;
for (unsigned int i = 0; i < D; i++)
if (strs[i] != 0)
flags = MD_SET(flags, i);
return flags;
}
/**
* Set all dimensions to one
*
* dims[i] = 1
*/
void md_singleton_dims(unsigned int D, long dims[D])
{
for (unsigned int i = 0; i < D; i++)
dims[i] = 1;
}
/**
 * Set all strides to zero
 *
 * strs[i] = 0
*/
void md_singleton_strides(unsigned int D, long strs[D])
{
for (unsigned int i = 0; i < D; i++)
strs[i] = 0;
}
/**
 * Check dimensions for compatibility. Dimensions must be equal or,
 * where indicated by a set bit in flags, equal to one in at least
 * one of the arguments.
*/
bool md_check_compat(unsigned int D, unsigned long flags, const long dim1[D], const long dim2[D])
{
if (0 == D)
return true;
D--;
if ((dim1[D] == dim2[D]) || (MD_IS_SET(flags, D) && ((1 == dim1[D]) || (1 == dim2[D]))))
return md_check_compat(D, flags, dim1, dim2);
return false;
}
void md_merge_dims(unsigned int N, long out_dims[N], const long dims1[N], const long dims2[N])
{
assert(md_check_compat(N, ~0UL, dims1, dims2));
for (unsigned int i = 0; i < N; i++)
out_dims[i] = (1 == dims1[i]) ? dims2[i] : dims1[i];
}
/**
* dim1 must be bounded by dim2 where a bit is set
*/
bool md_check_bounds(unsigned int D, unsigned long flags, const long dim1[D], const long dim2[D])
{
if (0 == D--)
return true;
if (!MD_IS_SET(flags, D) || (dim1[D] <= dim2[D]))
return md_check_bounds(D, flags, dim1, dim2);
return false;
}
/**
* Set the output's flagged dimensions to the minimum of the two input dimensions
*
 * odims = [ MIN(idims1[0],idims2[0]) ... MIN(idims1[D-1],idims2[D-1]) ]
*
* @param D number of dimensions
* @param flags bitmask specifying which dimensions to minimize
* @param odims output dimensions
* @param idims1 input 1 dimensions
* @param idims2 input 2 dimensions
*/
void md_min_dims(unsigned int D, unsigned long flags, long odims[D], const long idims1[D], const long idims2[D])
{
for (unsigned int i = 0; i < D; i++)
if (MD_IS_SET(flags, i))
odims[i] = MIN(idims1[i], idims2[i]);
}
/**
* Set the output's flagged dimensions to the maximum of the two input dimensions
*
 * odims = [ MAX(idims1[0],idims2[0]) ... MAX(idims1[D-1],idims2[D-1]) ]
*
* @param D number of dimensions
* @param flags bitmask specifying which dimensions to maximize
* @param odims output dimensions
* @param idims1 input 1 dimensions
* @param idims2 input 2 dimensions
*/
void md_max_dims(unsigned int D, unsigned long flags, long odims[D], const long idims1[D], const long idims2[D])
{
for (unsigned int i = 0; i < D; i++)
if (MD_IS_SET(flags, i))
odims[i] = MAX(idims1[i], idims2[i]);
}
/**
* Zero out array (with strides)
*
* ptr[i] = 0
*/
void md_clear2(unsigned int D, const long dim[D], const long str[D], void* ptr, size_t size)
{
const long (*nstr[1])[D] = { (const long (*)[D])str };
#ifdef USE_CUDA
bool use_gpu = cuda_ondevice(ptr);
#endif
unsigned long flags = 0;
for (unsigned int i = 0; i < D; i++)
if (0 == str[i])
flags |= MD_BIT(i);
long dim2[D];
md_select_dims(D, ~flags, dim2, dim);
NESTED(void, nary_clear, (struct nary_opt_data_s* opt_data, void* ptr[]))
{
size_t size2 = size * opt_data->size;
#ifdef USE_CUDA
if (use_gpu) {
cuda_clear(size2, ptr[0]);
return;
}
#endif
memset(ptr[0], 0, size2);
};
optimized_nop(1, MD_BIT(0), D, dim2, nstr, (void*[1]){ ptr }, (size_t[1]){ size }, nary_clear);
}
/**
* Calculate strides in column-major format
* (smallest index is sequential)
*
* @param D number of dimensions
 * @param str array of calculated strides
 * @param dim array of dimensions
 * @param size size of a single element
*/
long* md_calc_strides(unsigned int D, long str[D], const long dim[D], size_t size)
{
long old = size;
for (unsigned int i = 0; i < D; i++) {
str[i] = (1 == dim[i]) ? 0 : old;
old *= dim[i];
}
return str;
}
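/*
 * Illustrative example (not part of the original source): for
 * dim = { 4, 3, 2 } and size = 4 (e.g. float), this yields
 * str = { 4, 16, 48 } (in bytes); singleton dimensions get stride 0,
 * e.g. dim = { 4, 1, 2 } yields str = { 4, 0, 16 }.
 */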
/**
* Zero out array (without strides)
*
* ptr[i] = 0
*
* @param D number of dimensions
* @param dim dimensions array
* @param ptr pointer to data to clear
* @param size sizeof()
*/
void md_clear(unsigned int D, const long dim[D], void* ptr, size_t size)
{
md_clear2(D, dim, MD_STRIDES(D, dim, size), ptr, size);
}
/**
* Copy array (with strides)
*
* optr[i] = iptr[i]
*/
void md_copy2(unsigned int D, const long dim[D], const long ostr[D], void* optr, const long istr[D], const void* iptr, size_t size)
{
#if 0
// this is for a fun comparison between our copy engine and FFTW
extern void fft2(unsigned int D, const long dim[D], unsigned int flags,
const long ostr[D], void* optr, const long istr[D], const void* iptr);
if (sizeof(complex float) == size)
fft2(D, dim, 0, ostr, optr, istr, iptr);
#endif
#ifdef USE_CUDA
bool use_gpu = cuda_ondevice(optr) || cuda_ondevice(iptr);
#if 1
	// fewer calls for filling-like copies
long tostr_fill[D];
long tistr_fill[D];
long tdims_fill[D];
md_copy_strides(D, tostr_fill, ostr);
md_copy_strides(D, tistr_fill, istr);
md_copy_dims(D, tdims_fill, dim);
long (*nstr2_fill[2])[D] = { &tostr_fill, &tistr_fill };
int ND_fill = simplify_dims(2, D, tdims_fill, nstr2_fill);
bool fill = (2 == ND_fill) || (1 == ND_fill);
size_t cp_size = 0;
unsigned long repetitions = 0;
if (2 == ND_fill) {
fill = fill && ((*nstr2_fill[0])[0] == (signed)size);
fill = fill && ((*nstr2_fill[1])[0] == (signed)size);
cp_size = tdims_fill[0] * size;
repetitions = tdims_fill[1];
fill = fill && ((*nstr2_fill[0])[1] == (signed)cp_size);
fill = fill && ((*nstr2_fill[1])[1] == 0);
}
if (1 == ND_fill) {
fill = fill && ((*nstr2_fill[0])[0] == (signed)size);
fill = fill && ((*nstr2_fill[1])[0] == 0);
cp_size = size;
repetitions = tdims_fill[0];
}
if (use_gpu && fill) {
cuda_memcpy(cp_size, optr, iptr);
unsigned int i = 1;
while (2 * i <= repetitions) {
cuda_memcpy(cp_size * i, optr + i * cp_size, optr);
i = i * 2;
}
if (0 < repetitions - i)
cuda_memcpy(cp_size * (repetitions - i), optr + i * cp_size, optr);
return;
}
#endif
#if 1
long tostr[D];
long tistr[D];
long tdims[D];
md_copy_strides(D, tostr, ostr);
md_copy_strides(D, tistr, istr);
md_copy_dims(D, tdims, dim);
long (*nstr2[2])[D] = { &tostr, &tistr };
int ND = optimize_dims_gpu(2, D, tdims, nstr2);
assert(ND <= (int)D);
#if 1
// permute dims with 0 input strides or negative in/output strides to the end
// these might be permuted to the inner dimensions by optimize_dims and break the strided copy
unsigned int perm[ND];
for (int i = 0, j = 0; i < ND; i++) {
if ( (0 >= (*nstr2[1])[i])
|| (0 >= (*nstr2[0])[i])) {
perm[ND - 1 -j] = i;
j += 1;
} else {
perm[i - j] = i;
}
}
long tmp[ND];
md_permute_dims(ND, perm, tmp, tdims);
md_copy_dims(ND, tdims, tmp);
md_permute_dims(ND, perm, tmp, tostr);
md_copy_dims(ND, tostr, tmp);
md_permute_dims(ND, perm, tmp, tistr);
md_copy_dims(ND, tistr, tmp);
#endif
size_t sizes[2] = { size, size };
int skip = min_blockdim(2, ND, tdims, nstr2, sizes);
debug_printf(DP_DEBUG4, "md_copy_2 skip=%d\n", skip);
debug_print_dims(DP_DEBUG4, ND, tdims);
debug_print_dims(DP_DEBUG4, ND, (*nstr2[0]));
debug_print_dims(DP_DEBUG4, ND, (*nstr2[1]));
if ( use_gpu
&& (ND - skip > 0)) {
assert(skip < ND);
long ostr2 = (*nstr2[0])[skip];
long istr2 = (*nstr2[1])[skip];
if (!( (ostr2 > 0)
&& (istr2 > 0)))
goto out;
void* nptr[2] = { optr, (void*)iptr };
long sizes[2] = { md_calc_size(skip, tdims) * size, tdims[skip] };
skip++;
const long* nstr[2] = { *nstr2[0] + skip, *nstr2[1] + skip };
long* sizesp = sizes; // because of clang
NESTED(void, nary_strided_copy, (void* ptr[]))
{
debug_printf(DP_DEBUG4, "CUDA 2D copy %ld %ld %ld %ld %ld %ld\n",
sizes[0], sizes[1], ostr2, istr2, nptr[0], nptr[1]);
cuda_memcpy_strided(sizesp, ostr2, ptr[0], istr2, ptr[1]);
};
md_nary(2, ND - skip, tdims + skip, nstr, nptr, nary_strided_copy);
return;
}
out: ;
#endif
#endif
const long (*nstr[2])[D] = { (const long (*)[D])ostr, (const long (*)[D])istr };
NESTED(void, nary_copy, (struct nary_opt_data_s* opt_data, void* ptr[]))
{
size_t size2 = size * opt_data->size;
#ifdef USE_CUDA
if (use_gpu) {
cuda_memcpy(size2, ptr[0], ptr[1]);
return;
}
#endif
memcpy(ptr[0], ptr[1], size2);
};
optimized_nop(2, MD_BIT(0), D, dim, nstr, (void*[2]){ optr, (void*)iptr }, (size_t[2]){ size, size }, nary_copy);
}
/**
* Copy array (without strides)
*
* optr[i] = iptr[i]
*/
void md_copy(unsigned int D, const long dim[D], void* optr, const void* iptr, size_t size)
{
long str[D];
md_calc_strides(D, str, dim, size);
md_copy2(D, dim, str, optr, str, iptr, size);
}
#ifdef USE_CUDA
// copied from flpmath.c
static void* gpu_constant(const void* vp, size_t size)
{
return md_gpu_move(1, (long[1]){ 1 }, vp, size);
}
#endif
/**
* Fill array with value pointed by pointer (with strides)
*
* ptr[i] = iptr[0]
*/
void md_fill2(unsigned int D, const long dim[D], const long str[D], void* ptr, const void* iptr, size_t size)
{
#ifdef USE_CUDA
if (cuda_ondevice(ptr) && (!cuda_ondevice(iptr))) {
void* giptr = gpu_constant(iptr, size);
md_fill2(D, dim, str, ptr, giptr, size);
md_free(giptr);
return;
}
#endif
long istr[D];
md_singleton_strides(D, istr);
md_copy2(D, dim, str, ptr, istr, iptr, size);
}
/**
* Fill array with value pointed by pointer (without strides)
*
* ptr[i] = iptr[0]
*/
void md_fill(unsigned int D, const long dim[D], void* ptr, const void* iptr, size_t size)
{
md_fill2(D, dim, MD_STRIDES(D, dim, size), ptr, iptr, size);
}
/**
* Swap values between a number of arrays (with strides)
*/
void md_circular_swap2(unsigned int M, unsigned int D, const long dims[D], const long* strs[M], void* ptr[M], size_t size)
{
size_t sizes[M];
for (unsigned int i = 0; i < M; i++)
sizes[i] = size;
const long (*nstrs[M])[D];
for (unsigned int i = 0; i < M; i++)
nstrs[i] = (const long (*)[D])strs[i];
NESTED(void, nary_swap, (struct nary_opt_data_s* opt_data, void* ptr[]))
{
size_t size2 = size * opt_data->size;
char* tmp = (size2 < 32) ? alloca(size2) : xmalloc(size2);
#ifdef USE_CUDA
assert(!cuda_ondevice(ptr[0]));
assert(!cuda_ondevice(ptr[1]));
#endif
memcpy(tmp, ptr[0], size2);
for (unsigned int i = 0; i < M - 1; i++)
memcpy(ptr[i], ptr[i + 1], size2);
memcpy(ptr[M - 1], tmp, size2);
if (size2 >= 32)
xfree(tmp);
};
optimized_nop(M, (1 << M) - 1, D, dims, nstrs, ptr, sizes, nary_swap);
}
/**
* Swap values between a number of arrays
*/
void md_circular_swap(unsigned M, unsigned int D, const long dims[D], void* ptr[M], size_t size)
{
long strs[M][D];
md_calc_strides(D, strs[0], dims, size);
const long* strp[M];
strp[0] = strs[0];
for (unsigned int i = 1; i < M; i++) {
md_copy_strides(D, strs[i], strs[0]);
strp[i] = strs[i];
}
md_circular_swap2(M, D, dims, strp, ptr, size);
}
/**
* Swap values between two arrays (with strides)
*
* iptr[i] = optr[i] and optr[i] = iptr[i]
*/
void md_swap2(unsigned int D, const long dim[D], const long ostr[D], void* optr, const long istr[D], void* iptr, size_t size)
{
md_circular_swap2(2, D, dim, (const long*[2]){ ostr, istr }, (void*[2]){ optr, iptr }, size);
}
/**
* Swap values between two arrays (without strides)
*
* iptr[i] = optr[i] and optr[i] = iptr[i]
*/
void md_swap(unsigned int D, const long dim[D], void* optr, void* iptr, size_t size)
{
long str[D];
md_calc_strides(D, str, dim, size);
md_swap2(D, dim, str, optr, str, iptr, size);
}
/**
* Move a block from an array to another array (with strides)
*
*/
void md_move_block2(unsigned int D, const long dim[D], const long opos[D], const long odim[D], const long ostr[D], void* optr, const long ipos[D], const long idim[D], const long istr[D], const void* iptr, size_t size)
{
for (unsigned int i = 0; i < D; i++) {
assert(dim[i] <= odim[i]);
assert(dim[i] <= idim[i]);
assert((0 <= opos[i]) && (opos[i] <= odim[i] - dim[i]));
assert((0 <= ipos[i]) && (ipos[i] <= idim[i] - dim[i]));
}
long ioff = md_calc_offset(D, istr, ipos);
long ooff = md_calc_offset(D, ostr, opos);
md_copy2(D, dim, ostr, optr + ooff, istr, iptr + ioff, size);
}
/**
* Move a block from an array to another array (without strides)
*
*/
void md_move_block(unsigned int D, const long dim[D], const long opos[D], const long odim[D], void* optr, const long ipos[D], const long idim[D], const void* iptr, size_t size)
{
md_move_block2(D, dim,
opos, odim, MD_STRIDES(D, odim, size), optr,
ipos, idim, MD_STRIDES(D, idim, size), iptr, size);
}
/**
* Copy a block from an array to another array (with strides)
*
* Block dimensions are min(idim , odim)
*
* if idim[d] > odim[d], then optr[i] = iptr[pos + i] for 0 <= i < odim[d]
*
* if idim[d] < odim[d], then optr[pos + i] = iptr[i] for 0 <= i < idim[d]
*
*/
void md_copy_block2(unsigned int D, const long pos[D], const long odim[D], const long ostr[D], void* optr, const long idim[D], const long istr[D], const void* iptr, size_t size)
{
long dim[D];
long ipos[D];
long opos[D];
for (unsigned int i = 0; i < D; i++) {
assert((idim[i] != odim[i]) || (0 == pos[i]));
dim[i] = MIN(odim[i], idim[i]);
ipos[i] = 0;
opos[i] = 0;
if (idim[i] != dim[i])
ipos[i] = pos[i];
if (odim[i] != dim[i])
opos[i] = pos[i];
}
md_move_block2(D, dim, opos, odim, ostr, optr, ipos, idim, istr, iptr, size);
}
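/*
 * Illustrative example (not part of the original source): in 1D with
 * odim = { 8 }, idim = { 4 } and pos = { 2 }, the input is smaller than the
 * output, so optr[2 + i] = iptr[i] for 0 <= i < 4; with the sizes swapped
 * (odim = { 4 }, idim = { 8 }), optr[i] = iptr[2 + i] instead.
 */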
/**
* Copy a block from an array to another array (without strides)
*
* Block dimensions are min(idim , odim)
*
* if idim[d] > odim[d], then optr[i] = iptr[pos + i] for 0 <= i < odim[d]
*
* if idim[d] < odim[d], then optr[pos + i] = iptr[i] for 0 <= i < idim[d]
*
*/
void md_copy_block(unsigned int D, const long pos[D], const long odim[D], void* optr, const long idim[D], const void* iptr, size_t size)
{
md_copy_block2(D, pos,
odim, MD_STRIDES(D, odim, size), optr,
idim, MD_STRIDES(D, idim, size), iptr, size);
}
/**
* Resize an array by zero-padding or by truncation at the end.
*
* optr = [iptr 0 0 0 0]
*
*/
void md_resize(unsigned int D, const long odim[D], void* optr, const long idim[D], const void* iptr, size_t size)
{
long pos[D];
memset(pos, 0, D * sizeof(long));
md_clear(D, odim, optr, size);
md_copy_block(D, pos, odim, optr, idim, iptr, size);
}
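/*
 * Illustrative sketch (added comment, not part of the original source):
 * growing a vector zero-pads at the end, shrinking truncates:
 *
 *   long idim[1] = { 4 }, odim[1] = { 6 };
 *   float in[4] = { 1, 2, 3, 4 }, out[6];
 *   md_resize(1, odim, out, idim, in, sizeof(float));
 *   // out == { 1, 2, 3, 4, 0, 0 }
 */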
/**
* Pad an array by val at the end.
*
* optr = [iptr val val val val]
*
*/
void md_pad(unsigned int D, const void* val, const long odim[D], void* optr, const long idim[D], const void* iptr, size_t size)
{
long pos[D];
memset(pos, 0, D * sizeof(long));
md_fill(D, odim, optr, val, size);
md_copy_block(D, pos, odim, optr, idim, iptr, size);
}
/**
* Resize an array by zero-padding or by truncation at both ends symmetrically.
*
* optr = [0 0 iptr 0 0]
*
*/
void md_resize_center(unsigned int D, const long odim[D], void* optr, const long idim[D], const void* iptr, size_t size)
{
// the definition of the center position corresponds
// to the one used in the FFT.
long pos[D];
for (unsigned int i = 0; i < D; i++)
pos[i] = labs((odim[i] / 2) - (idim[i] / 2));
md_clear(D, odim, optr, size);
md_copy_block(D, pos, odim, optr, idim, iptr, size);
}
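/*
 * Illustrative sketch (added comment, not part of the original source):
 * with the FFT-style center convention, pos[i] = |odim[i]/2 - idim[i]/2|,
 * so growing a length-4 vector to length 6 pads one zero on each side:
 *
 *   long idim[1] = { 4 }, odim[1] = { 6 };
 *   float in[4] = { 1, 2, 3, 4 }, out[6];
 *   md_resize_center(1, odim, out, idim, in, sizeof(float));
 *   // out == { 0, 1, 2, 3, 4, 0 }
 */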
/**
* Pad an array on both ends by val.
*
* optr = [val val iptr val val]
*
*/
void md_pad_center(unsigned int D, const void* val, const long odim[D], void* optr, const long idim[D], const void* iptr, size_t size)
{
long pos[D];
for (unsigned int i = 0; i < D; i++)
pos[i] = labs((odim[i] / 2) - (idim[i] / 2));
md_fill(D, odim, optr, val, size);
md_copy_block(D, pos, odim, optr, idim, iptr, size);
}
/**
* Extract slice from array specified by flags (with strides)
*
* optr = iptr(pos[0], :, pos[2], :, :)
*
*/
void md_slice2(unsigned int D, unsigned long flags, const long pos[D], const long dim[D], const long ostr[D], void* optr, const long istr[D], const void* iptr, size_t size)
{
long odim[D];
md_select_dims(D, ~flags, odim, dim);
md_copy_block2(D, pos, odim, ostr, optr, dim, istr, iptr, size);
}
/**
 * Extract slice from array specified by flags (without strides)
*
* optr = iptr(pos[0], :, pos[2], :, :)
*
*/
void md_slice(unsigned int D, unsigned long flags, const long pos[D], const long dim[D], void* optr, const void* iptr, size_t size)
{
long odim[D];
md_select_dims(D, ~flags, odim, dim);
md_slice2(D, flags, pos, dim,
MD_STRIDES(D, odim, size), optr,
MD_STRIDES(D, dim, size), iptr, size);
}
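/*
 * Illustrative sketch (added comment, not part of the original source):
 * slicing dimension 0 of a 3 x 4 array at position 1 (dimension 0 is the
 * fastest-varying one for the default strides):
 *
 *   long dims[2] = { 3, 4 };
 *   long pos[2] = { 1, 0 };
 *   float in[3 * 4], out[1 * 4];
 *   md_slice(2, MD_BIT(0), pos, dims, out, in, sizeof(float));
 *   // out[j] == in[1 + 3 * j], i.e. the slice in(1, :)
 */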
/**
* Permute array (with strides)
*
* optr[order[i]] = iptr[i]
*
*/
void md_permute2(unsigned int D, const unsigned int order[D], const long odims[D], const long ostr[D], void* optr, const long idims[D], const long istr[D], const void* iptr, size_t size)
{
unsigned int flags = 0;
long ostr2[D];
for (unsigned int i = 0; i < D; i++) {
assert(order[i] < D);
assert(odims[i] == idims[order[i]]);
flags = MD_SET(flags, order[i]);
ostr2[order[i]] = ostr[i];
}
assert(MD_BIT(D) == flags + 1);
md_copy2(D, idims, ostr2, optr, istr, iptr, size);
}
/**
* Permute array (without strides)
*
* optr[order[i]] = iptr[i]
*
*/
void md_permute(unsigned int D, const unsigned int order[D], const long odims[D], void* optr, const long idims[D], const void* iptr, size_t size)
{
md_permute2(D, order,
odims, MD_STRIDES(D, odims, size), optr,
idims, MD_STRIDES(D, idims, size), iptr, size);
}
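/*
 * Illustrative sketch (added comment, not part of the original source):
 * with order = { 2, 0, 1 }, input dimension order[i] becomes output
 * dimension i:
 *
 *   unsigned int order[3] = { 2, 0, 1 };
 *   long idims[3] = { 2, 3, 4 };
 *   long odims[3];
 *   float in[2 * 3 * 4], out[4 * 2 * 3];
 *   md_permute_dims(3, order, odims, idims);   // odims == { 4, 2, 3 }
 *   md_permute(3, order, odims, out, idims, in, sizeof(float));
 *   // input element (i, j, k) ends up at output position (k, i, j)
 */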
/**
* Permute dimensions
*
*
*/
void md_permute_dims(unsigned int D, const unsigned int order[D], long odims[D], const long idims[D])
{
for (unsigned int i = 0; i < D; i++)
odims[i] = idims[order[i]];
}
static void md_transpose_order(unsigned int D, unsigned int order[D], unsigned int dim1, unsigned int dim2)
{
assert(dim1 < D);
assert(dim2 < D);
for (unsigned int i = 0; i < D; i++)
order[i] = i;
order[dim1] = dim2;
order[dim2] = dim1;
}
/**
* Transpose dimensions
*
*
*/
void md_transpose_dims(unsigned int D, unsigned int dim1, unsigned int dim2, long odims[D], const long idims[D])
{
unsigned int order[D];
md_transpose_order(D, order, dim1, dim2);
md_permute_dims(D, order, odims, idims);
}
/**
 * Transpose array (with strides)
*
* optr[dim2] = iptr[dim1]
*
* optr[dim1] = iptr[dim2]
*
*/
void md_transpose2(unsigned int D, unsigned int dim1, unsigned int dim2, const long odims[D], const long ostr[D], void* optr, const long idims[D], const long istr[D], const void* iptr, size_t size)
{
for (unsigned int i = 0; i < D; i++)
if ((i != dim1) && (i != dim2))
assert(odims[i] == idims[i]);
assert(odims[dim1] == idims[dim2]);
assert(odims[dim2] == idims[dim1]);
unsigned int order[D];
md_transpose_order(D, order, dim1, dim2);
md_permute2(D, order, odims, ostr, optr, idims, istr, iptr, size);
}
/**
 * Transpose array (without strides)
*
* optr[dim2] = iptr[dim1]
*
* optr[dim1] = iptr[dim2]
*
*/
void md_transpose(unsigned int D, unsigned int dim1, unsigned int dim2, const long odims[D], void* optr, const long idims[D], const void* iptr, size_t size)
{
md_transpose2(D, dim1, dim2,
odims, MD_STRIDES(D, odims, size), optr,
idims, MD_STRIDES(D, idims, size), iptr, size);
}
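/*
 * Illustrative sketch (added comment, not part of the original source):
 * for a 2-D array this is an ordinary matrix transpose:
 *
 *   long idims[2] = { 2, 3 };
 *   long odims[2];
 *   float in[2 * 3], out[3 * 2];
 *   md_transpose_dims(2, 0, 1, odims, idims);   // odims == { 3, 2 }
 *   md_transpose(2, 0, 1, odims, out, idims, in, sizeof(float));
 *   // out(j, i) == in(i, j)
 */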
static void md_flip_inpl2(unsigned int D, const long dims[D], unsigned long flags, const long str[D], void* ptr, size_t size);
/**
* Swap input and output while flipping selected dimensions
* at the same time.
*/
void md_swap_flip2(unsigned int D, const long dims[D], unsigned long flags, const long ostr[D], void* optr, const long istr[D], void* iptr, size_t size)
{
#if 1
int i;
for (i = D - 1; i >= 0; i--)
if ((1 != dims[i]) && MD_IS_SET(flags, i))
break;
if (-1 == i) {
md_swap2(D, dims, ostr, optr, istr, iptr, size);
return;
}
assert(1 < dims[i]);
assert(ostr[i] != 0);
assert(istr[i] != 0);
long dims2[D];
md_copy_dims(D, dims2, dims);
dims2[i] = dims[i] / 2;
long off = (dims[i] + 1) / 2;
assert(dims2[i] + off == dims[i]);
md_swap_flip2(D, dims2, flags, ostr, optr, istr, iptr + off * istr[i], size);
md_swap_flip2(D, dims2, flags, ostr, optr + off * ostr[i], istr, iptr, size);
// odd, swap center plane
// (we should split in three similar sized chunks instead)
dims2[i] = 1;
if (1 == dims[i] % 2)
md_swap_flip2(D, dims2, flags, ostr, optr + (off - 1) * ostr[i], istr, iptr + (off - 1) * istr[i], size);
#else
// simpler, but more swaps
md_swap2(D, dims, ostr, optr, istr, iptr, size);
md_flip_inpl2(D, dims, flags, ostr, optr, size);
md_flip_inpl2(D, dims, flags, istr, iptr, size);
#endif
}
/**
* Swap input and output while flipping selected dimensions
* at the same time.
*/
void md_swap_flip(unsigned int D, const long dims[D], unsigned long flags, void* optr, void* iptr, size_t size)
{
long strs[D];
md_calc_strides(D, strs, dims, size);
md_swap_flip2(D, dims, flags, strs, optr, strs, iptr, size);
}
static void md_flip_inpl2(unsigned int D, const long dims[D], unsigned long flags, const long str[D], void* ptr, size_t size)
{
int i;
for (i = D - 1; i >= 0; i--)
if ((1 != dims[i]) && MD_IS_SET(flags, i))
break;
if (-1 == i)
return;
assert(1 < dims[i]);
assert(str[i] != 0);
long dims2[D];
md_copy_dims(D, dims2, dims);
dims2[i] = dims[i] / 2;
long off = str[i] * (0 + (dims[i] + 1) / 2);
md_swap_flip2(D, dims2, flags, str, ptr, str, ptr + off, size);
}
/**
* Flip array (with strides)
*
* optr[dims[D] - 1 - i] = iptr[i]
*
*/
void md_flip2(unsigned int D, const long dims[D], unsigned long flags, const long ostr[D], void* optr, const long istr[D], const void* iptr, size_t size)
{
if (optr == iptr) {
assert(ostr == istr);
md_flip_inpl2(D, dims, flags, ostr, optr, size);
return;
}
long off = 0;
long ostr2[D];
for (unsigned int i = 0; i < D; i++) {
ostr2[i] = ostr[i];
if (MD_IS_SET(flags, i)) {
ostr2[i] = -ostr[i];
off += (dims[i] - 1) * ostr[i];
}
}
md_copy2(D, dims, ostr2, optr + off, istr, iptr, size);
}
/**
* Flip array (without strides)
*
* optr[dims[D] - 1 - i] = iptr[i]
*
*/
void md_flip(unsigned int D, const long dims[D], unsigned long flags, void* optr, const void* iptr, size_t size)
{
long str[D];
md_calc_strides(D, str, dims, size);
md_flip2(D, dims, flags, str, optr, str, iptr, size);
}
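/*
 * Illustrative sketch (added comment, not part of the original source):
 * flipping along dimension 0 reverses the array:
 *
 *   long dims[1] = { 4 };
 *   float in[4] = { 1, 2, 3, 4 }, out[4];
 *   md_flip(1, dims, MD_BIT(0), out, in, sizeof(float));
 *   // out == { 4, 3, 2, 1 }
 */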
/**
* Reshape array (with strides)
*
* Only flagged dims may flow
*/
void md_reshape2(unsigned int D, unsigned long flags, const long odims[D], const long ostrs[D], void* optr, const long idims[D], const long istrs[D], const void* iptr, size_t size)
{
assert(md_calc_size(D, odims) == md_calc_size(D, idims));
assert(md_check_equal_dims(D, odims, idims, ~flags));
unsigned int order[D];
unsigned int j = 0;
for (unsigned int i = 0; i < D; i++)
if (MD_IS_SET(flags, i))
order[j++] = i;
for (unsigned int i = 0; i < D; i++)
if (!MD_IS_SET(flags, i))
order[j++] = i;
assert(D == j);
unsigned int iorder[D];
for (unsigned int i = 0; i < D; i++)
iorder[order[i]] = i;
long dims2[D];
long strs2[D];
// FIXME: we could avoid the buffer in some cases
void* buf = md_alloc_sameplace(D, odims, size, optr);
md_permute_dims(D, order, dims2, idims);
md_calc_strides(D, strs2, dims2, size);
md_permute2(D, order, dims2, strs2, buf, idims, istrs, iptr, size);
md_permute_dims(D, order, dims2, odims);
md_calc_strides(D, strs2, dims2, size);
md_permute2(D, iorder, odims, ostrs, optr, dims2, strs2, buf, size);
md_free(buf);
}
/**
* Reshape array (without strides)
*
* Only flagged dims may flow
*/
void md_reshape(unsigned int D, unsigned long flags, const long odims[D], void* optr, const long idims[D], const void* iptr, size_t size)
{
assert(md_calc_size(D, odims) == md_calc_size(D, idims));
assert(md_check_equal_dims(D, odims, idims, ~flags));
long ostrs[D];
md_calc_strides(D, ostrs, odims, size);
long istrs[D];
md_calc_strides(D, istrs, idims, size);
if (md_check_equal_dims(D, ostrs, istrs, ~flags)) { // strides consistent!
md_copy(D, odims, optr, iptr, size);
} else {
md_reshape2(D, flags, odims, ostrs, optr, idims, istrs, iptr, size);
}
}
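/*
 * Illustrative sketch (added comment, not part of the original source):
 * splitting a dimension of size 6 into 2 x 3 while leaving dimension 2
 * untouched (only the flagged dimensions 0 and 1 "flow"):
 *
 *   long idims[3] = { 6, 1, 5 };
 *   long odims[3] = { 2, 3, 5 };
 *   float in[6 * 5], out[2 * 3 * 5];
 *   md_reshape(3, MD_BIT(0) | MD_BIT(1), odims, out, idims, in, sizeof(float));
 *   // the 6 entries along dim 0 are re-read as a 2 x 3 block (dim 0 fastest)
 */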
bool md_compare2(unsigned int D, const long dims[D], const long str1[D], const void* src1,
const long str2[D], const void* src2, size_t size)
{
__block bool eq = true;
const long (*nstr[2])[D] = { (const long (*)[D])str1, (const long (*)[D])str2 };
NESTED(void, nary_cmp, (struct nary_opt_data_s* opt_data, void* ptrs[]))
{
size_t size2 = size * opt_data->size;
bool eq2 = (0 == memcmp(ptrs[0], ptrs[1], size2));
#pragma omp critical
eq &= eq2;
};
optimized_nop(2, 0u, D, dims, nstr, (void*[2]){ (void*)src1, (void*)src2 }, (size_t[2]){ size, size }, nary_cmp);
return eq;
}
bool md_compare(unsigned int D, const long dims[D], const void* src1, const void* src2, size_t size)
{
long str[D];
md_calc_strides(D, str, dims, size);
return md_compare2(D, dims, str, src1, str, src2, size);
}
static void md_septrafo_r(unsigned int D, unsigned int R, long dimensions[D], unsigned long flags, const long strides[D], void* ptr, md_trafo_fun_t fun)
{
if (0 == R--)
return;
md_septrafo_r(D, R, dimensions, flags, strides, ptr, fun);
if (MD_IS_SET(flags, R)) {
void* nptr[1] = { ptr };
const long* nstrides[1] = { strides };
long dimsR = dimensions[R];
long strsR = strides[R]; // because of clang
dimensions[R] = 1; // we made a copy in md_septrafo2
NESTED(void, nary_septrafo, (void* ptr[]))
{
fun(dimsR, strsR, ptr[0]);
};
//md_nary_parallel(1, D, dimensions, nstrides, nptr, &data, nary_septrafo);
md_nary(1, D, dimensions, nstrides, nptr, nary_septrafo);
dimensions[R] = dimsR;
}
}
/**
* Apply a separable transformation along selected dimensions.
*
*/
void md_septrafo2(unsigned int D, const long dimensions[D], unsigned long flags, const long strides[D], void* ptr, md_trafo_fun_t fun)
{
long dimcopy[D];
md_copy_dims(D, dimcopy, dimensions);
md_septrafo_r(D, D, dimcopy, flags, strides, ptr, fun);
}
/**
* Apply a separable transformation along selected dimensions.
*
*/
void md_septrafo(unsigned int D, const long dims[D], unsigned long flags, void* ptr, size_t size, md_trafo_fun_t fun)
{
md_septrafo2(D, dims, flags, MD_STRIDES(D, dims, size), ptr, fun);
}
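/*
 * Added note (not part of the original source): fun is invoked once for every
 * 1-D line of the array along each flagged dimension, receiving the length,
 * stride and start pointer of that line (as in the call to fun above). A
 * hedged usage sketch, with line_op a hypothetical md_trafo_fun_t:
 *
 *   md_septrafo(3, dims, MD_BIT(0) | MD_BIT(2), ptr, sizeof(float), line_op);
 *
 * This is the usual building block for separable transforms, e.g. an N-D FFT
 * composed of 1-D FFTs.
 */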
/**
* Copy diagonals from array specified by flags (with strides)
*
* dst(i, i, :, i, :) = src(i, i, :, i, :)
*
*/
void md_copy_diag2(unsigned int D, const long dims[D], unsigned long flags, const long str1[D], void* dst, const long str2[D], const void* src, size_t size)
{
long stride1 = 0;
long stride2 = 0;
long count = -1;
for (unsigned int i = 0; i < D; i++) {
if (MD_IS_SET(flags, i)) {
if (count < 0)
count = dims[i];
assert(dims[i] == count);
stride1 += str1[i];
stride2 += str2[i];
}
}
long xdims[D];
md_select_dims(D, ~flags, xdims, dims);
for (long i = 0; i < count; i++)
md_copy2(D, xdims, str1, dst + i * stride1, str2, src + i * stride2, size);
}
/**
* Copy diagonals from array specified by flags (without strides)
*
* dst(i ,i ,: ,i , :) = src(i ,i ,: ,i ,:)
*
*/
void md_copy_diag(unsigned int D, const long dims[D], unsigned long flags, void* dst, const void* src, size_t size)
{
long str[D];
md_calc_strides(D, str, dims, size);
md_copy_diag2(D, dims, flags, str, dst, str, src, size);
}
/**
* Fill diagonals specified by flags with value (without strides)
*
* dst(i, i, :, i, :) = src[0]
*
*/
void md_fill_diag(unsigned int D, const long dims[D], unsigned long flags, void* dst, const void* src, size_t size)
{
long str2[D];
md_singleton_strides(D, str2);
md_copy_diag2(D, dims, flags, MD_STRIDES(D, dims, size), dst, str2, src, size);
}
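/*
 * Illustrative sketch (added comment, not part of the original source):
 * clearing a square array and filling its diagonal yields an identity matrix:
 *
 *   long dims[2] = { 4, 4 };
 *   float one = 1.0f;
 *   float id[4 * 4];
 *   md_clear(2, dims, id, sizeof(float));
 *   md_fill_diag(2, dims, MD_BIT(0) | MD_BIT(1), id, &one, sizeof(float));
 *   // id is now the 4 x 4 identity
 */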
static void md_circ_shift_inpl2(unsigned int D, const long dims[D], const long center[D], const long strs[D], void* dst, size_t size)
{
#if 0
long dims1[D];
long dims2[D];
md_copy_dims(D, dims1, dims);
md_copy_dims(D, dims2, dims);
unsigned int i;
for (i = 0; i < D; i++) {
if (0 != center[i]) {
dims1[i] = center[i];
dims2[i] = dims[i] - center[i];
break;
}
}
if (i == D)
return;
long off = strs[i] * center[i];
// cool but slow, instead we want to have a chain of swaps
md_flip2(D, dims, MD_BIT(i), strs, dst, strs, dst, size);
md_flip2(D, dims1, MD_BIT(i), strs, dst, strs, dst, size);
md_flip2(D, dims2, MD_BIT(i), strs, dst + off, strs, dst + off, size);
// also not efficient, we want to merge the chain of swaps
long center2[D];
md_copy_dims(D, center2, center);
center2[i] = 0;
md_circ_shift_inpl2(D, dims, center2, strs, dst, size);
#else
// use tmp for now
unsigned int i;
for (i = 0; i < D; i++)
if (0 != center[i])
break;
if (i == D)
return;
long tmp_strs[D];
md_calc_strides(D, tmp_strs, dims, size);
void* tmp = md_alloc_sameplace(D, dims, size, dst);
md_copy2(D, dims, tmp_strs, tmp, strs, dst, size);
md_circ_shift2(D, dims, center, strs, dst, tmp_strs, tmp, size);
md_free(tmp);
#endif
}
/**
* Circularly shift array (with strides)
*
* dst[mod(i + center)] = src[i]
*
*/
void md_circ_shift2(unsigned int D, const long dimensions[D], const long center[D], const long str1[D], void* dst, const long str2[D], const void* src, size_t size)
{
long pos[D];
for (unsigned int i = 0; i < D; i++) { // FIXME: it would be better to calc modulo
pos[i] = center[i];
while (pos[i] < 0)
pos[i] += dimensions[i];
}
unsigned int i = 0; // FIXME: maybe we should search the other way?
while ((i < D) && (0 == pos[i]))
i++;
if (D == i) {
md_copy2(D, dimensions, str1, dst, str2, src, size);
return;
}
if (dst == src) {
assert(str1 == str2);
md_circ_shift_inpl2(D, dimensions, pos, str1, dst, size);
return;
}
long shift = pos[i];
assert(shift != 0);
long dim1[D];
long dim2[D];
md_copy_dims(D, dim1, dimensions);
md_copy_dims(D, dim2, dimensions);
dim1[i] = shift;
dim2[i] = dimensions[i] - shift;
assert((dim1[i] >= 0) && (dim2[i] >= 0));
pos[i] = 0;
//printf("%d: %ld %ld %d\n", i, dim1[i], dim2[i], sizeof(dimensions));
md_circ_shift2(D, dim1, pos, str1, dst, str2, src + dim2[i] * str2[i], size);
md_circ_shift2(D, dim2, pos, str1, dst + dim1[i] * str1[i], str2, src, size);
}
/**
* Circularly shift array (without strides)
*
* dst[mod(i + center)] = src[i]
*
*/
void md_circ_shift(unsigned int D, const long dimensions[D], const long center[D], void* dst, const void* src, size_t size)
{
long strides[D];
md_calc_strides(D, strides, dimensions, size);
md_circ_shift2(D, dimensions, center, strides, dst, strides, src, size);
}
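/*
 * Illustrative sketch (added comment, not part of the original source):
 *
 *   long dims[1] = { 5 };
 *   long center[1] = { 2 };
 *   float in[5] = { 0, 1, 2, 3, 4 }, out[5];
 *   md_circ_shift(1, dims, center, out, in, sizeof(float));
 *   // out == { 3, 4, 0, 1, 2 }   (dst[(i + 2) mod 5] = src[i])
 */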
/**
* Circularly extend array (with strides)
*
*/
void md_circ_ext2(unsigned int D, const long dims1[D], const long strs1[D], void* dst, const long dims2[D], const long strs2[D], const void* src, size_t size)
{
long ext[D];
for (unsigned int i = 0; i < D; i++) {
ext[i] = dims1[i] - dims2[i];
assert(ext[i] >= 0);
assert(ext[i] <= dims2[i]);
}
unsigned int i = 0; // FIXME: maybe we should search the other way?
while ((i < D) && (0 == ext[i]))
i++;
if (D == i) {
md_copy2(D, dims1, strs1, dst, strs2, src, size);
return;
}
long dims1_crop[D];
long dims2_crop[D];
long ext_dims[D];
md_copy_dims(D, dims1_crop, dims1);
md_copy_dims(D, dims2_crop, dims2);
md_copy_dims(D, ext_dims, dims1);
dims1_crop[i] = dims2[i];
dims2_crop[i] = ext[i];
ext_dims[i] = ext[i];
ext[i] = 0;
//printf("%d: %ld %ld %d\n", i, dim1[i], dim2[i], sizeof(dimensions));
md_circ_ext2(D, dims1_crop, strs1, dst, dims2, strs2, src, size);
md_circ_ext2(D, ext_dims, strs1, dst + dims2[i] * strs1[i], dims2_crop, strs2, src, size);
}
/**
* Circularly extend array (without strides)
*
*/
void md_circ_ext(unsigned int D, const long dims1[D], void* dst, const long dims2[D], const void* src, size_t size)
{
md_circ_ext2(D, dims1, MD_STRIDES(D, dims1, size), dst,
dims2, MD_STRIDES(D, dims2, size), src, size);
}
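/*
 * Illustrative sketch (added comment, not part of the original source):
 * the input is wrapped around to fill the larger output (the extension
 * must not exceed the input size):
 *
 *   long dims2[1] = { 4 }, dims1[1] = { 6 };
 *   float in[4] = { 0, 1, 2, 3 }, out[6];
 *   md_circ_ext(1, dims1, out, dims2, in, sizeof(float));
 *   // out == { 0, 1, 2, 3, 0, 1 }
 */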
/**
* Periodically extend array (with strides)
*
*/
void md_periodic2(unsigned int D, const long dims1[D], const long strs1[D], void* dst, const long dims2[D], const long strs2[D], const void* src, size_t size)
{
long dims1B[2 * D];
long strs1B[2 * D];
long strs2B[2 * D];
for (unsigned int i = 0; i < D; i++) {
assert(0 == dims1[i] % dims2[i]);
// blocks
dims1B[2 * i + 0] = dims2[i];
strs1B[2 * i + 0] = strs1[i];
strs2B[2 * i + 0] = strs2[i];
// periodic copies
dims1B[2 * i + 1] = dims1[i] / dims2[i];
strs1B[2 * i + 1] = strs1[i] * dims2[i];
strs2B[2 * i + 1] = 0;
}
md_copy2(2 * D, dims1B, strs1B, dst, strs2B, src, size);
}
/**
* Periodically extend array (without strides)
*
*/
void md_periodic(unsigned int D, const long dims1[D], void* dst, const long dims2[D], const void* src, size_t size)
{
md_periodic2(D, dims1, MD_STRIDES(D, dims1, size), dst,
dims2, MD_STRIDES(D, dims2, size), src, size);
}
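/*
 * Illustrative sketch (added comment, not part of the original source):
 * the input is tiled periodically into the output (dims1 must be a
 * multiple of dims2 in every dimension):
 *
 *   long dims2[1] = { 2 }, dims1[1] = { 6 };
 *   float in[2] = { 7, 9 }, out[6];
 *   md_periodic(1, dims1, out, dims2, in, sizeof(float));
 *   // out == { 7, 9, 7, 9, 7, 9 }
 */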
/**
* Allocate CPU memory
*
* return pointer to CPU memory
*/
void* md_alloc(unsigned int D, const long dimensions[D], size_t size)
{
return xmalloc(md_calc_size(D, dimensions) * size);
}
/**
* Allocate CPU memory and clear
*
* return pointer to CPU memory
*/
void* md_calloc(unsigned int D, const long dimensions[D], size_t size)
{
void* ptr = md_alloc(D, dimensions, size);
md_clear(D, dimensions, ptr, size);
return ptr;
}
#ifdef USE_CUDA
/**
* Allocate GPU memory
*
* return pointer to GPU memory
*/
void* md_alloc_gpu(unsigned int D, const long dimensions[D], size_t size)
{
return cuda_malloc(md_calc_size(D, dimensions) * size);
}
/**
* Allocate GPU memory and copy from CPU pointer
*
* return pointer to GPU memory
*/
void* md_gpu_move(unsigned int D, const long dims[D], const void* ptr, size_t size)
{
if (NULL == ptr)
return NULL;
void* gpu_ptr = md_alloc_gpu(D, dims, size);
md_copy(D, dims, gpu_ptr, ptr, size);
return gpu_ptr;
}
#endif
/**
* Allocate memory on the same device (CPU/GPU) place as ptr
*
* return pointer to CPU memory if ptr is in CPU or to GPU memory if ptr is in GPU
*/
void* md_alloc_sameplace(unsigned int D, const long dimensions[D], size_t size, const void* ptr)
{
#ifdef USE_CUDA
return (cuda_ondevice(ptr) ? md_alloc_gpu : md_alloc)(D, dimensions, size);
#else
assert(0 != ptr);
return md_alloc(D, dimensions, size);
#endif
}
/**
* Free CPU/GPU memory
*
*/
void md_free(const void* ptr)
{
#ifdef USE_CUDA
if (cuda_ondevice(ptr))
cuda_free((void*)ptr);
else
#endif
xfree(ptr);
}
|
acoustics.c | /*
 * Student: Trascau Mihai
 * Group: 344C4
 *
 * Assignment: Wave equation for 2D acoustics
 * File: acoustics.c
 * Description: Source file implementing the whole program, using the functions described in the other source files.
*/
#include "acoustics.h"
int main(int argc, char *argv[])
{
int i,j;
int err;
int numtask, rank;
time_t start_time;
MPI_Status status;
MPI_Datatype MPI_STRUCTURE, str_types[1];
MPI_Datatype MPI_SOURCE, src_types[1];
MPI_Datatype MPI_SCENARIO, scn_types[4];
MPI_Aint str_offsets[1], src_offsets[1], scn_offsets[4], extent;
int str_blockcounts[1], src_blockcounts[2], scn_blockcounts[4];
err = MPI_Init(&argc,&argv);
if(err != MPI_SUCCESS)
EXIT_ERROR("[ERROR] MPI initialization failed\n\n");
MPI_Comm_size(MPI_COMM_WORLD,&numtask);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
str_offsets[0] = 0;
str_types[0] = MPI_INT;
str_blockcounts[0] = 8;
MPI_Type_struct(1,str_blockcounts,str_offsets,str_types,&MPI_STRUCTURE);
MPI_Type_commit(&MPI_STRUCTURE);
src_offsets[0] = 0;
src_types[0] = MPI_INT;
src_blockcounts[0] = 3;
MPI_Type_struct(1,src_blockcounts,src_offsets,src_types,&MPI_SOURCE);
MPI_Type_commit(&MPI_SOURCE);
scn_offsets[0] = 0;
scn_types[0] = MPI_INT;
scn_blockcounts[0] = 5;
MPI_Type_extent(MPI_INT,&extent);
scn_offsets[1] = scn_offsets[0] + scn_blockcounts[0]*extent;
scn_types[1] = MPI_DOUBLE;
scn_blockcounts[1] = 4;
MPI_Type_extent(MPI_DOUBLE,&extent);
scn_offsets[2] = scn_offsets[1] + scn_blockcounts[1]*extent;
scn_types[2] = MPI_SOURCE;
scn_blockcounts[2] = 1;
MPI_Type_extent(MPI_SOURCE,&extent);
scn_offsets[3] = scn_offsets[2] + scn_blockcounts[2]*extent;
scn_types[3] = MPI_STRUCTURE;
scn_blockcounts[3] = MAX_STRUCTURES;
MPI_Type_struct(4,scn_blockcounts,scn_offsets,scn_types,&MPI_SCENARIO);
MPI_Type_commit(&MPI_SCENARIO);
if(rank == 0)
{
if(argc != 2)
EXIT_ERROR("[ERROR] Incorrect number of arguments. Usage: ./acoustics <input_file_name>\n\n");
if(import_data(argv[1]))
EXIT_ERROR("[ERROR] Input data could not be loaded\n\n");
for(i=1;i<numtask;i++)
MPI_Send(&num_scenarios,1,MPI_INT,i,1,MPI_COMM_WORLD);
}
else
MPI_Recv(&num_scenarios,1,MPI_INT,0,1,MPI_COMM_WORLD,&status);
if(rank == 0)
for(i=1;i<numtask;i++)
MPI_Send(&scenario,num_scenarios,MPI_SCENARIO,i,1,MPI_COMM_WORLD);
else
MPI_Recv(&scenario,num_scenarios,MPI_SCENARIO,0,1,MPI_COMM_WORLD,&status);
//print_import_data(rank);
scn_index = 0;
while(scn_index < num_scenarios)
{
int step = 0;
int source_active = 1;
start_time = time(NULL);
omp_set_num_threads(scenario[scn_index].OMP_THREADS);
load_scenario();
if(rank == 0)
{
local_ny = ny/numtask;
init_scenario(ny);
}
else if(rank != numtask-1)
{
local_ny = ny/numtask;
init_scenario(local_ny+2);
}
else
{
local_ny = ny - (numtask-1)*(ny/numtask);
init_scenario(local_ny+1);
}
recalculate_positions(rank,numtask);
int start, stop;
int radius = scenario[scn_index].source.radius;
while(step < (int)(MAX_TIME/TIME_STEP))
{
if(rank == 0) start = 0;
else start = 1;
if(rank == numtask-1 || rank == 0) stop = local_ny+1;
else stop = local_ny+2;
if(step < (int)(MAX_TIME/TIME_STEP)/2)
pulse_source(radius,step,scenario[scn_index].amp);
else if(source_active)
{
#pragma omp parallel for private(i,j)
for(i=start;i<stop;i++)
for(j=0;j<nx;j++)
{
if(is_source(i,j,radius,source_active))
uc[i][j] = ub[i][j] = ua[i][j] = 0;
}
source_active = 0;
}
m_compute_acoustics(rank,numtask,source_active,radius);
MPI_Barrier(MPI_COMM_WORLD);
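/* Ghost-row exchange with the neighbouring ranks (added explanatory comment,
   not part of the original source): rank 0 and the interior even ranks post
   their receives before their sends, while the odd ranks send before they
   receive, so the blocking calls of adjacent ranks are issued in
   complementary order. */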
if(rank == 0)
{
MPI_Recv(uc[local_ny],nx,MPI_DOUBLE,1,1,MPI_COMM_WORLD,&status);
MPI_Send(uc[local_ny-1],nx,MPI_DOUBLE,1,1,MPI_COMM_WORLD);
}
else if(rank%2 == 1 && rank != numtask-1)
{
MPI_Send(uc[1],nx,MPI_DOUBLE,rank-1,1,MPI_COMM_WORLD);
MPI_Recv(uc[0],nx,MPI_DOUBLE,rank-1,1,MPI_COMM_WORLD,&status);
MPI_Send(uc[local_ny],nx,MPI_DOUBLE,rank+1,1,MPI_COMM_WORLD);
MPI_Recv(uc[local_ny+1],nx,MPI_DOUBLE,rank+1,1,MPI_COMM_WORLD,&status);
}
else if(rank%2 == 0 && rank != numtask-1)
{
MPI_Recv(uc[local_ny+1],nx,MPI_DOUBLE,rank+1,1,MPI_COMM_WORLD,&status);
MPI_Send(uc[local_ny],nx,MPI_DOUBLE,rank+1,1,MPI_COMM_WORLD);
MPI_Recv(uc[0],nx,MPI_DOUBLE,rank-1,1,MPI_COMM_WORLD,&status);
MPI_Send(uc[1],nx,MPI_DOUBLE,rank-1,1,MPI_COMM_WORLD);
}
else if(rank == numtask-1)
{
MPI_Send(uc[1],nx,MPI_DOUBLE,rank-1,1,MPI_COMM_WORLD);
MPI_Recv(uc[0],nx,MPI_DOUBLE,rank-1,1,MPI_COMM_WORLD,&status);
}
if(step%SAVE_TIME == 1)
{
if(rank == 0)
{
for(i=1;i<numtask;i++)
{
if(i != numtask-1)
for(j=0;j<ny/numtask;j++)
MPI_Recv(uc[i*(ny/numtask)+j],nx,MPI_DOUBLE,i,1,MPI_COMM_WORLD,&status);
if(i == numtask-1)
for(j=(numtask-1)*(ny/numtask);j<ny;j++)
MPI_Recv(uc[j],nx,MPI_DOUBLE,i,1,MPI_COMM_WORLD,&status);
}
export_to_vtk(step);
}
else
for(i=1;i<local_ny+1;i++)
MPI_Send(uc[i],nx,MPI_DOUBLE,0,1,MPI_COMM_WORLD);
}
xchg = ua;
ua = ub;
ub = uc;
uc = xchg;
MPI_Barrier(MPI_COMM_WORLD);
step++;
}
unload_scenario();
if(rank == 0)
{
time_t stop = time(NULL);
double compute_time = difftime(stop,start_time);
export_to_gnuplot(scn_index,compute_time);
}
scn_index++;
}
MPI_Type_free(&MPI_SCENARIO);
MPI_Barrier(MPI_COMM_WORLD);
MPI_Finalize();
return 0;
}
|
par_multi_interp.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
/*--------------------------------------------------------------------------
* hypre_ParAMGBuildMultipass
* This routine implements Stuben's direct interpolation with multiple passes.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildMultipass( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
HYPRE_Int P_max_elmts,
HYPRE_Int weight_option,
HYPRE_Int *col_offd_S_to_A,
hypre_ParCSRMatrix **P_ptr )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MULTIPASS_INTERP] -= hypre_MPI_Wtime();
#endif
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
hypre_ParCSRCommHandle *comm_handle;
hypre_ParCSRCommPkg *tmp_comm_pkg;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_data = NULL;
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = NULL;
HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = NULL;
HYPRE_BigInt *col_map_offd_S = hypre_ParCSRMatrixColMapOffd(S);
HYPRE_Int num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd);
HYPRE_BigInt *col_map_offd = NULL;
HYPRE_Int num_cols_offd;
hypre_ParCSRMatrix *P;
hypre_CSRMatrix *P_diag;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i; /*at first counter of nonzero cols for each row,
finally will be pointer to start of row */
HYPRE_Int *P_diag_j;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_offd_data = NULL;
HYPRE_Int *P_offd_i; /*at first counter of nonzero cols for each row,
finally will be pointer to start of row */
HYPRE_Int *P_offd_j = NULL;
HYPRE_Int num_sends = 0;
HYPRE_Int *int_buf_data = NULL;
HYPRE_BigInt *big_buf_data = NULL;
HYPRE_Int *send_map_start;
HYPRE_Int *send_map_elmt;
HYPRE_Int *send_procs;
HYPRE_Int num_recvs = 0;
HYPRE_Int *recv_vec_start;
HYPRE_Int *recv_procs;
HYPRE_Int *new_recv_vec_start = NULL;
HYPRE_Int **Pext_send_map_start = NULL;
HYPRE_Int **Pext_recv_vec_start = NULL;
HYPRE_Int *Pext_start = NULL;
HYPRE_Int *P_ncols = NULL;
HYPRE_Int *CF_marker_offd = NULL;
HYPRE_Int *dof_func_offd = NULL;
HYPRE_Int *P_marker;
HYPRE_Int *P_marker_offd = NULL;
HYPRE_Int *C_array;
HYPRE_Int *C_array_offd = NULL;
HYPRE_Int *pass_array = NULL; /* contains points ordered according to pass */
HYPRE_Int *pass_pointer = NULL; /* pass_pointer[j] contains pointer to first
point of pass j contained in pass_array */
HYPRE_Int *P_diag_start;
HYPRE_Int *P_offd_start = NULL;
HYPRE_Int **P_diag_pass;
HYPRE_Int **P_offd_pass = NULL;
HYPRE_Int **Pext_pass = NULL;
HYPRE_BigInt *big_temp_pass = NULL;
HYPRE_BigInt **new_elmts = NULL; /* new neighbors generated in each pass */
HYPRE_Int *new_counter = NULL; /* contains no. of new neighbors for
each pass */
HYPRE_Int *loc = NULL; /* contains locations for new neighbor
connections in int_o_buffer to avoid searching */
HYPRE_Int *Pext_i = NULL; /*contains P_diag_i and P_offd_i info for nonzero
cols of off proc neighbors */
HYPRE_BigInt *Pext_send_buffer = NULL; /* used to collect global nonzero
col ids in P_diag for send_map_elmts */
HYPRE_Int *map_S_to_new = NULL;
/*HYPRE_Int *map_A_to_new = NULL;*/
HYPRE_Int *map_A_to_S = NULL;
HYPRE_BigInt *new_col_map_offd = NULL;
HYPRE_BigInt *col_map_offd_P = NULL;
HYPRE_Int *permute = NULL;
HYPRE_BigInt *big_permute = NULL;
HYPRE_Int cnt;
HYPRE_Int cnt_nz;
HYPRE_Int total_nz;
HYPRE_Int pass;
HYPRE_Int num_passes;
HYPRE_Int max_num_passes = 10;
HYPRE_Int n_fine;
HYPRE_Int n_coarse = 0;
HYPRE_Int n_coarse_offd = 0;
HYPRE_Int n_SF = 0;
HYPRE_Int n_SF_offd = 0;
HYPRE_Int *fine_to_coarse = NULL;
HYPRE_BigInt *fine_to_coarse_offd = NULL;
HYPRE_Int *assigned = NULL;
HYPRE_Int *assigned_offd = NULL;
HYPRE_Real *Pext_send_data = NULL;
HYPRE_Real *Pext_data = NULL;
HYPRE_Real sum_C, sum_N;
HYPRE_Real sum_C_pos, sum_C_neg;
HYPRE_Real sum_N_pos, sum_N_neg;
HYPRE_Real diagonal;
HYPRE_Real alfa = 1.0;
HYPRE_Real beta = 1.0;
HYPRE_Int j_start;
HYPRE_Int j_end;
HYPRE_Int i,i1;
HYPRE_Int j,j1;
HYPRE_Int k,k1,k2,k3;
HYPRE_BigInt big_k1;
HYPRE_Int pass_array_size;
HYPRE_BigInt global_pass_array_size;
HYPRE_BigInt local_pass_array_size;
HYPRE_Int my_id, num_procs;
HYPRE_Int index, start;
HYPRE_BigInt my_first_cpt;
HYPRE_BigInt total_global_cpts;
HYPRE_Int p_cnt;
HYPRE_Int total_nz_offd;
HYPRE_Int cnt_nz_offd;
HYPRE_Int cnt_offd, cnt_new;
HYPRE_Int no_break;
HYPRE_Int not_found;
HYPRE_Int Pext_send_size;
HYPRE_Int Pext_recv_size;
HYPRE_Int old_Pext_send_size;
HYPRE_Int old_Pext_recv_size;
HYPRE_Int P_offd_size = 0;
HYPRE_Int local_index = -1;
HYPRE_Int new_num_cols_offd = 0;
HYPRE_Int num_cols_offd_P;
/* Threading variables */
HYPRE_Int my_thread_num, num_threads, thread_start, thread_stop;
HYPRE_Int pass_length;
HYPRE_Int *tmp_marker, *tmp_marker_offd;
HYPRE_Int *tmp_array, *tmp_array_offd;
HYPRE_Int * max_num_threads = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST);
HYPRE_Int * cnt_nz_per_thread;
HYPRE_Int * cnt_nz_offd_per_thread;
/* HYPRE_Real wall_time;
wall_time = hypre_MPI_Wtime(); */
/* Initialize threading variables */
max_num_threads[0] = hypre_NumThreads();
cnt_nz_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
cnt_nz_offd_per_thread = hypre_CTAlloc(HYPRE_Int, max_num_threads[0], HYPRE_MEMORY_HOST);
for(i=0; i < max_num_threads[0]; i++)
{
cnt_nz_offd_per_thread[i] = 0;
cnt_nz_per_thread[i] = 0;
}
/*-----------------------------------------------------------------------
* Access the CSR vectors for A and S. Also get size of fine grid.
*-----------------------------------------------------------------------*/
hypre_MPI_Comm_size(comm,&num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
#ifdef HYPRE_NO_GLOBAL_PARTITION
my_first_cpt = num_cpts_global[0];
/* total_global_cpts = 0; */
if (my_id == (num_procs -1)) total_global_cpts = num_cpts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs-1, comm);
#else
my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
if (!comm_pkg)
{
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
col_offd_S_to_A = NULL;
}
if (col_offd_S_to_A)
{
col_map_offd = col_map_offd_S;
num_cols_offd = num_cols_offd_S;
}
else
{
col_map_offd = col_map_offd_A;
num_cols_offd = num_cols_offd_A;
}
if (num_cols_offd_A)
{
A_offd_data = hypre_CSRMatrixData(A_offd);
A_offd_j = hypre_CSRMatrixJ(A_offd);
}
if (num_cols_offd)
S_offd_j = hypre_CSRMatrixJ(S_offd);
n_fine = hypre_CSRMatrixNumRows(A_diag);
/*-----------------------------------------------------------------------
 * Initialize counters and allocate mapping vector.
*-----------------------------------------------------------------------*/
if (n_fine) fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
n_coarse = 0;
n_SF = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:n_coarse,n_SF ) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < n_fine; i++)
if (CF_marker[i] == 1) n_coarse++;
else if (CF_marker[i] == -3) n_SF++;
pass_array_size = n_fine-n_coarse-n_SF;
if (pass_array_size) pass_array = hypre_CTAlloc(HYPRE_Int, pass_array_size, HYPRE_MEMORY_HOST);
pass_pointer = hypre_CTAlloc(HYPRE_Int, max_num_passes+1, HYPRE_MEMORY_HOST);
if (n_fine) assigned = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED);
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_SHARED);
if (n_coarse) C_array = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1) dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
}
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
send_map_start = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
send_map_elmt = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
recv_vec_start = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
if (send_map_start[num_sends])
{
int_buf_data = hypre_CTAlloc(HYPRE_Int, send_map_start[num_sends], HYPRE_MEMORY_HOST);
big_buf_data = hypre_CTAlloc(HYPRE_BigInt, send_map_start[num_sends], HYPRE_MEMORY_HOST);
}
}
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_start[i];
for (j = start; j < send_map_start[i+1]; j++)
int_buf_data[index++] = CF_marker[send_map_elmt[j]];
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (num_functions > 1)
{
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_start[i];
for (j = start; j < send_map_start[i+1]; j++)
int_buf_data[index++] = dof_func[send_map_elmt[j]];
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
}
n_coarse_offd = 0;
n_SF_offd = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) reduction(+:n_coarse_offd,n_SF_offd) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_offd; i++)
if (CF_marker_offd[i] == 1) n_coarse_offd++;
else if (CF_marker_offd[i] == -3) n_SF_offd++;
if (num_cols_offd)
{
assigned_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
map_S_to_new = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
new_col_map_offd = hypre_CTAlloc(HYPRE_BigInt, n_coarse_offd, HYPRE_MEMORY_HOST);
}
/*-----------------------------------------------------------------------
* First Pass: determine the maximal size of P, and elementsPerRow[i].
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Assigned points are points for which we know an interpolation
* formula already, and which are thus available to interpolate from.
 * assigned[i]=0 for C points, and 1, 2, 3, ... for F points, depending
 * on the pass in which their interpolation formula is determined.
*
* pass_array contains the points ordered according to its pass, i.e.
* | C-points | points of pass 1 | points of pass 2 | ....
* C_points are points 0 through pass_pointer[1]-1,
* points of pass k (0 < k < num_passes) are contained in points
* pass_pointer[k] through pass_pointer[k+1]-1 of pass_array .
*
* pass_array is also used to avoid going through all points for each pass,
 * i.e. at the beginning it contains all points in descending order starting
 * with n_fine-1. Then starting from the last point, we evaluate whether
 * it is a C_point (pass 0). If it is, the point is brought to the front
* and the length of the points to be searched is shortened. This is
* done until the parameter cnt (which determines the first point of
* pass_array to be searched) becomes n_fine. Then all points have been
* assigned a pass number.
*-----------------------------------------------------------------------*/
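/*-----------------------------------------------------------------------
 * Added illustrative note (not part of the original source): e.g. for
 * CF_marker = {1, -1, 1, -1, -1} the loop below produces
 * fine_to_coarse = {0, -1, 1, -1, -1}, C_array = {0, 2} and
 * pass_array = {4, 3, 1} (the F-points in descending order), so later
 * passes only need to scan the still-unassigned tail of pass_array.
 *-----------------------------------------------------------------------*/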
cnt = 0;
p_cnt = pass_array_size-1;
P_diag_i[0] = 0;
P_offd_i[0] = 0;
for (i = 0; i < n_fine; i++)
{
if (CF_marker[i] == 1)
{
fine_to_coarse[i] = cnt; /* this C point is assigned index
coarse_counter on coarse grid,
and in column of P */
C_array[cnt++] = i;
assigned[i] = 0;
P_diag_i[i+1] = 1; /* one element in row i1 of P */
P_offd_i[i+1] = 0;
}
else if (CF_marker[i] == -1)
{
pass_array[p_cnt--] = i;
P_diag_i[i+1] = 0;
P_offd_i[i+1] = 0;
assigned[i] = -1;
fine_to_coarse[i] = -1;
}
else
{
P_diag_i[i+1] = 0;
P_offd_i[i+1] = 0;
assigned[i] = -1;
fine_to_coarse[i] = -1;
}
}
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_start[i];
for (j = start; j < send_map_start[i+1]; j++)
{
big_buf_data[index] = (HYPRE_BigInt)fine_to_coarse[send_map_elmt[j]];
if (big_buf_data[index] > -1)
big_buf_data[index] += my_first_cpt;
index++;
}
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(21, comm_pkg, big_buf_data,
fine_to_coarse_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
new_recv_vec_start = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
if (n_coarse_offd)
C_array_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST);
cnt = 0;
new_recv_vec_start[0] = 0;
for (j = 0; j < num_recvs; j++)
{
for (i = recv_vec_start[j]; i < recv_vec_start[j+1]; i++)
{
if (CF_marker_offd[i] == 1)
{
map_S_to_new[i] = cnt;
C_array_offd[cnt] = i;
new_col_map_offd[cnt++] = fine_to_coarse_offd[i];
assigned_offd[i] = 0;
}
else
{
assigned_offd[i] = -1;
map_S_to_new[i] = -1;
}
}
new_recv_vec_start[j+1] = cnt;
}
cnt = 0;
hypre_TFree(fine_to_coarse_offd, HYPRE_MEMORY_HOST);
if (col_offd_S_to_A)
{
map_A_to_S = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_offd_A; i++)
{
if (cnt < num_cols_offd && col_map_offd_A[i] == col_map_offd[cnt])
map_A_to_S[i] = cnt++;
else
map_A_to_S[i] = -1;
}
}
/*-----------------------------------------------------------------------
* Mark all local neighbors of C points as 'assigned'.
*-----------------------------------------------------------------------*/
pass_pointer[0] = 0;
pass_pointer[1] = 0;
total_nz = n_coarse; /* accumulates total number of nonzeros in P_diag */
total_nz_offd = 0; /* accumulates total number of nonzeros in P_offd */
cnt = 0;
cnt_offd = 0;
cnt_nz = 0;
cnt_nz_offd = 0;
for (i = pass_array_size-1; i > cnt-1; i--)
{
i1 = pass_array[i];
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (CF_marker[j1] == 1)
{
P_diag_i[i1+1]++;
cnt_nz++;
assigned[i1] = 1;
}
}
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (CF_marker_offd[j1] == 1)
{
P_offd_i[i1+1]++;
cnt_nz_offd++;
assigned[i1] = 1;
}
}
if (assigned[i1] == 1)
{
pass_array[i++] = pass_array[cnt];
pass_array[cnt++] = i1;
}
}
pass_pointer[2] = cnt;
/*-----------------------------------------------------------------------
* All local neighbors are assigned, now need to exchange the boundary
* info for assigned strong neighbors.
*-----------------------------------------------------------------------*/
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_start[i];
for (j = start; j < send_map_start[i+1]; j++)
{ int_buf_data[index++] = assigned[send_map_elmt[j]]; }
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
assigned_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
/*-----------------------------------------------------------------------
* Now we need to determine strong neighbors of points of pass 1, etc.
* we need to update assigned_offd after each pass
*-----------------------------------------------------------------------*/
pass = 2;
local_pass_array_size = (HYPRE_BigInt)(pass_array_size - cnt);
hypre_MPI_Allreduce(&local_pass_array_size, &global_pass_array_size, 1, HYPRE_MPI_BIG_INT,
hypre_MPI_SUM, comm);
while (global_pass_array_size && pass < max_num_passes)
{
for (i = pass_array_size-1; i > cnt-1; i--)
{
i1 = pass_array[i];
no_break = 1;
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (assigned[j1] == pass-1)
{
pass_array[i++] = pass_array[cnt];
pass_array[cnt++] = i1;
assigned[i1] = pass;
no_break = 0;
break;
}
}
if (no_break)
{
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (assigned_offd[j1] == pass-1)
{
pass_array[i++] = pass_array[cnt];
pass_array[cnt++] = i1;
assigned[i1] = pass;
break;
}
}
}
}
/*hypre_printf("pass %d remaining points %d \n", pass, local_pass_array_size);*/
pass++;
pass_pointer[pass] = cnt;
local_pass_array_size = (HYPRE_BigInt)(pass_array_size - cnt);
hypre_MPI_Allreduce(&local_pass_array_size, &global_pass_array_size, 1, HYPRE_MPI_BIG_INT,
hypre_MPI_SUM, comm);
index = 0;
for (i=0; i < num_sends; i++)
{
start = send_map_start[i];
for (j = start; j < send_map_start[i+1]; j++)
{ int_buf_data[index++] = assigned[send_map_elmt[j]]; }
}
if (num_procs > 1)
{
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, int_buf_data,
assigned_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
}
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST);
num_passes = pass;
P_diag_pass = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST); /* P_diag_pass[i] will contain
all column numbers for points of pass i */
P_diag_pass[1] = hypre_CTAlloc(HYPRE_Int, cnt_nz, HYPRE_MEMORY_HOST);
P_diag_start = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); /* P_diag_start[i] contains
pointer to begin of column numbers in P_pass for point i,
P_diag_i[i+1] contains number of columns for point i */
P_offd_start = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
P_offd_pass = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST);
if (cnt_nz_offd)
P_offd_pass[1] = hypre_CTAlloc(HYPRE_Int, cnt_nz_offd, HYPRE_MEMORY_HOST);
else
P_offd_pass[1] = NULL;
new_elmts = hypre_CTAlloc(HYPRE_BigInt*, num_passes, HYPRE_MEMORY_HOST);
new_counter = hypre_CTAlloc(HYPRE_Int, num_passes+1, HYPRE_MEMORY_HOST);
new_counter[0] = 0;
new_counter[1] = n_coarse_offd;
new_num_cols_offd = n_coarse_offd;
new_elmts[0] = new_col_map_offd;
}
/*-----------------------------------------------------------------------
* Pass 1: now we consider points of pass 1, with strong C_neighbors,
*-----------------------------------------------------------------------*/
cnt_nz = 0;
cnt_nz_offd = 0;
/* JBS: Possible candidate for threading */
for (i=pass_pointer[1]; i < pass_pointer[2]; i++)
{
i1 = pass_array[i];
P_diag_start[i1] = cnt_nz;
P_offd_start[i1] = cnt_nz_offd;
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (CF_marker[j1] == 1)
{ P_diag_pass[1][cnt_nz++] = fine_to_coarse[j1]; }
}
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (CF_marker_offd[j1] == 1)
{ P_offd_pass[1][cnt_nz_offd++] = map_S_to_new[j1]; }
}
}
total_nz += cnt_nz;
total_nz_offd += cnt_nz_offd;
if (num_procs > 1)
{
tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
Pext_send_map_start = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST);
Pext_recv_vec_start = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST);
Pext_pass = hypre_CTAlloc(HYPRE_Int*, num_passes, HYPRE_MEMORY_HOST);
Pext_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd+1, HYPRE_MEMORY_HOST);
if (num_cols_offd) Pext_start = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
if (send_map_start[num_sends])
P_ncols = hypre_CTAlloc(HYPRE_Int, send_map_start[num_sends], HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < num_cols_offd+1; i++)
{ Pext_i[i] = 0; }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < send_map_start[num_sends]; i++)
{ P_ncols[i] = 0; }
}
old_Pext_send_size = 0;
old_Pext_recv_size = 0;
for (pass=2; pass < num_passes; pass++)
{
if (num_procs > 1)
{
Pext_send_map_start[pass] = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST);
Pext_recv_vec_start[pass] = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST);
Pext_send_size = 0;
Pext_send_map_start[pass][0] = 0;
for (i=0; i < num_sends; i++)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j,j1) reduction(+:Pext_send_size) HYPRE_SMP_SCHEDULE
#endif
for (j=send_map_start[i]; j < send_map_start[i+1]; j++)
{
j1 = send_map_elmt[j];
if (assigned[j1] == pass-1)
{
P_ncols[j] = P_diag_i[j1+1] + P_offd_i[j1+1];
Pext_send_size += P_ncols[j];
}
}
Pext_send_map_start[pass][i+1] = Pext_send_size;
}
comm_handle = hypre_ParCSRCommHandleCreate (11, comm_pkg,
P_ncols, &Pext_i[1]);
hypre_ParCSRCommHandleDestroy(comm_handle);
if (Pext_send_size > old_Pext_send_size)
{
hypre_TFree(Pext_send_buffer, HYPRE_MEMORY_HOST);
Pext_send_buffer = hypre_CTAlloc(HYPRE_BigInt, Pext_send_size, HYPRE_MEMORY_HOST);
}
old_Pext_send_size = Pext_send_size;
}
cnt_offd = 0;
for (i=0; i < num_sends; i++)
{
for (j=send_map_start[i]; j < send_map_start[i+1]; j++)
{
j1 = send_map_elmt[j];
if (assigned[j1] == pass-1)
{
j_start = P_diag_start[j1];
j_end = j_start+P_diag_i[j1+1];
for (k=j_start; k < j_end; k++)
{
Pext_send_buffer[cnt_offd++] = my_first_cpt
+ (HYPRE_BigInt) P_diag_pass[pass-1][k];
}
j_start = P_offd_start[j1];
j_end = j_start+P_offd_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = P_offd_pass[pass-1][k];
k3 = 0;
while (k3 < pass-1)
{
if (k1 < new_counter[k3+1])
{
k2 = k1-new_counter[k3];
Pext_send_buffer[cnt_offd++] = new_elmts[k3][k2];
break;
}
k3++;
}
}
}
}
}
if (num_procs > 1)
{
Pext_recv_size = 0;
Pext_recv_vec_start[pass][0] = 0;
cnt_offd = 0;
for (i=0; i < num_recvs; i++)
{
for (j=recv_vec_start[i]; j<recv_vec_start[i+1]; j++)
{
if (assigned_offd[j] == pass-1)
{
Pext_start[j] = cnt_offd;
cnt_offd += Pext_i[j+1];
}
}
Pext_recv_size = cnt_offd;
Pext_recv_vec_start[pass][i+1] = Pext_recv_size;
}
hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = send_procs;
hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) =
Pext_send_map_start[pass];
hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = recv_procs;
hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) =
Pext_recv_vec_start[pass];
if (Pext_recv_size)
{
Pext_pass[pass] = hypre_CTAlloc(HYPRE_Int, Pext_recv_size, HYPRE_MEMORY_HOST);
new_elmts[pass-1] = hypre_CTAlloc(HYPRE_BigInt, Pext_recv_size, HYPRE_MEMORY_HOST);
}
else
{
Pext_pass[pass] = NULL;
new_elmts[pass-1] = NULL;
}
if (Pext_recv_size > old_Pext_recv_size)
{
hypre_TFree(loc, HYPRE_MEMORY_HOST);
loc = hypre_CTAlloc(HYPRE_Int, Pext_recv_size, HYPRE_MEMORY_HOST);
hypre_TFree(big_temp_pass, HYPRE_MEMORY_HOST);
big_temp_pass = hypre_CTAlloc(HYPRE_BigInt, Pext_recv_size, HYPRE_MEMORY_HOST);
}
old_Pext_recv_size = Pext_recv_size;
comm_handle = hypre_ParCSRCommHandleCreate (21, tmp_comm_pkg,
Pext_send_buffer, big_temp_pass);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
cnt_new = 0;
cnt_offd = 0;
/* JBS: Possible candidate for threading */
for (i=0; i < num_recvs; i++)
{
for (j=recv_vec_start[i]; j < recv_vec_start[i+1]; j++)
{
if (assigned_offd[j] == pass-1)
{
for (j1 = cnt_offd; j1 < cnt_offd+Pext_i[j+1]; j1++)
{
big_k1 = big_temp_pass[j1];
k2 = (HYPRE_Int)(big_k1 - my_first_cpt);
if (k2 > -1 && k2 < n_coarse)
{ Pext_pass[pass][j1] = -k2-1; }
else
{
not_found = 1;
k3 = 0;
while (k3 < pass-1 && not_found)
{
k2 = hypre_BigBinarySearch(new_elmts[k3], big_k1,
(new_counter[k3+1]-new_counter[k3]));
if (k2 > -1)
{
Pext_pass[pass][j1] = k2 + new_counter[k3];
not_found = 0;
}
else
{
k3++;
}
}
if (not_found)
{
new_elmts[pass-1][cnt_new] = big_k1;
loc[cnt_new++] = j1;
}
}
}
cnt_offd += Pext_i[j+1];
}
}
}
if (cnt_new)
{
hypre_BigQsortbi(new_elmts[pass-1],loc,0,cnt_new-1);
cnt = 0;
local_index = new_counter[pass-1];
Pext_pass[pass][loc[0]] = local_index;
for (i=1; i < cnt_new; i++)
{
if (new_elmts[pass-1][i] > new_elmts[pass-1][cnt])
{
new_elmts[pass-1][++cnt] = new_elmts[pass-1][i];
local_index++;
}
Pext_pass[pass][loc[i]] = local_index;
}
new_counter[pass] = local_index+1;
}
else if (num_procs > 1)
new_counter[pass] = new_counter[pass-1];
if (new_num_cols_offd < local_index+1)
{ new_num_cols_offd = local_index+1; }
pass_length = pass_pointer[pass+1] - pass_pointer[pass];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,my_thread_num,num_threads,thread_start,thread_stop,cnt_nz,cnt_nz_offd,i1,j,j1,j_start,j_end,k1,k,P_marker,P_marker_offd)
#endif
{
/* Thread by computing the sparsity structure for this pass only over
* each thread's range of rows. Rows are divided up evenly amongst
* the threads. The necessary thread-wise temporary arrays, like
* P_marker, are initialized and de-allocated internally to the
* parallel region. */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
thread_start = (pass_length/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ thread_stop = pass_length; }
else
{ thread_stop = (pass_length/num_threads)*(my_thread_num+1); }
thread_start += pass_pointer[pass];
thread_stop += pass_pointer[pass];
/* Local initializations */
cnt_nz = 0;
cnt_nz_offd = 0;
/* This block of code is to go to the top of the parallel region starting before
* the loop over num_passes. */
P_marker = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST); /* marks points to see if they're counted */
for (i=0; i < n_coarse; i++)
{ P_marker[i] = -1; }
if (new_num_cols_offd == local_index+1)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST);
for (i=0; i < new_num_cols_offd; i++)
{ P_marker_offd[i] = -1; }
}
else if (n_coarse_offd)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST);
for (i=0; i < n_coarse_offd; i++)
{ P_marker_offd[i] = -1; }
}
/* Need some variables to store each thread's cnt_nz and cnt_nz_offd, and
* then stitch things together as in par_interp.c
* This loop writes
* P_diag_i, P_offd_i: data parallel here, and require no special treatment
* P_diag_start, P_offd_start: are not data parallel, require special treatment
*/
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
P_diag_start[i1] = cnt_nz;
P_offd_start[i1] = cnt_nz_offd;
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (assigned[j1] == pass-1)
{
j_start = P_diag_start[j1];
j_end = j_start+P_diag_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = P_diag_pass[pass-1][k];
if (P_marker[k1] != i1)
{
cnt_nz++;
P_diag_i[i1+1]++;
P_marker[k1] = i1;
}
}
j_start = P_offd_start[j1];
j_end = j_start+P_offd_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = P_offd_pass[pass-1][k];
if (P_marker_offd[k1] != i1)
{
cnt_nz_offd++;
P_offd_i[i1+1]++;
P_marker_offd[k1] = i1;
}
}
}
}
j_start = 0;
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (assigned_offd[j1] == pass-1)
{
j_start = Pext_start[j1];
j_end = j_start+Pext_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = Pext_pass[pass][k];
if (k1 < 0)
{
if (P_marker[-k1-1] != i1)
{
cnt_nz++;
P_diag_i[i1+1]++;
P_marker[-k1-1] = i1;
}
}
else if (P_marker_offd[k1] != i1)
{
cnt_nz_offd++;
P_offd_i[i1+1]++;
P_marker_offd[k1] = i1;
}
}
}
}
}
/* Update P_diag_start, P_offd_start with cumulative
* nonzero counts over all threads */
if(my_thread_num == 0)
{ max_num_threads[0] = num_threads; }
cnt_nz_offd_per_thread[my_thread_num] = cnt_nz_offd;
cnt_nz_per_thread[my_thread_num] = cnt_nz;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if(my_thread_num == 0)
{
for(i = 1; i < max_num_threads[0]; i++)
{
cnt_nz_offd_per_thread[i] += cnt_nz_offd_per_thread[i-1];
cnt_nz_per_thread[i] += cnt_nz_per_thread[i-1];
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if(my_thread_num > 0)
{
/* update this thread's section of P_diag_start and P_offd_start
* with the num of nz's counted by previous threads */
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
P_diag_start[i1] += cnt_nz_per_thread[my_thread_num-1];
P_offd_start[i1] += cnt_nz_offd_per_thread[my_thread_num-1];
}
}
else /* if my_thread_num == 0 */
{
/* Grab the nz count for all threads */
cnt_nz = cnt_nz_per_thread[max_num_threads[0]-1];
cnt_nz_offd = cnt_nz_offd_per_thread[max_num_threads[0]-1];
/* Updated total nz count */
total_nz += cnt_nz;
total_nz_offd += cnt_nz_offd;
/* Allocate P_diag_pass and P_offd_pass for all threads */
P_diag_pass[pass] = hypre_CTAlloc(HYPRE_Int, cnt_nz, HYPRE_MEMORY_HOST);
if (cnt_nz_offd)
P_offd_pass[pass] = hypre_CTAlloc(HYPRE_Int, cnt_nz_offd, HYPRE_MEMORY_HOST);
else if (num_procs > 1)
P_offd_pass[pass] = NULL;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* offset cnt_nz and cnt_nz_offd to point to the starting
* point in P_diag_pass and P_offd_pass for each thread */
if(my_thread_num > 0)
{
cnt_nz = cnt_nz_per_thread[my_thread_num-1];
cnt_nz_offd = cnt_nz_offd_per_thread[my_thread_num-1];
}
else
{
cnt_nz = 0;
cnt_nz_offd = 0;
}
/* Set P_diag_pass and P_offd_pass */
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (assigned[j1] == pass-1)
{
j_start = P_diag_start[j1];
j_end = j_start+P_diag_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = P_diag_pass[pass-1][k];
if (P_marker[k1] != -i1-1)
{
P_diag_pass[pass][cnt_nz++] = k1;
P_marker[k1] = -i1-1;
}
}
j_start = P_offd_start[j1];
j_end = j_start+P_offd_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = P_offd_pass[pass-1][k];
if (P_marker_offd[k1] != -i1-1)
{
P_offd_pass[pass][cnt_nz_offd++] = k1;
P_marker_offd[k1] = -i1-1;
}
}
}
}
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (assigned_offd[j1] == pass-1)
{
j_start = Pext_start[j1];
j_end = j_start+Pext_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = Pext_pass[pass][k];
if (k1 < 0)
{
if (P_marker[-k1-1] != -i1-1)
{
P_diag_pass[pass][cnt_nz++] = -k1-1;
P_marker[-k1-1] = -i1-1;
}
}
else if (P_marker_offd[k1] != -i1-1)
{
P_offd_pass[pass][cnt_nz_offd++] = k1;
P_marker_offd[k1] = -i1-1;
}
}
}
}
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
if ( (n_coarse_offd) || (new_num_cols_offd == local_index+1) )
{ hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); }
} /* End parallel region */
}
hypre_TFree(loc, HYPRE_MEMORY_HOST);
hypre_TFree(P_ncols, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_send_buffer, HYPRE_MEMORY_HOST);
hypre_TFree(big_temp_pass, HYPRE_MEMORY_HOST);
hypre_TFree(new_recv_vec_start, HYPRE_MEMORY_HOST);
hypre_TFree(cnt_nz_per_thread, HYPRE_MEMORY_HOST);
hypre_TFree(cnt_nz_offd_per_thread, HYPRE_MEMORY_HOST);
hypre_TFree(max_num_threads, HYPRE_MEMORY_HOST);
P_diag_j = hypre_CTAlloc(HYPRE_Int, total_nz, HYPRE_MEMORY_SHARED);
P_diag_data = hypre_CTAlloc(HYPRE_Real, total_nz, HYPRE_MEMORY_SHARED);
if (total_nz_offd)
{
P_offd_j = hypre_CTAlloc(HYPRE_Int, total_nz_offd, HYPRE_MEMORY_SHARED);
P_offd_data = hypre_CTAlloc(HYPRE_Real, total_nz_offd, HYPRE_MEMORY_SHARED);
}
for (i=0; i < n_fine; i++)
{
P_diag_i[i+1] += P_diag_i[i];
P_offd_i[i+1] += P_offd_i[i];
}
/* determine P for coarse points */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,i1) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < n_coarse; i++)
{
i1 = C_array[i];
P_diag_j[P_diag_i[i1]] = fine_to_coarse[i1];
P_diag_data[P_diag_i[i1]] = 1.0;
}
if (weight_option) /*if this is set, weights are separated into
negative and positive offdiagonals and accumulated
accordingly */
{
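/* Added note, derived from the code below (not part of the original source):
 * with sum_N_* accumulating all relevant couplings of row i1 and sum_C_*
 * only those to interpolatory coarse points, the final weights are
 *
 *    w_ij = -(sum_N_neg / sum_C_neg) * a_ij / a_ii   for a_ij < 0,
 *    w_ij = -(sum_N_pos / sum_C_pos) * a_ij / a_ii   for a_ij > 0,
 *
 * i.e. direct interpolation with negative and positive couplings scaled
 * separately (the factors alfa and beta). */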
pass_length = pass_pointer[2]-pass_pointer[1];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,P_marker,P_marker_offd,i,i1,sum_C_pos,sum_C_neg,sum_N_pos,sum_N_neg,j_start,j_end,j,k1,cnt,j1,cnt_offd,diagonal,alfa,beta)
#endif
{
/* Sparsity structure is now finished. Next, calculate interpolation
* weights for pass one. Thread by computing the interpolation
* weights only over each thread's range of rows. Rows are divided
* up evenly amongst the threads. */
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
for (i=0; i < n_fine; i++)
{ P_marker[i] = -1; }
if (num_cols_offd)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_offd; i++)
P_marker_offd[i] = -1;
}
/* Compute this thread's range of pass_length */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
thread_start = pass_pointer[1] + (pass_length/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ thread_stop = pass_pointer[1] + pass_length; }
else
{ thread_stop = pass_pointer[1] + (pass_length/num_threads)*(my_thread_num+1); }
/* determine P for points of pass 1, i.e. neighbors of coarse points */
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
sum_C_pos = 0;
sum_C_neg = 0;
sum_N_pos = 0;
sum_N_neg = 0;
j_start = P_diag_start[i1];
j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_diag_pass[1][j];
P_marker[C_array[k1]] = i1;
}
cnt = P_diag_i[i1];
for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++)
{
j1 = A_diag_j[j];
if (CF_marker[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func[j1]))
{
if (A_diag_data[j] < 0)
sum_N_neg += A_diag_data[j];
else
sum_N_pos += A_diag_data[j];
}
if (j1 != -1 && P_marker[j1] == i1)
{
P_diag_data[cnt] = A_diag_data[j];
P_diag_j[cnt++] = fine_to_coarse[j1];
if (A_diag_data[j] < 0)
sum_C_neg += A_diag_data[j];
else
sum_C_pos += A_diag_data[j];
}
}
j_start = P_offd_start[i1];
j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_offd_pass[1][j];
P_marker_offd[C_array_offd[k1]] = i1;
}
cnt_offd = P_offd_i[i1];
for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++)
{
if (col_offd_S_to_A)
j1 = map_A_to_S[A_offd_j[j]];
else
j1 = A_offd_j[j];
if (CF_marker_offd[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func_offd[j1]))
{
if (A_offd_data[j] < 0)
sum_N_neg += A_offd_data[j];
else
sum_N_pos += A_offd_data[j];
}
if (j1 != -1 && P_marker_offd[j1] == i1)
{
P_offd_data[cnt_offd] = A_offd_data[j];
P_offd_j[cnt_offd++] = map_S_to_new[j1];
if (A_offd_data[j] < 0)
sum_C_neg += A_offd_data[j];
else
sum_C_pos += A_offd_data[j];
}
}
diagonal = A_diag_data[A_diag_i[i1]];
if (sum_C_neg*diagonal != 0) alfa = -sum_N_neg/(sum_C_neg*diagonal);
if (sum_C_pos*diagonal != 0) beta = -sum_N_pos/(sum_C_pos*diagonal);
for (j=P_diag_i[i1]; j < cnt; j++)
if (P_diag_data[j] < 0)
P_diag_data[j] *= alfa;
else
P_diag_data[j] *= beta;
for (j=P_offd_i[i1]; j < cnt_offd; j++)
if (P_offd_data[j] < 0)
P_offd_data[j] *= alfa;
else
P_offd_data[j] *= beta;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{ hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); }
} /* End Parallel Region */
old_Pext_send_size = 0;
old_Pext_recv_size = 0;
/*if (!col_offd_S_to_A) hypre_TFree(map_A_to_new);*/
if (n_coarse) hypre_TFree(C_array, HYPRE_MEMORY_HOST);
hypre_TFree(C_array_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_diag_pass[1], HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_TFree(P_offd_pass[1], HYPRE_MEMORY_HOST);
for (pass = 2; pass < num_passes; pass++)
{
if (num_procs > 1)
{
Pext_send_size = Pext_send_map_start[pass][num_sends];
if (Pext_send_size > old_Pext_send_size)
{
hypre_TFree(Pext_send_data, HYPRE_MEMORY_HOST);
Pext_send_data = hypre_CTAlloc(HYPRE_Real, Pext_send_size, HYPRE_MEMORY_HOST);
}
old_Pext_send_size = Pext_send_size;
cnt_offd = 0;
for (i=0; i < num_sends; i++)
{
for (j=send_map_start[i]; j < send_map_start[i+1]; j++)
{
j1 = send_map_elmt[j];
if (assigned[j1] == pass-1)
{
j_start = P_diag_i[j1];
j_end = P_diag_i[j1+1];
for (k=j_start; k < j_end; k++)
{ Pext_send_data[cnt_offd++] = P_diag_data[k]; }
j_start = P_offd_i[j1];
j_end = P_offd_i[j1+1];
for (k=j_start; k < j_end; k++)
{ Pext_send_data[cnt_offd++] = P_offd_data[k]; }
}
}
}
hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) =
Pext_send_map_start[pass];
hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) =
Pext_recv_vec_start[pass];
Pext_recv_size = Pext_recv_vec_start[pass][num_recvs];
if (Pext_recv_size > old_Pext_recv_size)
{
hypre_TFree(Pext_data, HYPRE_MEMORY_HOST);
Pext_data = hypre_CTAlloc(HYPRE_Real, Pext_recv_size, HYPRE_MEMORY_HOST);
}
old_Pext_recv_size = Pext_recv_size;
comm_handle = hypre_ParCSRCommHandleCreate (1, tmp_comm_pkg,
Pext_send_data, Pext_data);
hypre_ParCSRCommHandleDestroy(comm_handle);
hypre_TFree(Pext_send_map_start[pass], HYPRE_MEMORY_HOST);
hypre_TFree(Pext_recv_vec_start[pass], HYPRE_MEMORY_HOST);
}
pass_length = pass_pointer[pass+1]-pass_pointer[pass];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,P_marker,P_marker_offd,i,i1,sum_C_neg,sum_C_pos,sum_N_neg,sum_N_pos,j_start,j_end,cnt,j,k1,cnt_offd,j1,k,alfa,beta,diagonal,C_array,C_array_offd)
#endif
{
/* Sparsity structure is now finished. Next, calculate interpolation
* weights for passes >= 2. Thread by computing the interpolation
* weights only over each thread's range of rows. Rows are divided
* up evenly amongst the threads. */
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
for (i=0; i < n_fine; i++)
{ P_marker[i] = -1; }
if (num_cols_offd)
{
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_offd; i++)
P_marker_offd[i] = -1;
}
C_array = NULL;
C_array_offd = NULL;
if (n_coarse)
{ C_array = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST); }
if (new_num_cols_offd > n_coarse_offd)
{ C_array_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST); }
else if (n_coarse_offd)
{ C_array_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST); }
/* Compute this thread's range of pass_length */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
thread_start = pass_pointer[pass] + (pass_length/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ thread_stop = pass_pointer[pass] + pass_length; }
else
{ thread_stop = pass_pointer[pass] + (pass_length/num_threads)*(my_thread_num+1); }
/* Loop over each thread's row-range */
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
sum_C_neg = 0;
sum_C_pos = 0;
sum_N_neg = 0;
sum_N_pos = 0;
j_start = P_diag_start[i1];
j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1];
cnt = P_diag_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_diag_pass[pass][j];
C_array[k1] = cnt;
P_diag_data[cnt] = 0;
P_diag_j[cnt++] = k1;
}
j_start = P_offd_start[i1];
j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1];
cnt_offd = P_offd_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_offd_pass[pass][j];
C_array_offd[k1] = cnt_offd;
P_offd_data[cnt_offd] = 0;
P_offd_j[cnt_offd++] = k1;
}
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (assigned[j1] == pass-1)
P_marker[j1] = i1;
}
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (assigned_offd[j1] == pass-1)
P_marker_offd[j1] = i1;
}
for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++)
{
j1 = A_diag_j[j];
if (P_marker[j1] == i1)
{
for (k=P_diag_i[j1]; k < P_diag_i[j1+1]; k++)
{
k1 = P_diag_j[k];
alfa = A_diag_data[j]*P_diag_data[k];
P_diag_data[C_array[k1]] += alfa;
if (alfa < 0)
{
sum_C_neg += alfa;
sum_N_neg += alfa;
}
else
{
sum_C_pos += alfa;
sum_N_pos += alfa;
}
}
for (k=P_offd_i[j1]; k < P_offd_i[j1+1]; k++)
{
k1 = P_offd_j[k];
alfa = A_diag_data[j]*P_offd_data[k];
P_offd_data[C_array_offd[k1]] += alfa;
if (alfa < 0)
{
sum_C_neg += alfa;
sum_N_neg += alfa;
}
else
{
sum_C_pos += alfa;
sum_N_pos += alfa;
}
}
}
else
{
if (CF_marker[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func[j1]))
{
if (A_diag_data[j] < 0)
sum_N_neg += A_diag_data[j];
else
sum_N_pos += A_diag_data[j];
}
}
}
for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++)
{
if (col_offd_S_to_A)
j1 = map_A_to_S[A_offd_j[j]];
else
j1 = A_offd_j[j];
if (j1 > -1 && P_marker_offd[j1] == i1)
{
j_start = Pext_start[j1];
j_end = j_start+Pext_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = Pext_pass[pass][k];
alfa = A_offd_data[j]*Pext_data[k];
if (k1 < 0)
P_diag_data[C_array[-k1-1]] += alfa;
else
P_offd_data[C_array_offd[k1]] += alfa;
if (alfa < 0)
{
sum_C_neg += alfa;
sum_N_neg += alfa;
}
else
{
sum_C_pos += alfa;
sum_N_pos += alfa;
}
}
}
else
{
if (CF_marker_offd[j1] != -3 &&
(num_functions == 1 || dof_func_offd[j1] == dof_func[i1]))
{
if ( A_offd_data[j] < 0)
sum_N_neg += A_offd_data[j];
else
sum_N_pos += A_offd_data[j];
}
}
}
diagonal = A_diag_data[A_diag_i[i1]];
if (sum_C_neg*diagonal != 0) alfa = -sum_N_neg/(sum_C_neg*diagonal);
if (sum_C_pos*diagonal != 0) beta = -sum_N_pos/(sum_C_pos*diagonal);
for (j=P_diag_i[i1]; j < P_diag_i[i1+1]; j++)
if (P_diag_data[j] < 0)
P_diag_data[j] *= alfa;
else
P_diag_data[j] *= beta;
for (j=P_offd_i[i1]; j < P_offd_i[i1+1]; j++)
if (P_offd_data[j] < 0)
P_offd_data[j] *= alfa;
else
P_offd_data[j] *= beta;
}
hypre_TFree(C_array, HYPRE_MEMORY_HOST);
hypre_TFree(C_array_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
if (num_cols_offd)
{ hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST); }
} /* End OMP Parallel Section */
hypre_TFree(P_diag_pass[pass], HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_TFree(P_offd_pass[pass], HYPRE_MEMORY_HOST);
hypre_TFree(Pext_pass[pass], HYPRE_MEMORY_HOST);
}
} /* End num_passes for-loop */
}
else /* no distinction between positive and negative offdiagonal elements */
{
pass_length = pass_pointer[2]-pass_pointer[1];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,k,k1,i,i1,j,j1,sum_C,sum_N,j_start,j_end,cnt,tmp_marker,tmp_marker_offd,cnt_offd,diagonal,alfa)
#endif
{
/* Sparsity structure is now finished. Next, calculate interpolation
* weights for pass one. Thread by computing the interpolation
* weights only over each thread's range of rows. Rows are divided
* up evenly amongst the threads. */
/* Initialize thread-wise variables */
tmp_marker = NULL;
if (n_fine)
{ tmp_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); }
tmp_marker_offd = NULL;
if (num_cols_offd)
{ tmp_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); }
for (i=0; i < n_fine; i++)
{ tmp_marker[i] = -1; }
for (i=0; i < num_cols_offd; i++)
{ tmp_marker_offd[i] = -1; }
/* Compute this thread's range of pass_length */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
thread_start = pass_pointer[1] + (pass_length/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ thread_stop = pass_pointer[1] + pass_length; }
else
{ thread_stop = pass_pointer[1] + (pass_length/num_threads)*(my_thread_num+1); }
/* determine P for points of pass 1, i.e. neighbors of coarse points */
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
sum_C = 0;
sum_N = 0;
j_start = P_diag_start[i1];
j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_diag_pass[1][j];
tmp_marker[C_array[k1]] = i1;
}
cnt = P_diag_i[i1];
for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++)
{
j1 = A_diag_j[j];
if (CF_marker[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func[j1]))
sum_N += A_diag_data[j];
if (j1 != -1 && tmp_marker[j1] == i1)
{
P_diag_data[cnt] = A_diag_data[j];
P_diag_j[cnt++] = fine_to_coarse[j1];
sum_C += A_diag_data[j];
}
}
j_start = P_offd_start[i1];
j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_offd_pass[1][j];
tmp_marker_offd[C_array_offd[k1]] = i1;
}
cnt_offd = P_offd_i[i1];
for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++)
{
if (col_offd_S_to_A)
j1 = map_A_to_S[A_offd_j[j]];
else
j1 = A_offd_j[j];
if (CF_marker_offd[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func_offd[j1]))
sum_N += A_offd_data[j];
if (j1 != -1 && tmp_marker_offd[j1] == i1)
{
P_offd_data[cnt_offd] = A_offd_data[j];
P_offd_j[cnt_offd++] = map_S_to_new[j1];
sum_C += A_offd_data[j];
}
}
diagonal = A_diag_data[A_diag_i[i1]];
if (sum_C*diagonal != 0) alfa = -sum_N/(sum_C*diagonal);
for (j=P_diag_i[i1]; j < cnt; j++)
P_diag_data[j] *= alfa;
for (j=P_offd_i[i1]; j < cnt_offd; j++)
P_offd_data[j] *= alfa;
}
hypre_TFree(tmp_marker, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_marker_offd, HYPRE_MEMORY_HOST);
} /* end OMP parallel region */
old_Pext_send_size = 0;
old_Pext_recv_size = 0;
if (n_coarse) hypre_TFree(C_array, HYPRE_MEMORY_HOST);
hypre_TFree(C_array_offd, HYPRE_MEMORY_HOST);
hypre_TFree(P_diag_pass[1], HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_TFree(P_offd_pass[1], HYPRE_MEMORY_HOST);
for (pass = 2; pass < num_passes; pass++)
{
if (num_procs > 1)
{
Pext_send_size = Pext_send_map_start[pass][num_sends];
if (Pext_send_size > old_Pext_send_size)
{
hypre_TFree(Pext_send_data, HYPRE_MEMORY_HOST);
Pext_send_data = hypre_CTAlloc(HYPRE_Real, Pext_send_size, HYPRE_MEMORY_HOST);
}
old_Pext_send_size = Pext_send_size;
cnt_offd = 0;
for (i=0; i < num_sends; i++)
{
for (j=send_map_start[i]; j < send_map_start[i+1]; j++)
{
j1 = send_map_elmt[j];
if (assigned[j1] == pass-1)
{
j_start = P_diag_i[j1];
j_end = P_diag_i[j1+1];
for (k=j_start; k < j_end; k++)
{
Pext_send_data[cnt_offd++] = P_diag_data[k];
}
j_start = P_offd_i[j1];
j_end = P_offd_i[j1+1];
for (k=j_start; k < j_end; k++)
{
Pext_send_data[cnt_offd++] = P_offd_data[k];
}
}
}
}
hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) =
Pext_send_map_start[pass];
hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) =
Pext_recv_vec_start[pass];
Pext_recv_size = Pext_recv_vec_start[pass][num_recvs];
if (Pext_recv_size > old_Pext_recv_size)
{
hypre_TFree(Pext_data, HYPRE_MEMORY_HOST);
Pext_data = hypre_CTAlloc(HYPRE_Real, Pext_recv_size, HYPRE_MEMORY_HOST);
}
old_Pext_recv_size = Pext_recv_size;
comm_handle = hypre_ParCSRCommHandleCreate (1, tmp_comm_pkg,
Pext_send_data, Pext_data);
hypre_ParCSRCommHandleDestroy(comm_handle);
hypre_TFree(Pext_send_map_start[pass], HYPRE_MEMORY_HOST);
hypre_TFree(Pext_recv_vec_start[pass], HYPRE_MEMORY_HOST);
}
pass_length = pass_pointer[pass+1]-pass_pointer[pass];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(thread_start,thread_stop,my_thread_num,num_threads,k,k1,i,i1,j,j1,sum_C,sum_N,j_start,j_end,cnt,tmp_marker,tmp_marker_offd,cnt_offd,diagonal,alfa,tmp_array,tmp_array_offd)
#endif
{
/* Sparsity structure is now finished. Next, calculate interpolation
* weights for passes >= 2. Thread by computing the interpolation
* weights only over each thread's range of rows. Rows are divided
* up evenly amongst the threads. */
/* Initialize thread-wise variables */
tmp_marker = NULL;
if (n_fine)
{ tmp_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST); }
tmp_marker_offd = NULL;
if (num_cols_offd)
{ tmp_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); }
tmp_array = NULL;
if (n_coarse)
{ tmp_array = hypre_CTAlloc(HYPRE_Int, n_coarse, HYPRE_MEMORY_HOST); }
tmp_array_offd = NULL;
if (new_num_cols_offd > n_coarse_offd)
{ tmp_array_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST); }
else
{ tmp_array_offd = hypre_CTAlloc(HYPRE_Int, n_coarse_offd, HYPRE_MEMORY_HOST);}
for (i=0; i < n_fine; i++)
{ tmp_marker[i] = -1; }
for (i=0; i < num_cols_offd; i++)
{ tmp_marker_offd[i] = -1; }
/* Compute this thread's range of pass_length */
my_thread_num = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
thread_start = pass_pointer[pass] + (pass_length/num_threads)*my_thread_num;
if (my_thread_num == num_threads-1)
{ thread_stop = pass_pointer[pass] + pass_length; }
else
{ thread_stop = pass_pointer[pass] + (pass_length/num_threads)*(my_thread_num+1); }
for (i=thread_start; i < thread_stop; i++)
{
i1 = pass_array[i];
sum_C = 0;
sum_N = 0;
j_start = P_diag_start[i1];
j_end = j_start+P_diag_i[i1+1]-P_diag_i[i1];
cnt = P_diag_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_diag_pass[pass][j];
tmp_array[k1] = cnt;
P_diag_data[cnt] = 0;
P_diag_j[cnt++] = k1;
}
j_start = P_offd_start[i1];
j_end = j_start+P_offd_i[i1+1]-P_offd_i[i1];
cnt_offd = P_offd_i[i1];
for (j=j_start; j < j_end; j++)
{
k1 = P_offd_pass[pass][j];
tmp_array_offd[k1] = cnt_offd;
P_offd_data[cnt_offd] = 0;
P_offd_j[cnt_offd++] = k1;
}
for (j=S_diag_i[i1]; j < S_diag_i[i1+1]; j++)
{
j1 = S_diag_j[j];
if (assigned[j1] == pass-1)
tmp_marker[j1] = i1;
}
for (j=S_offd_i[i1]; j < S_offd_i[i1+1]; j++)
{
j1 = S_offd_j[j];
if (assigned_offd[j1] == pass-1)
tmp_marker_offd[j1] = i1;
}
for (j=A_diag_i[i1]+1; j < A_diag_i[i1+1]; j++)
{
j1 = A_diag_j[j];
if (tmp_marker[j1] == i1)
{
for (k=P_diag_i[j1]; k < P_diag_i[j1+1]; k++)
{
k1 = P_diag_j[k];
alfa = A_diag_data[j]*P_diag_data[k];
P_diag_data[tmp_array[k1]] += alfa;
sum_C += alfa;
sum_N += alfa;
}
for (k=P_offd_i[j1]; k < P_offd_i[j1+1]; k++)
{
k1 = P_offd_j[k];
alfa = A_diag_data[j]*P_offd_data[k];
P_offd_data[tmp_array_offd[k1]] += alfa;
sum_C += alfa;
sum_N += alfa;
}
}
else
{
if (CF_marker[j1] != -3 &&
(num_functions == 1 || dof_func[i1] == dof_func[j1]))
sum_N += A_diag_data[j];
}
}
for (j=A_offd_i[i1]; j < A_offd_i[i1+1]; j++)
{
if (col_offd_S_to_A)
j1 = map_A_to_S[A_offd_j[j]];
else
j1 = A_offd_j[j];
if (j1 > -1 && tmp_marker_offd[j1] == i1)
{
j_start = Pext_start[j1];
j_end = j_start+Pext_i[j1+1];
for (k=j_start; k < j_end; k++)
{
k1 = Pext_pass[pass][k];
alfa = A_offd_data[j]*Pext_data[k];
if (k1 < 0)
P_diag_data[tmp_array[-k1-1]] += alfa;
else
P_offd_data[tmp_array_offd[k1]] += alfa;
sum_C += alfa;
sum_N += alfa;
}
}
else
{
if (CF_marker_offd[j1] != -3 &&
(num_functions == 1 || dof_func_offd[j1] == dof_func[i1]))
sum_N += A_offd_data[j];
}
}
diagonal = A_diag_data[A_diag_i[i1]];
if (sum_C*diagonal) alfa = -sum_N/(sum_C*diagonal);
for (j=P_diag_i[i1]; j < P_diag_i[i1+1]; j++)
P_diag_data[j] *= alfa;
for (j=P_offd_i[i1]; j < P_offd_i[i1+1]; j++)
P_offd_data[j] *= alfa;
}
hypre_TFree(tmp_marker, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_array, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_array_offd, HYPRE_MEMORY_HOST);
} /* End OMP Parallel Section */
hypre_TFree(P_diag_pass[pass], HYPRE_MEMORY_HOST);
if (num_procs > 1)
{
hypre_TFree(P_offd_pass[pass], HYPRE_MEMORY_HOST);
hypre_TFree(Pext_pass[pass], HYPRE_MEMORY_HOST);
}
}
}
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_send_map_start, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_recv_vec_start, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_send_data, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_data, HYPRE_MEMORY_HOST);
hypre_TFree(P_diag_pass, HYPRE_MEMORY_HOST);
hypre_TFree(P_offd_pass, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_pass, HYPRE_MEMORY_HOST);
hypre_TFree(P_diag_start, HYPRE_MEMORY_HOST);
hypre_TFree(P_offd_start, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_start, HYPRE_MEMORY_HOST);
hypre_TFree(Pext_i, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(assigned, HYPRE_MEMORY_HOST);
hypre_TFree(assigned_offd, HYPRE_MEMORY_HOST);
hypre_TFree(pass_pointer, HYPRE_MEMORY_HOST);
hypre_TFree(pass_array, HYPRE_MEMORY_HOST);
hypre_TFree(map_S_to_new, HYPRE_MEMORY_HOST);
hypre_TFree(map_A_to_S, HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST);
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(A),
total_global_cpts,
hypre_ParCSRMatrixColStarts(A),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max
and/or keeping at most <P_max_elmts> entries of largest absolute value per row */
if (trunc_factor != 0.0 || P_max_elmts != 0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, P_max_elmts);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
}
P_offd_size = P_offd_i[n_fine];
num_cols_offd_P = 0;
if (P_offd_size)
{
if (new_num_cols_offd > num_cols_offd)
{ P_marker_offd = hypre_CTAlloc(HYPRE_Int, new_num_cols_offd, HYPRE_MEMORY_HOST); }
else
{ P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST); }
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < new_num_cols_offd; i++)
{ P_marker_offd[i] = 0; }
num_cols_offd_P = 0;
for (i=0; i < P_offd_size; i++)
{
index = P_offd_j[i];
if (!P_marker_offd[index])
{
num_cols_offd_P++;
P_marker_offd[index] = 1;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_P, HYPRE_MEMORY_HOST);
permute = hypre_CTAlloc(HYPRE_Int, new_counter[num_passes-1], HYPRE_MEMORY_HOST);
big_permute = hypre_CTAlloc(HYPRE_BigInt, new_counter[num_passes-1], HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < new_counter[num_passes-1]; i++)
big_permute[i] = -1;
cnt = 0;
for (i=0; i < num_passes-1; i++)
{
for (j=new_counter[i]; j < new_counter[i+1]; j++)
{
if (P_marker_offd[j])
{
col_map_offd_P[cnt] = new_elmts[i][j-(HYPRE_BigInt)new_counter[i]];
big_permute[j] = col_map_offd_P[cnt++];
}
}
}
hypre_BigQsort0(col_map_offd_P,0,num_cols_offd_P-1);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,big_k1) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < new_counter[num_passes-1]; i++)
{
big_k1 = big_permute[i];
if (big_k1 != -1)
permute[i] = hypre_BigBinarySearch(col_map_offd_P,big_k1,num_cols_offd_P);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
{ P_offd_j[i] = permute[P_offd_j[i]]; }
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
if (num_procs > 1)
{
for (i=0; i < num_passes-1; i++)
hypre_TFree(new_elmts[i], HYPRE_MEMORY_HOST);
}
hypre_TFree(permute, HYPRE_MEMORY_HOST);
hypre_TFree(big_permute, HYPRE_MEMORY_HOST);
hypre_TFree(new_elmts, HYPRE_MEMORY_HOST);
hypre_TFree(new_counter, HYPRE_MEMORY_HOST);
if (num_cols_offd_P)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_offd_P;
}
if (n_SF)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < n_fine; i++)
if (CF_marker[i] == -3) CF_marker[i] = -1;
}
if (num_procs > 1)
{
hypre_MatvecCommPkgCreate(P);
}
*P_ptr = P;
/* wall_time = hypre_MPI_Wtime() - wall_time;
hypre_printf("TOTAL TIME %1.2e \n",wall_time); */
/*-----------------------------------------------------------------------
* Build and return dof_func array for coarse grid.
*-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
* Free mapping vector and marker array.
*-----------------------------------------------------------------------*/
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MULTIPASS_INTERP] += hypre_MPI_Wtime();
#endif
return(0);
}
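/* A minimal illustrative sketch (not part of the hypre API; scale_interp_row
 * and its arguments are hypothetical) of the row scaling applied in the weight
 * loops above: the coefficients accumulated for the strong coarse neighbors of
 * a fine row are rescaled by alfa = -sum_N / (sum_C * a_ii) (and, when
 * weight_option is set, separately by beta for the positive part) so that the
 * interpolation reproduces the row's full neighbor sum. */
static void scale_interp_row( HYPRE_Real *p_row, HYPRE_Int nnz,
                              HYPRE_Real sum_C, HYPRE_Real sum_N,
                              HYPRE_Real diagonal )
{
   HYPRE_Int j;
   HYPRE_Real alfa = 1.0;
   if (sum_C * diagonal != 0)
   {
      alfa = -sum_N / (sum_C * diagonal);
   }
   for (j = 0; j < nnz; j++)
   {
      p_row[j] *= alfa;
   }
}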
|
r_direct_o1.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <complex.h>
//#include <omp.h>
#include "config.h"
#include "cint.h"
#include "optimizer.h"
#include "nr_direct.h"
#include "time_rev.h"
int GTOmax_shell_dim(const int *ao_loc, const int *shls_slice, int ncenter);
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
#define DECLARE_ALL \
const int *atm = envs->atm; \
const int *bas = envs->bas; \
const double *env = envs->env; \
const int natm = envs->natm; \
const int nbas = envs->nbas; \
const int *ao_loc = envs->ao_loc; \
const int *shls_slice = envs->shls_slice; \
const int *tao = envs->tao; \
const CINTOpt *cintopt = envs->cintopt; \
const int nao = ao_loc[nbas]; \
const int di = ao_loc[ish+1] - ao_loc[ish]; \
const int dj = ao_loc[jsh+1] - ao_loc[jsh]; \
const int dim = GTOmax_shell_dim(ao_loc, shls_slice+4, 2); \
double *cache = (double *)(buf + di * dj * dim * dim * ncomp); \
int (*fprescreen)(); \
int (*r_vkscreen)(); \
if (vhfopt) { \
fprescreen = vhfopt->fprescreen; \
r_vkscreen = vhfopt->r_vkscreen; \
} else { \
fprescreen = CVHFnoscreen; \
r_vkscreen = CVHFr_vknoscreen; \
}
static void transpose01324(double complex * __restrict__ a,
double complex * __restrict__ at,
int di, int dj, int dk, int dl, int ncomp)
{
int i, j, k, l, m, ic;
int dij = di * dj;
int dijk = dij * dk;
double complex *pa;
m = 0;
for (ic = 0; ic < ncomp; ic++) {
for (l = 0; l < dl; l++) {
for (j = 0; j < dj; j++) {
pa = a + j*di;
for (k = 0; k < dk; k++) {
for (i = 0; i < di; i++) {
at[m] = pa[i];
m++;
}
pa += dij;
}
}
a += dijk;
}
}
}
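/* Assumed reading of transpose01324 (an illustration, not asserted anywhere in
 * this file): with index i fastest in both layouts, the loop nest above copies
 *
 *   at[i + k*di + j*di*dk + (l + ic*dl)*di*dj*dk]
 *       = a[i + j*di + k*di*dj + (l + ic*dl)*di*dj*dk]
 *
 * i.e. it swaps the j and k axes, which is the buf.transpose(0,2,1,3)
 * referred to where the transposed buffer is consumed below. */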
/*
* for given ish, jsh, loop over all ksh, lsh
*/
void CVHFdot_rs1(int (*intor)(), void (**fjk)(),
double complex **dms, double complex *vjk, double complex *buf,
int n_dm, int ncomp, int ish, int jsh,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
DECLARE_ALL;
const size_t nao2 = nao * nao;
int idm, ksh, lsh, dk, dl, dijkl;
int shls[4];
double complex *pv;
double *dms_cond[n_dm+1];
double dm_atleast;
void (*pf)();
// to make fjk compatible with a C-contiguous dm array, put ksh, lsh in the inner loops
shls[0] = ish;
shls[1] = jsh;
for (ksh = 0; ksh < nbas; ksh++) {
for (lsh = 0; lsh < nbas; lsh++) {
dk = ao_loc[ksh+1] - ao_loc[ksh];
dl = ao_loc[lsh+1] - ao_loc[lsh];
shls[2] = ksh;
shls[3] = lsh;
if ((*fprescreen)(shls, vhfopt, atm, bas, env)) {
// append buf.transpose(0,2,1,3) to eris, to reduce the cost of r_direct_dot
if ((*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
cintopt, cache)) {
dijkl = di * dj * dk * dl;
if ((*r_vkscreen)(shls, vhfopt, dms_cond, n_dm,
&dm_atleast, atm, bas, env)) {
transpose01324(buf, buf+dijkl*ncomp,
di, dj, dk, dl, ncomp);
}
pv = vjk;
for (idm = 0; idm < n_dm; idm++) {
pf = fjk[idm];
(*pf)(buf, dms[idm], pv, nao, ncomp,
shls, ao_loc, tao,
dms_cond[idm], nbas, dm_atleast);
pv += nao2 * ncomp;
}
}
}
} }
}
/*
* for given ish, jsh, loop over all ksh >= lsh
*/
static void dot_rs2sub(int (*intor)(), void (**fjk)(),
double complex **dms, double complex *vjk, double complex *buf,
int n_dm, int ncomp, int ish, int jsh, int ksh_count,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
DECLARE_ALL;
const size_t nao2 = nao * nao;
int idm, ksh, lsh, dk, dl, dijkl;
int shls[4];
double complex *pv;
double *dms_cond[n_dm+1];
double dm_atleast;
void (*pf)();
shls[0] = ish;
shls[1] = jsh;
for (ksh = 0; ksh < ksh_count; ksh++) {
for (lsh = 0; lsh <= ksh; lsh++) {
dk = ao_loc[ksh+1] - ao_loc[ksh];
dl = ao_loc[lsh+1] - ao_loc[lsh];
shls[2] = ksh;
shls[3] = lsh;
if ((*fprescreen)(shls, vhfopt, atm, bas, env)) {
if ((*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
cintopt, cache)) {
dijkl = di * dj * dk * dl;
if ((*r_vkscreen)(shls, vhfopt, dms_cond, n_dm,
&dm_atleast, atm, bas, env)) {
transpose01324(buf, buf+dijkl*ncomp,
di, dj, dk, dl, ncomp);
}
pv = vjk;
for (idm = 0; idm < n_dm; idm++) {
pf = fjk[idm];
(*pf)(buf, dms[idm], pv, nao, ncomp,
shls, ao_loc, tao,
dms_cond[idm], nbas, dm_atleast);
pv += nao2 * ncomp;
}
}
}
} }
}
void CVHFdot_rs2ij(int (*intor)(), void (**fjk)(),
double complex **dms, double complex *vjk, double complex *buf,
int n_dm, int ncomp, int ish, int jsh,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
if (ish >= jsh) {
CVHFdot_rs1(intor, fjk, dms, vjk, buf, n_dm, ncomp,
ish, jsh, vhfopt, envs);
}
}
void CVHFdot_rs2kl(int (*intor)(), void (**fjk)(),
double complex **dms, double complex *vjk, double complex *buf,
int n_dm, int ncomp, int ish, int jsh,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
dot_rs2sub(intor, fjk, dms, vjk, buf, n_dm, ncomp,
ish, jsh, envs->nbas, vhfopt, envs);
}
void CVHFdot_rs4(int (*intor)(), void (**fjk)(),
double complex **dms, double complex *vjk, double complex *buf,
int n_dm, int ncomp, int ish, int jsh,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
if (ish >= jsh) {
dot_rs2sub(intor, fjk, dms, vjk, buf, n_dm, ncomp,
ish, jsh, envs->nbas, vhfopt, envs);
}
}
void CVHFdot_rs8(int (*intor)(), void (**fjk)(),
double complex **dms, double complex *vjk, double complex *buf,
int n_dm, int ncomp, int ish, int jsh,
CVHFOpt *vhfopt, IntorEnvs *envs)
{
if (ish < jsh) {
return;
}
DECLARE_ALL;
const size_t nao2 = nao * nao;
int idm, ksh, lsh, dk, dl, dijkl;
int shls[4];
double complex *pv;
double *dms_cond[n_dm+1];
double dm_atleast;
void (*pf)();
// to make fjk compatible with a C-contiguous dm array, put ksh, lsh in the inner loops
shls[0] = ish;
shls[1] = jsh;
for (ksh = 0; ksh <= ish; ksh++) {
for (lsh = 0; lsh <= ksh; lsh++) {
/* When ksh==ish, (lsh<jsh) misses some integrals (e.g. k<i && l>j).
* These integrals are calculated in the next (ish,jsh) pair. To show
* that, we just need to prove that every element in shell^4 appears
* only once in fjk_s8. */
if ((ksh == ish) && (lsh > jsh)) {
break;
}
dk = ao_loc[ksh+1] - ao_loc[ksh];
dl = ao_loc[lsh+1] - ao_loc[lsh];
shls[2] = ksh;
shls[3] = lsh;
if ((*fprescreen)(shls, vhfopt, atm, bas, env)) {
if ((*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
cintopt, cache)) {
dijkl = di * dj * dk * dl;
if ((*r_vkscreen)(shls, vhfopt, dms_cond, n_dm,
&dm_atleast, atm, bas, env)) {
transpose01324(buf, buf+dijkl*ncomp,
di, dj, dk, dl, ncomp);
}
pv = vjk;
for (idm = 0; idm < n_dm; idm++) {
pf = fjk[idm];
(*pf)(buf, dms[idm], pv, nao, ncomp,
shls, ao_loc, tao,
dms_cond[idm], nbas, dm_atleast);
pv += nao2 * ncomp;
}
}
}
} }
}
/*
* The driver loops over (ij) shell pairs; for each given (ij) it generates
* the ERIs over kl and calls fjk to calculate vj, vk.
*
* n_dm is the number of dms for one [array(ij|kl)],
* ncomp is the number of components produced by intor
*/
void CVHFr_direct_drv(int (*intor)(), void (*fdot)(), void (**fjk)(),
double complex **dms, double complex *vjk,
int n_dm, int ncomp, int *shls_slice, int *ao_loc,
CINTOpt *cintopt, CVHFOpt *vhfopt,
int *atm, int natm, int *bas, int nbas, double *env)
{
const size_t nao = ao_loc[nbas];
int *tao = malloc(sizeof(int)*nao);
CVHFtimerev_map(tao, bas, nbas);
IntorEnvs envs = {natm, nbas, atm, bas, env, shls_slice, ao_loc, tao,
cintopt, ncomp};
memset(vjk, 0, sizeof(double complex)*nao*nao*n_dm*ncomp);
const int di = GTOmax_shell_dim(ao_loc, shls_slice, 4);
const int cache_size = GTOmax_cache_size(intor, shls_slice, 4,
atm, natm, bas, nbas, env);
#pragma omp parallel
{
int i, j, ij;
double complex *v_priv = malloc(sizeof(double complex)*nao*nao*n_dm*ncomp);
memset(v_priv, 0, sizeof(double complex)*nao*nao*n_dm*ncomp);
int bufsize = di*di*di*di*ncomp;
bufsize = bufsize + MAX(bufsize, cache_size/2);
double complex *buf = malloc(sizeof(double complex) * bufsize);
#pragma omp for nowait schedule(dynamic)
for (ij = 0; ij < nbas*nbas; ij++) {
i = ij / nbas;
j = ij - i * nbas;
(*fdot)(intor, fjk, dms, v_priv, buf, n_dm, ncomp, i, j,
vhfopt, &envs);
}
#pragma omp critical
{
for (i = 0; i < nao*nao*n_dm*ncomp; i++) {
vjk[i] += v_priv[i];
}
}
free(v_priv);
free(buf);
}
free(tao);
}
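/* The parallel region in CVHFr_direct_drv follows a common OpenMP pattern:
 * each thread accumulates into a private copy of the output, and the private
 * copies are merged under a critical section. A self-contained sketch of just
 * that pattern (reduce_example and its arguments are hypothetical, not part of
 * this file's API): */
static void reduce_example(double *out, size_t n, int ntasks)
{
        memset(out, 0, sizeof(double) * n);
#pragma omp parallel
{
        size_t i;
        int t;
        double *priv = malloc(sizeof(double) * n);
        memset(priv, 0, sizeof(double) * n);
#pragma omp for nowait schedule(dynamic)
        for (t = 0; t < ntasks; t++) {
                for (i = 0; i < n; i++) {
                        priv[i] += 1.0;  /* stand-in for the real per-task work */
                }
        }
#pragma omp critical
        {
                for (i = 0; i < n; i++) {
                        out[i] += priv[i];
                }
        }
        free(priv);
}
}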
|
yoloDection.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#ifndef YOLODECTION_H
#define YOLODECTION_H
#include <math.h>
#include <vector>
#include <algorithm>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include "net.h"
struct BBoxRect
{
float xmin;
float ymin;
float xmax;
float ymax;
int label;
};
class yoloDection
{
public:
yoloDection();
yoloDection(int classN, int boxN, float conf_thre, float nms_thre, ncnn::Mat biases,
ncnn::Mat mask, ncnn::Mat anchors_scale, int mask_group):
num_class(classN),num_box(boxN), confidence_threshold(conf_thre), nms_threshold(nms_thre),
biases(biases), mask(mask), anchors_scale(anchors_scale), mask_group_num(mask_group){}
float sigmoid(float x);
float intersection_area(const BBoxRect& a, const BBoxRect& b);
template <typename T> void qsort_descent_inplace(std::vector<T>& datas, std::vector<float>& scores, int left, int right);
template <typename T> void qsort_descent_inplace(std::vector<T>& datas, std::vector<float>& scores);
void nms_sorted_bboxes(const std::vector<BBoxRect>& bboxes, std::vector<int>& picked, float nms_threshold);
int detection(const std::vector<ncnn::Mat>& bottom_blobs, ncnn::Mat& top_blob);
public:
int num_class;
int num_box;
float confidence_threshold;
float nms_threshold;
ncnn::Mat biases;
ncnn::Mat mask;
ncnn::Mat anchors_scale;
int mask_group_num;
};
inline float yoloDection::intersection_area(const BBoxRect& a, const BBoxRect& b)
{
if (a.xmin > b.xmax || a.xmax < b.xmin || a.ymin > b.ymax || a.ymax < b.ymin)
{
// no intersection
return 0.f;
}
float inter_width = std::min(a.xmax, b.xmax) - std::max(a.xmin, b.xmin);
float inter_height = std::min(a.ymax, b.ymax) - std::max(a.ymin, b.ymin);
return inter_width * inter_height;
}
template <typename T>
inline void yoloDection::qsort_descent_inplace(std::vector<T>& datas, std::vector<float>& scores, int left, int right)
{
int i = left;
int j = right;
float p = scores[(left + right) / 2];
while (i <= j)
{
while (scores[i] > p)
i++;
while (scores[j] < p)
j--;
if (i <= j)
{
// swap
std::swap(datas[i], datas[j]);
std::swap(scores[i], scores[j]);
i++;
j--;
}
}
if (left < j)
qsort_descent_inplace(datas, scores, left, j);
if (i < right)
qsort_descent_inplace(datas, scores, i, right);
}
template <typename T>
inline void yoloDection::qsort_descent_inplace(std::vector<T>& datas, std::vector<float>& scores)
{
if (datas.empty() || scores.empty())
return;
qsort_descent_inplace(datas, scores, 0, scores.size() - 1);
}
inline void yoloDection::nms_sorted_bboxes(const std::vector<BBoxRect>& bboxes, std::vector<int>& picked, float nms_threshold)
{
picked.clear();
const int n = bboxes.size();
std::vector<float> areas(n);
for (int i = 0; i < n; i++)
{
const BBoxRect& r = bboxes[i];
float width = r.xmax - r.xmin;
float height = r.ymax - r.ymin;
areas[i] = width * height;
}
for (int i = 0; i < n; i++)
{
const BBoxRect& a = bboxes[i];
int keep = 1;
for (int j = 0; j < (int)picked.size(); j++)
{
const BBoxRect& b = bboxes[picked[j]];
// intersection over union
float inter_area = intersection_area(a, b);
float union_area = areas[i] + areas[picked[j]] - inter_area;
// float IoU = inter_area / union_area
if (inter_area / union_area > nms_threshold)
keep = 0;
}
if (keep)
picked.push_back(i);
}
}
inline float yoloDection::sigmoid(float x)
{
return 1.f / (1.f + exp(-x));
}
inline int yoloDection::detection(const std::vector<ncnn::Mat>& bottom_blobs, ncnn::Mat& top_blob)
{
// gather all box
std::vector<BBoxRect> all_bbox_rects;
std::vector<float> all_bbox_scores;
for (size_t b = 0; b < bottom_blobs.size(); b++)
{
fprintf(stderr, "biases: %f\n", biases[b]);
std::vector< std::vector<BBoxRect> > all_box_bbox_rects;
std::vector< std::vector<float> > all_box_bbox_scores;
all_box_bbox_rects.resize(num_box);
all_box_bbox_scores.resize(num_box);
const ncnn::Mat& bottom_top_blobs = bottom_blobs[b];
int w = bottom_top_blobs.w;
int h = bottom_top_blobs.h;
int channels = bottom_top_blobs.c;
//printf("%d %d %d\n", w, h, channels);
const int channels_per_box = channels / num_box;
// anchor coord + box score + num_class
if (channels_per_box != 4 + 1 + num_class)
return -1;
int mask_offset = b * num_box;
int net_w = (int)(anchors_scale[b] * w);
int net_h = (int)(anchors_scale[b] * h);
//printf("%d %d\n", net_w, net_h);
//printf("%d %d %d\n", w, h, channels);
//#pragma omp parallel for num_threads(4)
for (int pp = 0; pp < num_box; pp++)
{
int p = pp * channels_per_box;
int biases_index = mask[pp + mask_offset];
//printf("%d\n", biases_index);
const float bias_w = biases[biases_index * 2];
const float bias_h = biases[biases_index * 2 + 1];
//printf("%f %f\n", bias_w, bias_h);
const float* xptr = bottom_top_blobs.channel(p);
const float* yptr = bottom_top_blobs.channel(p + 1);
const float* wptr = bottom_top_blobs.channel(p + 2);
const float* hptr = bottom_top_blobs.channel(p + 3);
const float* box_score_ptr = bottom_top_blobs.channel(p + 4);
// softmax class scores
ncnn::Mat scores = bottom_top_blobs.channel_range(p + 5, num_class);
//softmax->forward_inplace(scores, opt);
for (int i = 0; i < h; i++)
{
for (int j = 0; j < w; j++)
{
// box score
float box_score = sigmoid(box_score_ptr[0]);
// find class index with max class score
int class_index = 0;
float class_score = 0.f;
for (int q = 0; q < num_class; q++)
{
float score = sigmoid(scores.channel(q).row(i)[j]);
if (score > class_score)
{
class_index = q;
class_score = score;
}
}
//printf( "%d %f %f\n", class_index, box_score, class_score);
float confidence = box_score * class_score;
if (confidence >= confidence_threshold)
{
// region box
float bbox_cx = (j + sigmoid(xptr[0])) / w;
float bbox_cy = (i + sigmoid(yptr[0])) / h;
//float bbox_w = exp(wptr[0]) / net_w;
//float bbox_h = exp(hptr[0]) / net_h;
float bbox_w = exp(wptr[0]) * bias_w / net_w;
float bbox_h = exp(hptr[0]) * bias_h / net_h;
float bbox_xmin = bbox_cx - bbox_w * 0.5f;
float bbox_ymin = bbox_cy - bbox_h * 0.5f;
float bbox_xmax = bbox_cx + bbox_w * 0.5f;
float bbox_ymax = bbox_cy + bbox_h * 0.5f;
BBoxRect c = { bbox_xmin, bbox_ymin, bbox_xmax, bbox_ymax, class_index };
all_box_bbox_rects[pp].push_back(c);
all_box_bbox_scores[pp].push_back(confidence);
}
xptr++;
yptr++;
wptr++;
hptr++;
box_score_ptr++;
}
}
}
for (int i = 0; i < num_box; i++)
{
const std::vector<BBoxRect>& box_bbox_rects = all_box_bbox_rects[i];
const std::vector<float>& box_bbox_scores = all_box_bbox_scores[i];
all_bbox_rects.insert(all_bbox_rects.end(), box_bbox_rects.begin(), box_bbox_rects.end());
all_bbox_scores.insert(all_bbox_scores.end(), box_bbox_scores.begin(), box_bbox_scores.end());
}
}
// global sort inplace
qsort_descent_inplace(all_bbox_rects, all_bbox_scores);
// apply nms
std::vector<int> picked;
nms_sorted_bboxes(all_bbox_rects, picked, nms_threshold);
// select
std::vector<BBoxRect> bbox_rects;
std::vector<float> bbox_scores;
for (int i = 0; i < (int)picked.size(); i++)
{
int z = picked[i];
bbox_rects.push_back(all_bbox_rects[z]);
bbox_scores.push_back(all_bbox_scores[z]);
}
// fill result
int num_detected = bbox_rects.size();
if (num_detected == 0)
return 0;
top_blob.create(6, num_detected, 4u, 0);
if (top_blob.empty())
return -100;
for (int i = 0; i < num_detected; i++)
{
const BBoxRect& r = bbox_rects[i];
float score = bbox_scores[i];
float* outptr = top_blob.row(i);
outptr[0] = r.label + 1; // +1 to account for the prepended background class
outptr[1] = score;
outptr[2] = r.xmin;
outptr[3] = r.ymin;
outptr[4] = r.xmax;
outptr[5] = r.ymax;
}
return 0;
}
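// Illustrative sketch only: driving this header's NMS on hand-made boxes. The
// constructor arguments (80 classes, 3 anchors, 0.25/0.45 thresholds, empty
// ncnn::Mat parameters) and the box coordinates are placeholder values, not
// anything required by the header. Boxes are assumed to be pre-sorted by
// confidence, highest first, as nms_sorted_bboxes expects.
static inline void nms_usage_sketch()
{
    yoloDection det(80, 3, 0.25f, 0.45f, ncnn::Mat(), ncnn::Mat(), ncnn::Mat(), 3);
    std::vector<BBoxRect> boxes;
    boxes.push_back({0.10f, 0.10f, 0.50f, 0.50f, 0}); // kept (first box always survives)
    boxes.push_back({0.12f, 0.12f, 0.52f, 0.52f, 0}); // IoU ~0.82 > 0.45 -> suppressed
    boxes.push_back({0.60f, 0.60f, 0.90f, 0.90f, 1}); // disjoint -> kept
    std::vector<int> picked; // receives indices 0 and 2
    det.nms_sorted_bboxes(boxes, picked, det.nms_threshold);
}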
#endif // YOLODECTION_H
|
MSCHAPv2_bs_fmt_plug.c | /*
* MSCHAPv2_fmt.c -- Microsoft PPP CHAP Extensions, Version 2
*
* Written by JoMo-Kun <jmk at foofus.net> in 2010
* and placed in the public domain.
*
* Modified for performance, OMP and utf-8 support
* by magnum 2010-2011, no rights reserved
*
* Modified for using Bitsliced DES by Deepika Dutta Mishra
* <dipikadutta at gmail.com> in 2012, no rights reserved.
*
* Support for freeradius-wep-patch challenge/response format
* added by Linus Lüssing in 2012 and is licensed under CC0/PD terms:
* To the extent possible under law, Linus Lüssing has waived all copyright
* and related or neighboring rights to this work. This work is published from: Germany.
*
* This algorithm is designed for performing brute-force cracking of the
* MSCHAPv2 challenge/response sets exchanged during network-based
* authentication attempts. The captured challenge/response set from these
* attempts should be stored using the following format:
*
* USERNAME:::AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE
* USERNAME::DOMAIN:AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE
* DOMAIN\USERNAME:::AUTHENTICATOR CHALLENGE:MSCHAPv2 RESPONSE:PEER CHALLENGE
* :::MSCHAPv2 CHALLENGE:MSCHAPv2 RESPONSE:
*
* For example:
* User:::5B5D7C7D7B3F2F3E3C2C602132262628:82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF:21402324255E262A28295F2B3A337C7E
* domain\fred:::56d64cbe7bad61349a0b752335100eaf:d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b:7f8a466cff2a6bf0c80218bbf56d76bc
*
* http://freeradius.org/rfc/rfc2759.txt
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_MSCHAPv2_old;
#elif FMT_REGISTERS_H
john_register_one(&fmt_MSCHAPv2_old);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "DES_std.h"
#include "DES_bs.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "options.h"
#include "memory.h"
#include "sha.h"
#include "unicode.h"
#include "memdbg.h"
#ifndef uchar
#define uchar unsigned char
#endif
#define FORMAT_LABEL "mschapv2-naive"
#define FORMAT_NAME "MSCHAPv2 C/R"
#define FORMAT_TAG "$MSCHAPv2$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "MD4 DES " DES_BS_ALGORITHM_NAME " naive"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125 /* lmcons.h - PWLEN (256) ? 127 ? */
#define USERNAME_LENGTH 256 /* lmcons.h - UNLEN (256) / LM20_UNLEN (20) */
#define DOMAIN_LENGTH 15 /* lmcons.h - CNLEN / DNLEN */
#define BINARY_SIZE 24
#define BINARY_ALIGN 4
#define CHALLENGE_LENGTH 64
#define SALT_SIZE 8
#define SALT_ALIGN 4
#define CIPHERTEXT_LENGTH 48
#define TOTAL_LENGTH 13 + USERNAME_LENGTH + CHALLENGE_LENGTH + CIPHERTEXT_LENGTH
#define MIN_KEYS_PER_CRYPT DES_BS_DEPTH
#define MAX_KEYS_PER_CRYPT DES_BS_DEPTH
static struct fmt_tests tests[] = {
{"", "Cricket8", {"testuser1", "", "", "d07054459a1fdbc266a006f0220e6fac", "33c8331a9b03b7e003f09dd253d740a2bead544143cc8bde", "3545cb1d89b507a5de104435e81b14a4"} },
{"$MSCHAPv2$4c092fd3fd98236502e8591100046326$b912ce522524d33123a982cf330a57f8e953fa7974042b5d$6a4915d0ce61d42be533640a75391925$1111", "2222"},
{"$MSCHAPv2$5B5D7C7D7B3F2F3E3C2C602132262628$82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF$21402324255E262A28295F2B3A337C7E$User", "clientPass"},
{"$MSCHAPv2$d07054459a1fdbc266a006f0220e6fac$33c8331a9b03b7e003f09dd253d740a2bead544143cc8bde$3545cb1d89b507a5de104435e81b14a4$testuser1", "Cricket8"},
{"$MSCHAPv2$56d64cbe7bad61349a0b752335100eaf$d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b$7f8a466cff2a6bf0c80218bbf56d76bc$fred", "OMG!BBQ!11!one"}, /* domain\fred */
{"$MSCHAPv2$b3c42db475b881d3c52ff3923d7b3bf8$f07c7a4eb391f5debe32d814679a5a69661b86b33227c4f8$6321f8649b971bd11ce8d5cb22a4a738$bOb", "asdblahblahblahblahblahblahblahblah"}, /* WorkGroup\bOb */
{"$MSCHAPv2$d94e7c7972b2376b28c268583e162de7$eba25a3b04d2c7085d01f842e2befc91745c40db0f792356$0677ca7318fd7f65ae1b4f58c9f4f400$lameuser", ""}, /* no password */
{"$MSCHAPv2$8710da60ebfc4cab$c4e3bb55904c966927ee68e5f1472e1f5d8ec165713b5360$$foo4", "bar4" },
{"$MSCHAPv2$8710da60ebfc4cab$c4e3bb55904c966927ee68e5f1472e1f5d8ec165713b5360$$", "bar4" },
/* Ettercap generated three test vectors */
{"$MSCHAPv2$3D79CC8CDC0261D4$B700770725F87739ADB110B310D9A289CDBB550ADCA6CB86$solar", "solarisalwaysbusy"},
{"$MSCHAPv2$BA75EB14EFBFBF25$ED8CC90FD40FAA2D6BCD0ABD0B1F562FD777DF6C5609C98B$lulu", "password"},
{"$MSCHAPv2$95A87FA62EBCD2E3C8B09E1B448A6C72$ED8CC90FD40FAA2D6BCD0ABD0B1F562FD777DF6C5609C98B$E2AE0995EAAC6CEFF0D9757428B51509$lulu", "password"},
/* Single test vector from chapcrack's sample pcap file */
{"$MSCHAPv2$6D0E1C056CD94D5F$1C93ABCE815400686BAECA315F348469256420598A73AD49$moxie", "bPCFyF2uL1p5Lg5yrKmqmY"},
{"", "clientPass", {"User", "", "", "5B5D7C7D7B3F2F3E3C2C602132262628", "82309ECD8D708B5EA08FAA3981CD83544233114A3D85D6DF", "21402324255E262A28295F2B3A337C7E"} },
{"", "OMG!BBQ!11!one", {"domain\\fred", "", "", "56d64cbe7bad61349a0b752335100eaf", "d7d829d9545cef1d631b4e568ffb7586050fa3a4d02dbc0b", "7f8a466cff2a6bf0c80218bbf56d76bc"} }, /* domain\fred */
{"", "", {"lameuser", "", "domain", "d94e7c7972b2376b28c268583e162de7", "eba25a3b04d2c7085d01f842e2befc91745c40db0f792356", "0677ca7318fd7f65ae1b4f58c9f4f400"} }, /* no password */
{"", "asdblahblahblahblahblahblahblahblah", {"WorkGroup\\bOb", "", "", "b3c42db475b881d3c52ff3923d7b3bf8", "f07c7a4eb391f5debe32d814679a5a69661b86b33227c4f8", "6321f8649b971bd11ce8d5cb22a4a738"} }, /* WorkGroup\bOb */
{NULL}
};
static char (*saved_plain)[PLAINTEXT_LENGTH + 1];
static int (*saved_len);
static uchar (*saved_key)[21];
static uchar *challenge;
static int keys_prepared;
static void set_salt(void *salt);
static char *long_to_short(char *orig); /* used to canonicalize the format */
static void init(struct fmt_main *self)
{
/* LM =2 for DES encryption with no salt and no iterations */
DES_bs_init(2, DES_bs_cpt);
#if DES_bs_mt
self->params.min_keys_per_crypt = DES_bs_min_kpc;
self->params.max_keys_per_crypt = DES_bs_max_kpc;
#endif
saved_plain = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_plain));
saved_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_len));
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
}
static void done(void)
{
MEM_FREE(saved_key);
MEM_FREE(saved_len);
MEM_FREE(saved_plain);
}
static int valid_long(char *ciphertext)
{
char *pos, *pos2;
if (ciphertext == NULL) return 0;
else if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)!=0) return 0;
if (strlen(ciphertext) > TOTAL_LENGTH)
return 0;
/* Validate Authenticator/Server Challenge Length */
pos = &ciphertext[FORMAT_TAG_LEN];
for (pos2 = pos; *pos2 != '$'; pos2++)
if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
return 0;
if ( !(*pos2 && (pos2 - pos == CHALLENGE_LENGTH / 2)) )
return 0;
/* Validate MSCHAPv2 Response Length */
pos2++; pos = pos2;
for (; *pos2 != '$'; pos2++)
if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
return 0;
if ( !(*pos2 && (pos2 - pos == CIPHERTEXT_LENGTH)) )
return 0;
/* Validate Peer/Client Challenge Length */
pos2++; pos = pos2;
for (; *pos2 != '$'; pos2++)
if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
return 0;
if ( !(*pos2 && (pos2 - pos == CHALLENGE_LENGTH / 2)) )
return 0;
/* Validate Username Length */
if (strlen(++pos2) > USERNAME_LENGTH)
return 0;
return 1;
}
static int valid_short(char *ciphertext)
{
char *pos, *pos2;
if (ciphertext == NULL) return 0;
else if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN)!=0) return 0;
if (strlen(ciphertext) > TOTAL_LENGTH)
return 0;
/* Validate MSCHAPv2 Challenge Length */
pos = &ciphertext[FORMAT_TAG_LEN];
for (pos2 = pos; *pos2 != '$'; pos2++)
if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
return 0;
if ( !(*pos2 && (pos2 - pos == CHALLENGE_LENGTH / 4)) )
return 0;
/* Validate MSCHAPv2 Response Length */
pos2++; pos = pos2;
for (; *pos2 != '$'; pos2++)
if (atoi16[ARCH_INDEX(*pos2)] == 0x7F)
return 0;
if ( !(*pos2 && (pos2 - pos == CIPHERTEXT_LENGTH)) )
return 0;
return 1;
}
static int valid(char *ciphertext, struct fmt_main *pFmt)
{
return valid_short(ciphertext) ||
valid_long(ciphertext);
}
static char *prepare_long(char *split_fields[10])
{
char *username, *cp;
/* DOMAIN\USERNAME -or - USERNAME -- ignore DOMAIN */
if ((username = strstr(split_fields[0], "\\")) == NULL)
username = split_fields[0];
else
username++;
cp = mem_alloc(FORMAT_TAG_LEN+strlen(split_fields[3])+1+strlen(split_fields[4])+1+strlen(split_fields[5])+1+strlen(username)+1);
sprintf(cp, "%s%s$%s$%s$%s", FORMAT_TAG, split_fields[3], split_fields[4], split_fields[5], username);
if (valid_long(cp)) {
char *cp2 = str_alloc_copy(cp);
MEM_FREE(cp);
return cp2;
}
MEM_FREE(cp);
return split_fields[1];
}
static char *prepare_short(char *split_fields[10])
{
char *cp;
cp = mem_alloc(FORMAT_TAG_LEN+strlen(split_fields[3])+1+strlen(split_fields[4])+1+1+1);
sprintf(cp, "%s%s$%s$$", FORMAT_TAG, split_fields[3], split_fields[4]);
if (valid_short(cp)) {
char *cp2 = str_alloc_copy(cp);
MEM_FREE(cp);
return cp2;
}
MEM_FREE(cp);
return split_fields[1];
}
static char *prepare(char *split_fields[10], struct fmt_main *pFmt)
{
char *ret;
if (!strncmp(split_fields[1], FORMAT_TAG, FORMAT_TAG_LEN)) {
// check for a short format that has any extra trash fields, and if so remove them.
char *cp1, *cp2, *cp3;
cp1 = split_fields[1];
cp1 += FORMAT_TAG_LEN;
cp2 = strchr(cp1, '$');
ret = NULL;
if (cp2 && cp2-cp1 == CHALLENGE_LENGTH/4) {
++cp2;
cp3 = strchr(cp2, '$');
if (cp3 && cp3-cp2 == CIPHERTEXT_LENGTH && (strlen(cp3) > 2 || cp3[2] != '$')) {
ret = str_alloc_copy(split_fields[1]);
ret[(cp3-split_fields[1])+1] = '$';
ret[(cp3-split_fields[1])+2] = 0;
//printf ("Here is the cut item: %s\n", ret);
}
}
}
else if (split_fields[0] && split_fields[3] && split_fields[4] && split_fields[5] &&
strlen(split_fields[3]) == CHALLENGE_LENGTH/2 &&
strlen(split_fields[4]) == CIPHERTEXT_LENGTH &&
strlen(split_fields[5]) == CHALLENGE_LENGTH/2)
ret = prepare_long(split_fields);
else if (split_fields[0] && split_fields[3] && split_fields[4] &&
strlen(split_fields[3]) == CHALLENGE_LENGTH/4 &&
strlen(split_fields[4]) == CIPHERTEXT_LENGTH)
ret = prepare_short(split_fields);
else
ret = NULL;
if (ret && valid_long(ret))
ret = long_to_short(ret);
else if (valid_long(split_fields[1]))
ret = long_to_short(split_fields[1]);
return ret ? ret : split_fields[1];
}
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char *out;
int i, j = 0;
if (!out) out = mem_alloc_tiny(TOTAL_LENGTH + 1, MEM_ALIGN_WORD);
memset(out, 0, TOTAL_LENGTH + 1);
memcpy(out, ciphertext, strlen(ciphertext));
/* convert hashes to lower-case - exclude $MSCHAPv2 and USERNAME */
for (i = FORMAT_TAG_LEN; i < TOTAL_LENGTH + 1 && j < 3; i++) {
if (out[i] >= 'A' && out[i] <= 'Z')
out[i] |= 0x20;
else if (out[i] == '$')
j++;
}
if (valid_long(out))
return long_to_short(out);
return out;
}
static uint32_t *generate_des_format(uchar* binary)
{
static uint32_t out[6];
ARCH_WORD block[6];
int chr, src,dst,i;
uchar value, mask;
ARCH_WORD *ptr;
memset(block, 0, sizeof(block));
for (chr = 0; chr < 24; chr=chr + 8)
{
dst = 0;
for (i=0; i<8; i++)
{
value = binary[chr + i];
mask = 0x80;
for (src = 0; src < 8; src++) {
if (value & mask)
block[(chr/4) + (dst>>5)]|= 1U << (dst & 0x1F);
mask >>= 1;
dst++;
}
}
}
/* Apply initial permutation on ciphertext blocks */
for (i=0; i<6; i=i+2)
{
ptr = DES_do_IP(&block[i]);
out[i] = ptr[1];
out[i+1] = ptr[0];
}
return out;
}
static void *get_binary(char *ciphertext)
{
uchar binary[BINARY_SIZE];
int i;
uint32_t *ptr;
if (valid_short(ciphertext))
ciphertext += FORMAT_TAG_LEN + CHALLENGE_LENGTH / 4 + 1; /* Skip - $MSCHAPv2$, MSCHAPv2 Challenge */
else
ciphertext += FORMAT_TAG_LEN + CHALLENGE_LENGTH / 2 + 1; /* Skip - $MSCHAPv2$, Authenticator Challenge */
for (i=0; i<BINARY_SIZE; i++) {
binary[i] = (atoi16[ARCH_INDEX(ciphertext[i*2])])<<4;
binary[i] |= (atoi16[ARCH_INDEX(ciphertext[i*2+1])]);
}
/* Set binary in DES format */
ptr = generate_des_format(binary);
return ptr;
}
inline static void setup_des_key(unsigned char key_56[], int index)
{
char key[8];
/* Right-shift the key bytes by 1 to bring them into OpenSSL format */
/* Each key byte is ORed with 0x80 to pass the check for 0 in DES_bs_set_key() */
key[0] = (key_56[0] >> 1) | 0x80;
key[1] = (((key_56[0] << 7) | (key_56[1] >> 1)) >>1) | 0x80;
key[2] = (((key_56[1] << 6) | (key_56[2] >> 2)) >>1) | 0x80;
key[3] = (((key_56[2] << 5) | (key_56[3] >> 3)) >>1) | 0x80;
key[4] = (((key_56[3] << 4) | (key_56[4] >> 4)) >>1) | 0x80;
key[5] = (((key_56[4] << 3) | (key_56[5] >> 5)) >>1) | 0x80;
key[6] = (((key_56[5] << 2) | (key_56[6] >> 6)) >>1) | 0x80;
key[7] = ((key_56[6] << 1) >>1 ) | 0x80;
DES_bs_set_key((char*)key, index);
}
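/* For reference only (expand_des_key_56 is hypothetical and not called
 * anywhere): the conventional 56-bit -> 64-bit DES key expansion used by
 * similar NTLM/MSCHAP formats. Each output byte carries 7 key bits in its high
 * bits; DES ignores the low (parity) bit. setup_des_key() above produces the
 * same bytes right-shifted by one and ORed with 0x80, per its comments. */
inline static void expand_des_key_56(unsigned char key_56[], unsigned char key[8])
{
	key[0] = key_56[0];
	key[1] = (key_56[0] << 7) | (key_56[1] >> 1);
	key[2] = (key_56[1] << 6) | (key_56[2] >> 2);
	key[3] = (key_56[2] << 5) | (key_56[3] >> 3);
	key[4] = (key_56[3] << 4) | (key_56[4] >> 4);
	key[5] = (key_56[4] << 3) | (key_56[5] >> 5);
	key[6] = (key_56[5] << 2) | (key_56[6] >> 6);
	key[7] = (key_56[6] << 1);
}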
/* Calculate the MSCHAPv2 response for the given challenge, using the
specified authentication identity (username), password and client
nonce.
*/
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int i;
if (!keys_prepared) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (i = 0; i < count; i++) {
int len;
/* Generate 16-byte NTLM hash */
len = E_md4hash((uchar *) saved_plain[i], saved_len[i],
saved_key[i]);
if (len <= 0)
saved_plain[i][-len] = 0; // match truncation
/* NULL-padding the 16-byte hash to 21 bytes is done in cmp_exact if needed */
setup_des_key(saved_key[i], i);
}
keys_prepared = 1;
}
/* Bitsliced des encryption */
DES_bs_crypt_plain(count);
return count;
}
static int cmp_all(void *binary, int count)
{
return DES_bs_cmp_all((uint32_t *)binary, count);
}
static int cmp_one(void *binary, int index)
{
return DES_bs_cmp_one((uint32_t *)binary, 32, index);
}
static int cmp_exact(char *source, int index)
{
uint32_t *binary = get_binary(source);
if (!DES_bs_cmp_one(binary, 64, index))
return 0;
setup_des_key(&saved_key[index][7], 0);
DES_bs_crypt_plain(1);
if (!DES_bs_cmp_one(&binary[2], 64, 0))
{
setup_des_key(saved_key[0], 0);
DES_bs_crypt_plain(1);
return 0;
}
/* NULL-pad 16-byte NTLM hash to 21-bytes (postponed until now) */
memset(&saved_key[index][16], 0, 5);
setup_des_key(&saved_key[index][14], 0);
DES_bs_crypt_plain(1);
if (!DES_bs_cmp_one(&binary[4], 64, 0))
{
setup_des_key(saved_key[0], 0);
DES_bs_crypt_plain(1);
return 0;
}
setup_des_key(saved_key[0], 0);
DES_bs_crypt_plain(1);
return 1;
}
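/* For reference, the quantity tested above is the RFC 2759 ChallengeResponse:
 * the 16-byte NT hash of the password is zero-padded to 21 bytes and split
 * into three 7-byte DES keys K1|K2|K3, and the 24-byte response is
 *
 *     DES_K1(Challenge) || DES_K2(Challenge) || DES_K3(Challenge)
 *
 * crypt_all() computes the first block for all candidates; cmp_exact()
 * re-checks the remaining two blocks for a single candidate. */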
/* Either the ciphertext already contains the MSCHAPv2 Challenge (8 Bytes) or
we are going to calculate it via:
sha1(|Peer/Client Challenge (16 Bytes)|Authenticator/Server Challenge (16 Bytes)|Username (<=256)|)
NOTE: we now ONLY call this function for the short form. The long form gets converted into the short
form in either the prepare or split function. The short form is the canonical form (change made July, 2014, JimF)
*/
static void *get_salt(char *ciphertext)
{
static union {
unsigned char u8[SALT_SIZE];
uint32_t u32[SALT_SIZE / 4];
} binary_salt;
int i, cnt;
uchar j;
char *pos = NULL;
unsigned char temp[SALT_SIZE];
pos = ciphertext + FORMAT_TAG_LEN;
for (i = 0; i < SALT_SIZE; i++)
binary_salt.u8[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])];
/* Apply IP to salt */
memset(temp, 0, SALT_SIZE);
for (i = 0; i < 64; i++) {
cnt = DES_IP[i ^ 0x20];
j = (uchar)((binary_salt.u8[cnt >> 3] >> (7 - (cnt & 7))) & 1);
temp[i/8] |= j << (7 - (i % 8));
}
memcpy(binary_salt.u8, temp, SALT_SIZE);
return (void*)binary_salt.u32;
}
/*
* This function converts long hashes into short ones (the short form is now the canonical format)
* converts
* $MSCHAPv2$95a87fa62ebcd2e3c8b09e1b448a6c72$ed8cc90fd40faa2d6bcd0abd0b1f562fd777df6c5609c98b$e2ae0995eaac6ceff0d9757428b51509$lulu
* into
* $MSCHAPv2$ba75eb14efbfbf25$ed8cc90fd40faa2d6bcd0abd0b1f562fd777df6c5609c98b$$
*
* This code was moved from get_salt().
*/
static char *long_to_short(char *ciphertext) {
static char Buf[TOTAL_LENGTH+1]; // larger than we need, but not a big deal
static SHA_CTX ctx;
unsigned char tmp[16];
unsigned char digest[20];
char *pos = NULL;
int i;
SHA1_Init(&ctx);
/* Peer Challenge */
pos = ciphertext + FORMAT_TAG_LEN + 16*2 + 1 + 24*2 + 1; /* Skip $MSCHAPv2$, Authenticator Challenge and Response Hash */
memset(tmp, 0, 16);
for (i = 0; i < 16; i++)
tmp[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])];
SHA1_Update(&ctx, tmp, 16);
/* Authenticator Challenge */
pos = ciphertext + FORMAT_TAG_LEN; /* Skip $MSCHAPv2$ */
memset(tmp, 0, 16);
for (i = 0; i < 16; i++)
tmp[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])];
SHA1_Update(&ctx, tmp, 16);
/* Username - Only the user name (as presented by the peer and
excluding any prepended domain name) is used as input to SHAUpdate()
*/
pos = ciphertext + FORMAT_TAG_LEN + 16*2 + 1 + 24*2 + 1 + 16*2 + 1; /* Skip $MSCHAPv2$, Authenticator, Response and Peer */
SHA1_Update(&ctx, pos, strlen(pos));
SHA1_Final(digest, &ctx);
// OK, now we re-make our ciphertext buffer into the short canonical form.
strcpy(Buf, FORMAT_TAG);
pos = Buf + FORMAT_TAG_LEN;
for (i = 0; i < SALT_SIZE; i++) {
//binary_salt.u8[i] = (atoi16[ARCH_INDEX(pos[i*2])] << 4) + atoi16[ARCH_INDEX(pos[i*2+1])];
pos[(i<<1)] = itoa16[digest[i]>>4];
pos[(i<<1)+1] = itoa16[digest[i]&0xF];
}
memcpy(&pos[16], &ciphertext[42], CIPHERTEXT_LENGTH+2);
pos[16+CIPHERTEXT_LENGTH+2] = '$';
pos[16+CIPHERTEXT_LENGTH+3] = 0;
//printf ("short=%s original=%s\n", Buf, ciphertext);
return Buf;
}
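/* A standalone sketch of the same derivation (the ChallengeHash of RFC 2759):
 * the 8-byte challenge is the first 8 bytes of
 * SHA1(PeerChallenge || AuthenticatorChallenge || UserName).
 * challenge_hash_sketch() is hypothetical and not called anywhere; it reuses
 * the SHA_CTX wrappers from "sha.h" that long_to_short() uses above. */
static void challenge_hash_sketch(const unsigned char peer_challenge[16],
                                  const unsigned char auth_challenge[16],
                                  const char *username,
                                  unsigned char challenge[8])
{
	SHA_CTX ctx;
	unsigned char digest[20];

	SHA1_Init(&ctx);
	SHA1_Update(&ctx, peer_challenge, 16);
	SHA1_Update(&ctx, auth_challenge, 16);
	SHA1_Update(&ctx, username, strlen(username));
	SHA1_Final(digest, &ctx);
	memcpy(challenge, digest, 8);
}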
static void set_salt(void *salt)
{
challenge = salt;
DES_bs_generate_plaintext(challenge);
}
static void mschapv2_set_key(char *key, int index)
{
saved_len[index] = strlen(key);
memcpy(saved_plain[index], key, saved_len[index] + 1);
keys_prepared = 0;
}
static char *get_key(int index)
{
return saved_plain[index];
}
static int salt_hash(void *salt)
{
return *(uint32_t *)salt & (SALT_HASH_SIZE - 1);
}
struct fmt_main fmt_MSCHAPv2_old = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#if DES_BS
FMT_BS |
#if DES_bs_mt
FMT_OMP | FMT_OMP_BAD |
#endif
#endif
FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_UNICODE | FMT_UTF8,
{ NULL },
{ FORMAT_TAG },
tests
}, {
init,
done,
fmt_default_reset,
prepare,
valid,
split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
NULL,
set_salt,
mschapv2_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
DES_bs_get_hash_0,
DES_bs_get_hash_1,
DES_bs_get_hash_2,
DES_bs_get_hash_3,
DES_bs_get_hash_4,
DES_bs_get_hash_5,
DES_bs_get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? (a) : (b))
#define MIN(a,b) ((a) < (b) ? (a) : (b))
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx = 0, Ny = 0, Nz = 0, Nt = 0;
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// Guard against missing arguments: the sizes above would otherwise be uninitialized
if (Nx < 3 || Ny < 3 || Nz < 3 || Nt < 1) {
fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
return 1;
}
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including an extra sentinel element that marks the end of the list
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 8;
tile_size[3] = 512;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
// start at 0 so the halo planes read by the stencil are initialized too
for (i = 0; i < Nz; i++) {
for (j = 0; j < Ny; j++) {
for (k = 0; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 7
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
coef[6][i][j][k] * A[t%2][i ][j ][k+1];
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
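/*
 * Build/run sketch (assumptions: GCC or Clang with OpenMP, and print_utils.h /
 * PRINT_RESULTS available on the include path; add -DLIKWID_PERFMON and
 * -llikwid only when the LIKWID markers are wanted):
 *
 *   cc -O3 -fopenmp 3d7pt_var.c -o 3d7pt_var
 *   ./3d7pt_var 256 256 256 100   # interior Nx Ny Nz and Nt time steps;
 *                                 # two halo cells per dimension are added internally
 */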
|
colorspace.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO L OOO RRRR SSSSS PPPP AAA CCCC EEEEE %
% C O O L O O R R SS P P A A C E %
% C O O L O O RRRR SSS PPPP AAAAA C EEE %
% C O O L O O R R SS P A A C E %
% CCCC OOO LLLLL OOO R R SSSSS P A A CCCC EEEEE %
% %
% %
% MagickCore Image Colorspace Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/property.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/enhance.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/utility.h"
/*
Typedef declarations.
*/
typedef struct _TransformPacket
{
MagickRealType
x,
y,
z;
} TransformPacket;
/*
Forward declarations.
*/
static MagickBooleanType
TransformsRGBImage(Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C o l o r s p a c e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageColorspaceType() returns the potential type of image:
% sRGBColorspaceType, RGBColorspaceType, GRAYColorspaceType, etc.
%
% To ensure the image type matches its potential, use SetImageColorspaceType():
%
% (void) SetImageColorspaceType(image,GetImageColorspaceType(image),
% exception);
%
% The format of the GetImageColorspaceType method is:
%
% ColorspaceType GetImageColorspaceType(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ColorspaceType GetImageColorspaceType(const Image *image,
ExceptionInfo *exception)
{
ColorspaceType
colorspace;
ImageType
type;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
colorspace=image->colorspace;
type=IdentifyImageType(image,exception);
if ((type == BilevelType) || (type == GrayscaleType) ||
(type == GrayscaleAlphaType))
colorspace=GRAYColorspace;
return(colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ s R G B T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% sRGBTransformImage() converts the reference image from sRGB to an alternate
% colorspace. The transformation matrices are not the standard ones: the
% weights are rescaled to normalize the range of the transformed values to
% be [0..QuantumRange].
%
% The format of the sRGBTransformImage method is:
%
% MagickBooleanType sRGBTransformImage(Image *image,
% const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace to transform the image to.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void ConvertAdobe98ToRGB(const double r,const double g,
const double b,double *red,double *green,double *blue)
{
double
X,
Y,
Z;
ConvertAdobe98ToXYZ(r,g,b,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static inline void ConvertDisplayP3ToRGB(const double r,const double g,
const double b,double *red,double *green,double *blue)
{
double
X,
Y,
Z;
ConvertDisplayP3ToXYZ(r,g,b,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static inline void ConvertProPhotoToRGB(const double r,const double g,
const double b,double *red,double *green,double *blue)
{
double
X,
Y,
Z;
ConvertProPhotoToXYZ(r,g,b,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static inline void ConvertRGBToCMY(const double red,const double green,
const double blue,double *cyan,double *magenta,double *yellow)
{
*cyan=QuantumScale*(QuantumRange-red);
*magenta=QuantumScale*(QuantumRange-green);
*yellow=QuantumScale*(QuantumRange-blue);
}
static void ConvertRGBToAdobe98(const double red,const double green,
const double blue,double *r,double *g,double *b)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToAdobe98(X,Y,Z,r,g,b);
}
static void ConvertRGBToDisplayP3(const double red,const double green,
const double blue,double *r,double *g,double *b)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToDisplayP3(X,Y,Z,r,g,b);
}
static void ConvertRGBToProPhoto(const double red,const double green,
const double blue,double *r,double *g,double *b)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToProPhoto(X,Y,Z,r,g,b);
}
static inline void ConvertXYZToLMS(const double x,const double y,
const double z,double *L,double *M,double *S)
{
*L=0.7328*x+0.4296*y-0.1624*z;
*M=(-0.7036*x+1.6975*y+0.0061*z);
*S=0.0030*x+0.0136*y+0.9834*z;
}
static void ConvertRGBToLMS(const double red,const double green,
const double blue,double *L,double *M,double *S)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToLMS(X,Y,Z,L,M,S);
}
static void ConvertRGBToLuv(const double red,const double green,
const double blue,const IlluminantType illuminant,double *L,double *u,
double *v)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToLuv(X,Y,Z,illuminant,L,u,v);
}
static void ConvertRGBToxyY(const double red,const double green,
const double blue,double *low_x,double *low_y,double *cap_Y)
{
double
gamma,
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
gamma=PerceptibleReciprocal(X+Y+Z);
*low_x=gamma*X;
*low_y=gamma*Y;
*cap_Y=Y;
}
static inline void ConvertXYZToJzazbz(const double X,const double Y,
const double Z,const double white_luminance,double *Jz,double *az,double *bz)
{
#define Jzazbz_b 1.15 /* https://observablehq.com/@jrus/jzazbz */
#define Jzazbz_g 0.66
#define Jzazbz_c1 (3424.0/4096.0)
#define Jzazbz_c2 (2413.0/128.0)
#define Jzazbz_c3 (2392.0/128.0)
#define Jzazbz_n (2610.0/16384.0)
#define Jzazbz_p (1.7*2523.0/32.0)
#define Jzazbz_d (-0.56)
#define Jzazbz_d0 (1.6295499532821566e-11)
double
gamma,
Iz,
L,
Lp,
M,
Mp,
S,
Sp,
Xp,
Yp,
Zp;
Xp=(Jzazbz_b*X-Z*(Jzazbz_b-1));
Yp=(Jzazbz_g*Y-X*(Jzazbz_g-1));
Zp=Z;
L=0.41478972*Xp+0.579999*Yp+0.0146480*Zp;
M=(-0.2015100)*Xp+1.120649*Yp+0.0531008*Zp;
S=(-0.0166008)*Xp+0.264800*Yp+0.6684799*Zp;
gamma=pow(L*PerceptibleReciprocal(white_luminance),Jzazbz_n);
Lp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
gamma=pow(M*PerceptibleReciprocal(white_luminance),Jzazbz_n);
Mp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
gamma=pow(S*PerceptibleReciprocal(white_luminance),Jzazbz_n);
Sp=pow((Jzazbz_c1+Jzazbz_c2*gamma)/(1.0+Jzazbz_c3*gamma),Jzazbz_p);
Iz=0.5*Lp+0.5*Mp;
*az=3.52400*Lp-4.066708*Mp+0.542708*Sp+0.5;
*bz=0.199076*Lp+1.096799*Mp-1.295875*Sp+0.5;
*Jz=((Jzazbz_d+1.0)*Iz)/(Jzazbz_d*Iz+1.0)-Jzazbz_d0;
}
static inline void ConvertJzazbzToXYZ(const double Jz,const double az,
const double bz,const double white_luminance,double *X,double *Y,double *Z)
{
double
azz,
bzz,
gamma,
Iz,
L,
Lp,
M,
Mp,
S,
Sp,
Xp,
Yp,
Zp;
gamma=Jz+Jzazbz_d0;
Iz=gamma/(Jzazbz_d-Jzazbz_d*gamma+1.0);
azz=az-0.5;
bzz=bz-0.5;
Lp=Iz+0.138605043271539*azz+0.0580473161561189*bzz;
Mp=Iz-0.138605043271539*azz-0.0580473161561189*bzz;
Sp=Iz-0.0960192420263189*azz-0.811891896056039*bzz;
gamma=pow(Lp,1.0/Jzazbz_p);
L=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/
Jzazbz_n);
gamma=pow(Mp,1.0/Jzazbz_p);
M=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/
Jzazbz_n);
gamma=pow(Sp,1.0/Jzazbz_p);
S=white_luminance*pow((Jzazbz_c1-gamma)/(Jzazbz_c3*gamma-Jzazbz_c2),1.0/
Jzazbz_n);
Xp=1.92422643578761*L-1.00479231259537*M+0.037651404030618*S;
Yp=0.350316762094999*L+0.726481193931655*M-0.065384422948085*S;
Zp=(-0.0909828109828476)*L-0.312728290523074*M+1.52276656130526*S;
*X=(Xp+(Jzazbz_b-1.0)*Zp)/Jzazbz_b;
*Y=(Yp+(Jzazbz_g-1.0)**X)/Jzazbz_g;
*Z=Zp;
}
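/*
  Illustrative round-trip check (a sketch, not part of MagickCore): as defined
  above, ConvertJzazbzToXYZ() is the algebraic inverse of ConvertXYZToJzazbz(),
  so for non-negative, in-gamut XYZ triples a forward/backward conversion should
  reproduce the input to within floating-point error. The helper below assumes
  it is compiled in this translation unit, where the two static converters and
  <math.h> (already required for pow()) are visible.
*/
static double JzazbzRoundTripError(const double X,const double Y,const double Z,
  const double white_luminance)
{
  double
    error,
    Jz,
    az,
    bz,
    Xr,
    Yr,
    Zr;

  ConvertXYZToJzazbz(X,Y,Z,white_luminance,&Jz,&az,&bz);
  ConvertJzazbzToXYZ(Jz,az,bz,white_luminance,&Xr,&Yr,&Zr);
  /* report the largest absolute deviation of the recovered components */
  error=fabs(Xr-X);
  if (fabs(Yr-Y) > error)
    error=fabs(Yr-Y);
  if (fabs(Zr-Z) > error)
    error=fabs(Zr-Z);
  return(error);
}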
static void ConvertRGBToJzazbz(const double red,const double green,
const double blue,const double white_luminance,double *Jz,double *az,
double *bz)
{
double
X,
Y,
Z;
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
ConvertXYZToJzazbz(X,Y,Z,white_luminance,Jz,az,bz);
}
static void ConvertJzazbzToRGB(const double Jz,const double az,
const double bz,const double white_luminance,double *red,double *green,
double *blue)
{
double
X,
Y,
Z;
ConvertJzazbzToXYZ(Jz,az,bz,white_luminance,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static void ConvertRGBToYDbDr(const double red,const double green,
const double blue,double *Y,double *Db,double *Dr)
{
*Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
*Db=QuantumScale*(-0.450*red-0.883*green+1.333*blue)+0.5;
*Dr=QuantumScale*(-1.333*red+1.116*green+0.217*blue)+0.5;
}
static void ConvertRGBToYIQ(const double red,const double green,
const double blue,double *Y,double *I,double *Q)
{
*Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
*I=QuantumScale*(0.595716*red-0.274453*green-0.321263*blue)+0.5;
*Q=QuantumScale*(0.211456*red-0.522591*green+0.311135*blue)+0.5;
}
static void ConvertRGBToYPbPr(const double red,const double green,
const double blue,double *Y,double *Pb,double *Pr)
{
*Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
*Pb=QuantumScale*((-0.1687367)*red-0.331264*green+0.5*blue)+0.5;
*Pr=QuantumScale*(0.5*red-0.418688*green-0.081312*blue)+0.5;
}
static void ConvertRGBToYCbCr(const double red,const double green,
const double blue,double *Y,double *Cb,double *Cr)
{
ConvertRGBToYPbPr(red,green,blue,Y,Cb,Cr);
}
static void ConvertRGBToYUV(const double red,const double green,
const double blue,double *Y,double *U,double *V)
{
*Y=QuantumScale*(0.298839*red+0.586811*green+0.114350*blue);
*U=QuantumScale*((-0.147)*red-0.289*green+0.436*blue)+0.5;
*V=QuantumScale*(0.615*red-0.515*green-0.100*blue)+0.5;
}
static MagickBooleanType sRGBTransformImage(Image *image,
const ColorspaceType colorspace,ExceptionInfo *exception)
{
#define sRGBTransformImageTag "RGBTransform/Image"
CacheView
*image_view;
const char
*artifact;
IlluminantType
illuminant = D65Illuminant;
MagickBooleanType
status;
MagickOffsetType
progress;
PrimaryInfo
primary_info;
ssize_t
i;
ssize_t
y;
TransformPacket
*x_map,
*y_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(colorspace != sRGBColorspace);
assert(colorspace != TransparentColorspace);
assert(colorspace != UndefinedColorspace);
artifact=GetImageArtifact(image,"color:illuminant");
if (artifact != (const char *) NULL)
{
illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions,
MagickFalse,artifact);
if ((ssize_t) illuminant < 0)
illuminant=UndefinedIlluminant;
}
status=MagickTrue;
progress=0;
switch (colorspace)
{
case CMYKColorspace:
{
PixelInfo
zero;
/*
Convert RGB to CMYK colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
ConvertRGBToCMYK(&pixel);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->type=image->alpha_trait == UndefinedPixelTrait ?
ColorSeparationType : ColorSeparationAlphaType;
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LinearGRAYColorspace:
{
/*
Transform image from sRGB to GRAY.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
gray;
gray=0.212656*DecodePixelGamma(GetPixelRed(image,q))+0.715158*
DecodePixelGamma(GetPixelGreen(image,q))+0.072186*
DecodePixelGamma(GetPixelBlue(image,q));
SetPixelGray(image,ClampToQuantum(gray),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
image->type=GrayscaleType;
return(status);
}
case GRAYColorspace:
{
/*
Transform image from sRGB to GRAY.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
gray;
gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+
0.072186*GetPixelBlue(image,q);
SetPixelGray(image,ClampToQuantum(gray),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
image->type=GrayscaleType;
return(status);
}
case CMYColorspace:
case Adobe98Colorspace:
case DisplayP3Colorspace:
case HCLColorspace:
case HCLpColorspace:
case HSBColorspace:
case HSIColorspace:
case HSLColorspace:
case HSVColorspace:
case HWBColorspace:
case JzazbzColorspace:
case LabColorspace:
case LCHColorspace:
case LCHabColorspace:
case LCHuvColorspace:
case LMSColorspace:
case LuvColorspace:
case ProPhotoColorspace:
case xyYColorspace:
case XYZColorspace:
case YCbCrColorspace:
case YDbDrColorspace:
case YIQColorspace:
case YPbPrColorspace:
case YUVColorspace:
{
const char
*value;
double
white_luminance;
/*
Transform image from sRGB to target colorspace.
*/
white_luminance=10000.0;
value=GetImageProperty(image,"white-luminance",exception);
if (value != (const char *) NULL)
white_luminance=StringToDouble(value,(char **) NULL);
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
green,
red,
X,
Y,
Z;
red=(double) GetPixelRed(image,q);
green=(double) GetPixelGreen(image,q);
blue=(double) GetPixelBlue(image,q);
switch (colorspace)
{
case Adobe98Colorspace:
{
ConvertRGBToAdobe98(red,green,blue,&X,&Y,&Z);
break;
}
case CMYColorspace:
{
ConvertRGBToCMY(red,green,blue,&X,&Y,&Z);
break;
}
case DisplayP3Colorspace:
{
ConvertRGBToDisplayP3(red,green,blue,&X,&Y,&Z);
break;
}
case HCLColorspace:
{
ConvertRGBToHCL(red,green,blue,&X,&Y,&Z);
break;
}
case HCLpColorspace:
{
ConvertRGBToHCLp(red,green,blue,&X,&Y,&Z);
break;
}
case HSBColorspace:
{
ConvertRGBToHSB(red,green,blue,&X,&Y,&Z);
break;
}
case HSIColorspace:
{
ConvertRGBToHSI(red,green,blue,&X,&Y,&Z);
break;
}
case HSLColorspace:
{
ConvertRGBToHSL(red,green,blue,&X,&Y,&Z);
break;
}
case HSVColorspace:
{
ConvertRGBToHSV(red,green,blue,&X,&Y,&Z);
break;
}
case HWBColorspace:
{
ConvertRGBToHWB(red,green,blue,&X,&Y,&Z);
break;
}
case JzazbzColorspace:
{
ConvertRGBToJzazbz(red,green,blue,white_luminance,&X,&Y,&Z);
break;
}
case LabColorspace:
{
ConvertRGBToLab(red,green,blue,illuminant,&X,&Y,&Z);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ConvertRGBToLCHab(red,green,blue,illuminant,&X,&Y,&Z);
break;
}
case LCHuvColorspace:
{
ConvertRGBToLCHuv(red,green,blue,illuminant,&X,&Y,&Z);
break;
}
case LMSColorspace:
{
ConvertRGBToLMS(red,green,blue,&X,&Y,&Z);
break;
}
case LuvColorspace:
{
ConvertRGBToLuv(red,green,blue,illuminant,&X,&Y,&Z);
break;
}
case ProPhotoColorspace:
{
ConvertRGBToProPhoto(red,green,blue,&X,&Y,&Z);
break;
}
case xyYColorspace:
{
ConvertRGBToxyY(red,green,blue,&X,&Y,&Z);
break;
}
case XYZColorspace:
{
ConvertRGBToXYZ(red,green,blue,&X,&Y,&Z);
break;
}
case YCbCrColorspace:
{
ConvertRGBToYCbCr(red,green,blue,&X,&Y,&Z);
break;
}
case YDbDrColorspace:
{
ConvertRGBToYDbDr(red,green,blue,&X,&Y,&Z);
break;
}
case YIQColorspace:
{
ConvertRGBToYIQ(red,green,blue,&X,&Y,&Z);
break;
}
case YPbPrColorspace:
{
ConvertRGBToYPbPr(red,green,blue,&X,&Y,&Z);
break;
}
case YUVColorspace:
{
ConvertRGBToYUV(red,green,blue,&X,&Y,&Z);
break;
}
default:
{
X=QuantumScale*red;
Y=QuantumScale*green;
Z=QuantumScale*blue;
break;
}
}
SetPixelRed(image,ClampToQuantum(QuantumRange*X),q);
SetPixelGreen(image,ClampToQuantum(QuantumRange*Y),q);
SetPixelBlue(image,ClampToQuantum(QuantumRange*Z),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LogColorspace:
{
#define DisplayGamma (1.0/1.7)
#define FilmGamma 0.6
#define ReferenceBlack 95.0
#define ReferenceWhite 685.0
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform RGB to Log colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma",exception);
if (value != (const char *) NULL)
gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma",exception);
if (value != (const char *) NULL)
film_gamma=StringToDouble(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black",exception);
if (value != (const char *) NULL)
reference_black=StringToDouble(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white",exception);
if (value != (const char *) NULL)
reference_white=StringToDouble(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002*
PerceptibleReciprocal(film_gamma));
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
logmap[i]=ScaleMapToQuantum((double) (MaxMap*(reference_white+
log10(black+(1.0*i/MaxMap)*(1.0-black))/((gamma/density)*0.002*
PerceptibleReciprocal(film_gamma)))/1024.0));
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
double
blue,
green,
red;
red=(double) DecodePixelGamma((MagickRealType)
GetPixelRed(image,q));
green=(double) DecodePixelGamma((MagickRealType)
GetPixelGreen(image,q));
blue=(double) DecodePixelGamma((MagickRealType)
GetPixelBlue(image,q));
SetPixelRed(image,logmap[ScaleQuantumToMap(ClampToQuantum(red))],q);
SetPixelGreen(image,logmap[ScaleQuantumToMap(ClampToQuantum(green))],
q);
SetPixelBlue(image,logmap[ScaleQuantumToMap(ClampToQuantum(blue))],q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case RGBColorspace:
case scRGBColorspace:
{
/*
Transform image from sRGB to linear RGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
green,
red;
red=DecodePixelGamma((MagickRealType) GetPixelRed(image,q));
green=DecodePixelGamma((MagickRealType) GetPixelGreen(image,q));
blue=DecodePixelGamma((MagickRealType) GetPixelBlue(image,q));
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
{
if (x_map != (TransformPacket *) NULL)
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (y_map != (TransformPacket *) NULL)
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
if (z_map != (TransformPacket *) NULL)
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
(void) memset(&primary_info,0,sizeof(primary_info));
switch (colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
I1 = 0.33333*R+0.33334*G+0.33333*B
I2 = 0.50000*R+0.00000*G-0.50000*B
I3 =-0.25000*R+0.50000*G-0.25000*B
I and Q, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.33333*(double) i);
x_map[i].y=(MagickRealType) (0.50000*(double) i);
x_map[i].z=(MagickRealType) (-0.25000*(double) i);
y_map[i].x=(MagickRealType) (0.33334*(double) i);
y_map[i].y=(MagickRealType) (0.00000*(double) i);
y_map[i].z=(MagickRealType) (0.50000*(double) i);
z_map[i].x=(MagickRealType) (0.33333*(double) i);
z_map[i].y=(MagickRealType) (-0.50000*(double) i);
z_map[i].z=(MagickRealType) (-0.25000*(double) i);
}
break;
}
case Rec601YCbCrColorspace:
{
/*
Initialize YCbCr tables (ITU-R BT.601):
Y = 0.2988390*R+0.5868110*G+0.1143500*B
Cb= -0.1687367*R-0.3312640*G+0.5000000*B
Cr= 0.5000000*R-0.4186880*G-0.0813120*B
Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.298839*(double) i);
x_map[i].y=(MagickRealType) (-0.1687367*(double) i);
x_map[i].z=(MagickRealType) (0.500000*(double) i);
y_map[i].x=(MagickRealType) (0.586811*(double) i);
y_map[i].y=(MagickRealType) (-0.331264*(double) i);
y_map[i].z=(MagickRealType) (-0.418688*(double) i);
z_map[i].x=(MagickRealType) (0.114350*(double) i);
z_map[i].y=(MagickRealType) (0.500000*(double) i);
z_map[i].z=(MagickRealType) (-0.081312*(double) i);
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables (ITU-R BT.709):
Y = 0.212656*R+0.715158*G+0.072186*B
Cb= -0.114572*R-0.385428*G+0.500000*B
Cr= 0.500000*R-0.454153*G-0.045847*B
Cb and Cr, normally -0.5 through 0.5, are normalized to the range 0
through QuantumRange.
*/
primary_info.y=(double) (MaxMap+1.0)/2.0;
primary_info.z=(double) (MaxMap+1.0)/2.0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (0.212656*(double) i);
x_map[i].y=(MagickRealType) (-0.114572*(double) i);
x_map[i].z=(MagickRealType) (0.500000*(double) i);
y_map[i].x=(MagickRealType) (0.715158*(double) i);
y_map[i].y=(MagickRealType) (-0.385428*(double) i);
y_map[i].z=(MagickRealType) (-0.454153*(double) i);
z_map[i].x=(MagickRealType) (0.072186*(double) i);
z_map[i].y=(MagickRealType) (0.500000*(double) i);
z_map[i].z=(MagickRealType) (-0.045847*(double) i);
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
Y = 0.298839*R+0.586811*G+0.114350*B
C1= -0.298839*R-0.586811*G+0.88600*B
C2= 0.70100*R-0.586811*G-0.114350*B
YCC is scaled by 1.3584. The zero point of C1 is at 156 and that of C2 is at 137.
*/
primary_info.y=(double) ScaleQuantumToMap(ScaleCharToQuantum(156));
primary_info.z=(double) ScaleQuantumToMap(ScaleCharToQuantum(137));
for (i=0; i <= (ssize_t) (0.018*MaxMap); i++)
{
x_map[i].x=0.005382*i;
x_map[i].y=(-0.003296)*i;
x_map[i].z=0.009410*i;
y_map[i].x=0.010566*i;
y_map[i].y=(-0.006471)*i;
y_map[i].z=(-0.007880)*i;
z_map[i].x=0.002052*i;
z_map[i].y=0.009768*i;
z_map[i].z=(-0.001530)*i;
}
for ( ; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.298839*(1.099*i-0.099);
x_map[i].y=(-0.298839)*(1.099*i-0.099);
x_map[i].z=0.70100*(1.099*i-0.099);
y_map[i].x=0.586811*(1.099*i-0.099);
y_map[i].y=(-0.586811)*(1.099*i-0.099);
y_map[i].z=(-0.586811)*(1.099*i-0.099);
z_map[i].x=0.114350*(1.099*i-0.099);
z_map[i].y=0.88600*(1.099*i-0.099);
z_map[i].z=(-0.114350)*(1.099*i-0.099);
}
break;
}
default:
{
/*
Linear conversion tables.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
x_map[i].y=(MagickRealType) 0.0;
x_map[i].z=(MagickRealType) 0.0;
y_map[i].x=(MagickRealType) 0.0;
y_map[i].y=(MagickRealType) (1.0*(double) i);
y_map[i].z=(MagickRealType) 0.0;
z_map[i].x=(MagickRealType) 0.0;
z_map[i].y=(MagickRealType) 0.0;
z_map[i].z=(MagickRealType) (1.0*(double) i);
}
break;
}
}
/*
Convert from sRGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
Quantum
*magick_restrict q;
ssize_t
x;
unsigned int
blue,
green,
red;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
red=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelRed(image,q)));
green=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelGreen(image,q)));
blue=ScaleQuantumToMap(ClampToQuantum((MagickRealType)
GetPixelBlue(image,q)));
pixel.red=(x_map[red].x+y_map[green].x+z_map[blue].x)+
primary_info.x;
pixel.green=(x_map[red].y+y_map[green].y+z_map[blue].y)+
primary_info.y;
pixel.blue=(x_map[red].z+y_map[green].z+z_map[blue].z)+
primary_info.z;
SetPixelRed(image,ScaleMapToQuantum(pixel.red),q);
SetPixelGreen(image,ScaleMapToQuantum(pixel.green),q);
SetPixelBlue(image,ScaleMapToQuantum(pixel.blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,sRGBTransformImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
unsigned int
blue,
green,
red;
/*
Convert PseudoClass image.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
PixelInfo
pixel;
red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red));
green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green));
blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x+primary_info.x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y+primary_info.y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z+primary_info.z;
image->colormap[i].red=(double) ScaleMapToQuantum(pixel.red);
image->colormap[i].green=(double) ScaleMapToQuantum(pixel.green);
image->colormap[i].blue=(double) ScaleMapToQuantum(pixel.blue);
}
(void) SyncImage(image,exception);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,colorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorspace() sets the colorspace member of the Image structure.
%
% The format of the SetImageColorspace method is:
%
% MagickBooleanType SetImageColorspace(Image *image,
% const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColorspace(Image *image,
const ColorspaceType colorspace,ExceptionInfo *exception)
{
ImageType
type;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (image->colorspace == colorspace)
return(MagickTrue);
image->colorspace=colorspace;
image->rendering_intent=UndefinedIntent;
image->gamma=1.000/2.200;
(void) memset(&image->chromaticity,0,sizeof(image->chromaticity));
type=image->type;
if (IsGrayColorspace(colorspace) != MagickFalse)
{
if (colorspace == LinearGRAYColorspace)
image->gamma=1.000;
type=GrayscaleType;
}
else
if ((IsRGBColorspace(colorspace) != MagickFalse) ||
(colorspace == XYZColorspace) || (colorspace == xyYColorspace))
image->gamma=1.000;
else
{
image->rendering_intent=PerceptualIntent;
image->chromaticity.red_primary.x=0.6400;
image->chromaticity.red_primary.y=0.3300;
image->chromaticity.red_primary.z=0.0300;
image->chromaticity.green_primary.x=0.3000;
image->chromaticity.green_primary.y=0.6000;
image->chromaticity.green_primary.z=0.1000;
image->chromaticity.blue_primary.x=0.1500;
image->chromaticity.blue_primary.y=0.0600;
image->chromaticity.blue_primary.z=0.7900;
image->chromaticity.white_point.x=0.3127;
image->chromaticity.white_point.y=0.3290;
image->chromaticity.white_point.z=0.3583;
}
status=SyncImagePixelCache(image,exception);
image->type=type;
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e G r a y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageGray() returns MagickTrue if all the pixels in the image have the
% same red, green, and blue intensities and changes the type of the image to
% bi-level or grayscale.
%
% The format of the SetImageGray method is:
%
% MagickBooleanType SetImageGray(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageGray(Image *image,
ExceptionInfo *exception)
{
const char
*value;
ImageType
type;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (IsImageGray(image) != MagickFalse)
return(MagickTrue);
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
return(MagickFalse);
value=GetImageProperty(image,"colorspace:auto-grayscale",exception);
if (IsStringFalse(value) != MagickFalse)
return(MagickFalse);
type=IdentifyImageGray(image,exception);
if (type == UndefinedType)
return(MagickFalse);
image->colorspace=GRAYColorspace;
if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
return(MagickFalse);
image->type=type;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M o n o c h r o m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMonochrome() returns MagickTrue if all the pixels in the image have
% the same red, green, and blue intensities and the intensity is either
% 0 or QuantumRange and changes the type of the image to bi-level.
%
% The format of the SetImageMonochrome method is:
%
% MagickBooleanType SetImageMonochrome(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageMonochrome(Image *image,
ExceptionInfo *exception)
{
ImageType
type;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (IsImageMonochrome(image) != MagickFalse)
return(MagickTrue);
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
return(MagickFalse);
type=IdentifyImageMonochrome(image,exception);
if (type == UndefinedType)
return(MagickFalse);
image->colorspace=GRAYColorspace;
if (SyncImagePixelCache((Image *) image,exception) == MagickFalse)
return(MagickFalse);
image->type=type;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImageColorspace() transforms an image colorspace, changing the
% image data to reflect the new colorspace.
%
% The format of the TransformImageColorspace method is:
%
% MagickBooleanType TransformImageColorspace(Image *image,
% const ColorspaceType colorspace,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o colorspace: the colorspace.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransformImageColorspace(Image *image,
const ColorspaceType colorspace,ExceptionInfo *exception)
{
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if (image->colorspace == colorspace)
return(SetImageColorspace(image,colorspace,exception));
(void) DeleteImageProfile(image,"icc");
(void) DeleteImageProfile(image,"icm");
if (colorspace == UndefinedColorspace)
return(SetImageColorspace(image,colorspace,exception));
/*
Convert the reference image from an alternate colorspace to sRGB.
*/
if (IssRGBColorspace(colorspace) != MagickFalse)
return(TransformsRGBImage(image,exception));
status=MagickTrue;
if (IssRGBColorspace(image->colorspace) == MagickFalse)
status=TransformsRGBImage(image,exception);
if (status == MagickFalse)
return(status);
/*
Convert the reference image from sRGB to an alternate colorspace.
*/
if (sRGBTransformImage(image,colorspace,exception) == MagickFalse)
status=MagickFalse;
return(status);
}
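/*
  Illustrative call pattern (not part of MagickCore): a caller typically checks
  the MagickBooleanType result of each transform, e.g.

    if (TransformImageColorspace(image,GRAYColorspace,exception) == MagickFalse)
      { ... handle the error reported in `exception' ... }
    if (TransformImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
      { ... handle the error ... }

  Note that, as implemented above, a transform between two non-sRGB colorspaces
  passes through sRGB: TransformsRGBImage() converts back to sRGB first, then
  sRGBTransformImage() converts into the requested colorspace.
*/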
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ T r a n s f o r m s R G B I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformsRGBImage() converts the reference image from an alternate
% colorspace to sRGB. The transformation matrices are not the standard ones:
% the weights are rescaled to normalize the range of the transformed values
% to be [0..QuantumRange].
%
% The format of the TransformsRGBImage method is:
%
% MagickBooleanType TransformsRGBImage(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline void ConvertCMYToRGB(const double cyan,const double magenta,
const double yellow,double *red,double *green,double *blue)
{
*red=QuantumRange*(1.0-cyan);
*green=QuantumRange*(1.0-magenta);
*blue=QuantumRange*(1.0-yellow);
}
static inline void ConvertLMSToXYZ(const double L,const double M,const double S,
double *X,double *Y,double *Z)
{
*X=1.096123820835514*L-0.278869000218287*M+0.182745179382773*S;
*Y=0.454369041975359*L+0.473533154307412*M+0.072097803717229*S;
*Z=(-0.009627608738429)*L-0.005698031216113*M+1.015325639954543*S;
}
static inline void ConvertLMSToRGB(const double L,const double M,
const double S,double *red,double *green,double *blue)
{
double
X,
Y,
Z;
ConvertLMSToXYZ(L,M,S,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static inline void ConvertLuvToRGB(const double L,const double u,
const double v,const IlluminantType illuminant,double *red,double *green,
double *blue)
{
double
X,
Y,
Z;
ConvertLuvToXYZ(100.0*L,354.0*u-134.0,262.0*v-140.0,illuminant,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static inline ssize_t RoundToYCC(const double value)
{
if (value <= 0.0)
return(0);
if (value >= 1388.0)
return(1388);
return((ssize_t) (value+0.5));
}
static inline void ConvertLabToRGB(const double L,const double a,
const double b,const IlluminantType illuminant,double *red,double *green,
double *blue)
{
double
X,
Y,
Z;
ConvertLabToXYZ(100.0*L,255.0*(a-0.5),255.0*(b-0.5),illuminant,&X,&Y,&Z);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static inline void ConvertxyYToRGB(const double low_x,const double low_y,
const double cap_Y,double *red,double *green,double *blue)
{
double
gamma,
X,
Y,
Z;
gamma=PerceptibleReciprocal(low_y);
X=gamma*cap_Y*low_x;
Y=cap_Y;
Z=gamma*cap_Y*(1.0-low_x-low_y);
ConvertXYZToRGB(X,Y,Z,red,green,blue);
}
static void ConvertYPbPrToRGB(const double Y,const double Pb,const double Pr,
double *red,double *green,double *blue)
{
*red=QuantumRange*(0.99999999999914679361*Y-1.2188941887145875e-06*(Pb-0.5)+
1.4019995886561440468*(Pr-0.5));
*green=QuantumRange*(0.99999975910502514331*Y-0.34413567816504303521*(Pb-0.5)-
0.71413649331646789076*(Pr-0.5));
*blue=QuantumRange*(1.00000124040004623180*Y+1.77200006607230409200*(Pb-0.5)+
2.1453384174593273e-06*(Pr-0.5));
}
static void ConvertYCbCrToRGB(const double Y,const double Cb,
const double Cr,double *red,double *green,double *blue)
{
ConvertYPbPrToRGB(Y,Cb,Cr,red,green,blue);
}
static void ConvertYIQToRGB(const double Y,const double I,const double Q,
double *red,double *green,double *blue)
{
*red=QuantumRange*(Y+0.9562957197589482261*(I-0.5)+0.6210244164652610754*
(Q-0.5));
*green=QuantumRange*(Y-0.2721220993185104464*(I-0.5)-0.6473805968256950427*
(Q-0.5));
*blue=QuantumRange*(Y-1.1069890167364901945*(I-0.5)+1.7046149983646481374*
(Q-0.5));
}
static void ConvertYDbDrToRGB(const double Y,const double Db,const double Dr,
double *red,double *green,double *blue)
{
*red=QuantumRange*(Y+9.2303716147657e-05*(Db-0.5)-
0.52591263066186533*(Dr-0.5));
*green=QuantumRange*(Y-0.12913289889050927*(Db-0.5)+
0.26789932820759876*(Dr-0.5));
*blue=QuantumRange*(Y+0.66467905997895482*(Db-0.5)-
7.9202543533108e-05*(Dr-0.5));
}
static void ConvertYUVToRGB(const double Y,const double U,const double V,
double *red,double *green,double *blue)
{
*red=QuantumRange*(Y-3.945707070708279e-05*(U-0.5)+1.1398279671717170825*
(V-0.5));
*green=QuantumRange*(Y-0.3946101641414141437*(U-0.5)-0.5805003156565656797*
(V-0.5));
*blue=QuantumRange*(Y+2.0319996843434342537*(U-0.5)-4.813762626262513e-04*
(V-0.5));
}
static MagickBooleanType TransformsRGBImage(Image *image,
ExceptionInfo *exception)
{
#define TransformsRGBImageTag "Transform/Image"
static const float
YCCMap[1389] =
{
0.000000f, 0.000720f, 0.001441f, 0.002161f, 0.002882f, 0.003602f,
0.004323f, 0.005043f, 0.005764f, 0.006484f, 0.007205f, 0.007925f,
0.008646f, 0.009366f, 0.010086f, 0.010807f, 0.011527f, 0.012248f,
0.012968f, 0.013689f, 0.014409f, 0.015130f, 0.015850f, 0.016571f,
0.017291f, 0.018012f, 0.018732f, 0.019452f, 0.020173f, 0.020893f,
0.021614f, 0.022334f, 0.023055f, 0.023775f, 0.024496f, 0.025216f,
0.025937f, 0.026657f, 0.027378f, 0.028098f, 0.028818f, 0.029539f,
0.030259f, 0.030980f, 0.031700f, 0.032421f, 0.033141f, 0.033862f,
0.034582f, 0.035303f, 0.036023f, 0.036744f, 0.037464f, 0.038184f,
0.038905f, 0.039625f, 0.040346f, 0.041066f, 0.041787f, 0.042507f,
0.043228f, 0.043948f, 0.044669f, 0.045389f, 0.046110f, 0.046830f,
0.047550f, 0.048271f, 0.048991f, 0.049712f, 0.050432f, 0.051153f,
0.051873f, 0.052594f, 0.053314f, 0.054035f, 0.054755f, 0.055476f,
0.056196f, 0.056916f, 0.057637f, 0.058357f, 0.059078f, 0.059798f,
0.060519f, 0.061239f, 0.061960f, 0.062680f, 0.063401f, 0.064121f,
0.064842f, 0.065562f, 0.066282f, 0.067003f, 0.067723f, 0.068444f,
0.069164f, 0.069885f, 0.070605f, 0.071326f, 0.072046f, 0.072767f,
0.073487f, 0.074207f, 0.074928f, 0.075648f, 0.076369f, 0.077089f,
0.077810f, 0.078530f, 0.079251f, 0.079971f, 0.080692f, 0.081412f,
0.082133f, 0.082853f, 0.083573f, 0.084294f, 0.085014f, 0.085735f,
0.086455f, 0.087176f, 0.087896f, 0.088617f, 0.089337f, 0.090058f,
0.090778f, 0.091499f, 0.092219f, 0.092939f, 0.093660f, 0.094380f,
0.095101f, 0.095821f, 0.096542f, 0.097262f, 0.097983f, 0.098703f,
0.099424f, 0.100144f, 0.100865f, 0.101585f, 0.102305f, 0.103026f,
0.103746f, 0.104467f, 0.105187f, 0.105908f, 0.106628f, 0.107349f,
0.108069f, 0.108790f, 0.109510f, 0.110231f, 0.110951f, 0.111671f,
0.112392f, 0.113112f, 0.113833f, 0.114553f, 0.115274f, 0.115994f,
0.116715f, 0.117435f, 0.118156f, 0.118876f, 0.119597f, 0.120317f,
0.121037f, 0.121758f, 0.122478f, 0.123199f, 0.123919f, 0.124640f,
0.125360f, 0.126081f, 0.126801f, 0.127522f, 0.128242f, 0.128963f,
0.129683f, 0.130403f, 0.131124f, 0.131844f, 0.132565f, 0.133285f,
0.134006f, 0.134726f, 0.135447f, 0.136167f, 0.136888f, 0.137608f,
0.138329f, 0.139049f, 0.139769f, 0.140490f, 0.141210f, 0.141931f,
0.142651f, 0.143372f, 0.144092f, 0.144813f, 0.145533f, 0.146254f,
0.146974f, 0.147695f, 0.148415f, 0.149135f, 0.149856f, 0.150576f,
0.151297f, 0.152017f, 0.152738f, 0.153458f, 0.154179f, 0.154899f,
0.155620f, 0.156340f, 0.157061f, 0.157781f, 0.158501f, 0.159222f,
0.159942f, 0.160663f, 0.161383f, 0.162104f, 0.162824f, 0.163545f,
0.164265f, 0.164986f, 0.165706f, 0.166427f, 0.167147f, 0.167867f,
0.168588f, 0.169308f, 0.170029f, 0.170749f, 0.171470f, 0.172190f,
0.172911f, 0.173631f, 0.174352f, 0.175072f, 0.175793f, 0.176513f,
0.177233f, 0.177954f, 0.178674f, 0.179395f, 0.180115f, 0.180836f,
0.181556f, 0.182277f, 0.182997f, 0.183718f, 0.184438f, 0.185159f,
0.185879f, 0.186599f, 0.187320f, 0.188040f, 0.188761f, 0.189481f,
0.190202f, 0.190922f, 0.191643f, 0.192363f, 0.193084f, 0.193804f,
0.194524f, 0.195245f, 0.195965f, 0.196686f, 0.197406f, 0.198127f,
0.198847f, 0.199568f, 0.200288f, 0.201009f, 0.201729f, 0.202450f,
0.203170f, 0.203890f, 0.204611f, 0.205331f, 0.206052f, 0.206772f,
0.207493f, 0.208213f, 0.208934f, 0.209654f, 0.210375f, 0.211095f,
0.211816f, 0.212536f, 0.213256f, 0.213977f, 0.214697f, 0.215418f,
0.216138f, 0.216859f, 0.217579f, 0.218300f, 0.219020f, 0.219741f,
0.220461f, 0.221182f, 0.221902f, 0.222622f, 0.223343f, 0.224063f,
0.224784f, 0.225504f, 0.226225f, 0.226945f, 0.227666f, 0.228386f,
0.229107f, 0.229827f, 0.230548f, 0.231268f, 0.231988f, 0.232709f,
0.233429f, 0.234150f, 0.234870f, 0.235591f, 0.236311f, 0.237032f,
0.237752f, 0.238473f, 0.239193f, 0.239914f, 0.240634f, 0.241354f,
0.242075f, 0.242795f, 0.243516f, 0.244236f, 0.244957f, 0.245677f,
0.246398f, 0.247118f, 0.247839f, 0.248559f, 0.249280f, 0.250000f,
0.250720f, 0.251441f, 0.252161f, 0.252882f, 0.253602f, 0.254323f,
0.255043f, 0.255764f, 0.256484f, 0.257205f, 0.257925f, 0.258646f,
0.259366f, 0.260086f, 0.260807f, 0.261527f, 0.262248f, 0.262968f,
0.263689f, 0.264409f, 0.265130f, 0.265850f, 0.266571f, 0.267291f,
0.268012f, 0.268732f, 0.269452f, 0.270173f, 0.270893f, 0.271614f,
0.272334f, 0.273055f, 0.273775f, 0.274496f, 0.275216f, 0.275937f,
0.276657f, 0.277378f, 0.278098f, 0.278818f, 0.279539f, 0.280259f,
0.280980f, 0.281700f, 0.282421f, 0.283141f, 0.283862f, 0.284582f,
0.285303f, 0.286023f, 0.286744f, 0.287464f, 0.288184f, 0.288905f,
0.289625f, 0.290346f, 0.291066f, 0.291787f, 0.292507f, 0.293228f,
0.293948f, 0.294669f, 0.295389f, 0.296109f, 0.296830f, 0.297550f,
0.298271f, 0.298991f, 0.299712f, 0.300432f, 0.301153f, 0.301873f,
0.302594f, 0.303314f, 0.304035f, 0.304755f, 0.305476f, 0.306196f,
0.306916f, 0.307637f, 0.308357f, 0.309078f, 0.309798f, 0.310519f,
0.311239f, 0.311960f, 0.312680f, 0.313401f, 0.314121f, 0.314842f,
0.315562f, 0.316282f, 0.317003f, 0.317723f, 0.318444f, 0.319164f,
0.319885f, 0.320605f, 0.321326f, 0.322046f, 0.322767f, 0.323487f,
0.324207f, 0.324928f, 0.325648f, 0.326369f, 0.327089f, 0.327810f,
0.328530f, 0.329251f, 0.329971f, 0.330692f, 0.331412f, 0.332133f,
0.332853f, 0.333573f, 0.334294f, 0.335014f, 0.335735f, 0.336455f,
0.337176f, 0.337896f, 0.338617f, 0.339337f, 0.340058f, 0.340778f,
0.341499f, 0.342219f, 0.342939f, 0.343660f, 0.344380f, 0.345101f,
0.345821f, 0.346542f, 0.347262f, 0.347983f, 0.348703f, 0.349424f,
0.350144f, 0.350865f, 0.351585f, 0.352305f, 0.353026f, 0.353746f,
0.354467f, 0.355187f, 0.355908f, 0.356628f, 0.357349f, 0.358069f,
0.358790f, 0.359510f, 0.360231f, 0.360951f, 0.361671f, 0.362392f,
0.363112f, 0.363833f, 0.364553f, 0.365274f, 0.365994f, 0.366715f,
0.367435f, 0.368156f, 0.368876f, 0.369597f, 0.370317f, 0.371037f,
0.371758f, 0.372478f, 0.373199f, 0.373919f, 0.374640f, 0.375360f,
0.376081f, 0.376801f, 0.377522f, 0.378242f, 0.378963f, 0.379683f,
0.380403f, 0.381124f, 0.381844f, 0.382565f, 0.383285f, 0.384006f,
0.384726f, 0.385447f, 0.386167f, 0.386888f, 0.387608f, 0.388329f,
0.389049f, 0.389769f, 0.390490f, 0.391210f, 0.391931f, 0.392651f,
0.393372f, 0.394092f, 0.394813f, 0.395533f, 0.396254f, 0.396974f,
0.397695f, 0.398415f, 0.399135f, 0.399856f, 0.400576f, 0.401297f,
0.402017f, 0.402738f, 0.403458f, 0.404179f, 0.404899f, 0.405620f,
0.406340f, 0.407061f, 0.407781f, 0.408501f, 0.409222f, 0.409942f,
0.410663f, 0.411383f, 0.412104f, 0.412824f, 0.413545f, 0.414265f,
0.414986f, 0.415706f, 0.416427f, 0.417147f, 0.417867f, 0.418588f,
0.419308f, 0.420029f, 0.420749f, 0.421470f, 0.422190f, 0.422911f,
0.423631f, 0.424352f, 0.425072f, 0.425793f, 0.426513f, 0.427233f,
0.427954f, 0.428674f, 0.429395f, 0.430115f, 0.430836f, 0.431556f,
0.432277f, 0.432997f, 0.433718f, 0.434438f, 0.435158f, 0.435879f,
0.436599f, 0.437320f, 0.438040f, 0.438761f, 0.439481f, 0.440202f,
0.440922f, 0.441643f, 0.442363f, 0.443084f, 0.443804f, 0.444524f,
0.445245f, 0.445965f, 0.446686f, 0.447406f, 0.448127f, 0.448847f,
0.449568f, 0.450288f, 0.451009f, 0.451729f, 0.452450f, 0.453170f,
0.453891f, 0.454611f, 0.455331f, 0.456052f, 0.456772f, 0.457493f,
0.458213f, 0.458934f, 0.459654f, 0.460375f, 0.461095f, 0.461816f,
0.462536f, 0.463256f, 0.463977f, 0.464697f, 0.465418f, 0.466138f,
0.466859f, 0.467579f, 0.468300f, 0.469020f, 0.469741f, 0.470461f,
0.471182f, 0.471902f, 0.472622f, 0.473343f, 0.474063f, 0.474784f,
0.475504f, 0.476225f, 0.476945f, 0.477666f, 0.478386f, 0.479107f,
0.479827f, 0.480548f, 0.481268f, 0.481988f, 0.482709f, 0.483429f,
0.484150f, 0.484870f, 0.485591f, 0.486311f, 0.487032f, 0.487752f,
0.488473f, 0.489193f, 0.489914f, 0.490634f, 0.491354f, 0.492075f,
0.492795f, 0.493516f, 0.494236f, 0.494957f, 0.495677f, 0.496398f,
0.497118f, 0.497839f, 0.498559f, 0.499280f, 0.500000f, 0.500720f,
0.501441f, 0.502161f, 0.502882f, 0.503602f, 0.504323f, 0.505043f,
0.505764f, 0.506484f, 0.507205f, 0.507925f, 0.508646f, 0.509366f,
0.510086f, 0.510807f, 0.511527f, 0.512248f, 0.512968f, 0.513689f,
0.514409f, 0.515130f, 0.515850f, 0.516571f, 0.517291f, 0.518012f,
0.518732f, 0.519452f, 0.520173f, 0.520893f, 0.521614f, 0.522334f,
0.523055f, 0.523775f, 0.524496f, 0.525216f, 0.525937f, 0.526657f,
0.527378f, 0.528098f, 0.528818f, 0.529539f, 0.530259f, 0.530980f,
0.531700f, 0.532421f, 0.533141f, 0.533862f, 0.534582f, 0.535303f,
0.536023f, 0.536744f, 0.537464f, 0.538184f, 0.538905f, 0.539625f,
0.540346f, 0.541066f, 0.541787f, 0.542507f, 0.543228f, 0.543948f,
0.544669f, 0.545389f, 0.546109f, 0.546830f, 0.547550f, 0.548271f,
0.548991f, 0.549712f, 0.550432f, 0.551153f, 0.551873f, 0.552594f,
0.553314f, 0.554035f, 0.554755f, 0.555476f, 0.556196f, 0.556916f,
0.557637f, 0.558357f, 0.559078f, 0.559798f, 0.560519f, 0.561239f,
0.561960f, 0.562680f, 0.563401f, 0.564121f, 0.564842f, 0.565562f,
0.566282f, 0.567003f, 0.567723f, 0.568444f, 0.569164f, 0.569885f,
0.570605f, 0.571326f, 0.572046f, 0.572767f, 0.573487f, 0.574207f,
0.574928f, 0.575648f, 0.576369f, 0.577089f, 0.577810f, 0.578530f,
0.579251f, 0.579971f, 0.580692f, 0.581412f, 0.582133f, 0.582853f,
0.583573f, 0.584294f, 0.585014f, 0.585735f, 0.586455f, 0.587176f,
0.587896f, 0.588617f, 0.589337f, 0.590058f, 0.590778f, 0.591499f,
0.592219f, 0.592939f, 0.593660f, 0.594380f, 0.595101f, 0.595821f,
0.596542f, 0.597262f, 0.597983f, 0.598703f, 0.599424f, 0.600144f,
0.600865f, 0.601585f, 0.602305f, 0.603026f, 0.603746f, 0.604467f,
0.605187f, 0.605908f, 0.606628f, 0.607349f, 0.608069f, 0.608790f,
0.609510f, 0.610231f, 0.610951f, 0.611671f, 0.612392f, 0.613112f,
0.613833f, 0.614553f, 0.615274f, 0.615994f, 0.616715f, 0.617435f,
0.618156f, 0.618876f, 0.619597f, 0.620317f, 0.621037f, 0.621758f,
0.622478f, 0.623199f, 0.623919f, 0.624640f, 0.625360f, 0.626081f,
0.626801f, 0.627522f, 0.628242f, 0.628963f, 0.629683f, 0.630403f,
0.631124f, 0.631844f, 0.632565f, 0.633285f, 0.634006f, 0.634726f,
0.635447f, 0.636167f, 0.636888f, 0.637608f, 0.638329f, 0.639049f,
0.639769f, 0.640490f, 0.641210f, 0.641931f, 0.642651f, 0.643372f,
0.644092f, 0.644813f, 0.645533f, 0.646254f, 0.646974f, 0.647695f,
0.648415f, 0.649135f, 0.649856f, 0.650576f, 0.651297f, 0.652017f,
0.652738f, 0.653458f, 0.654179f, 0.654899f, 0.655620f, 0.656340f,
0.657061f, 0.657781f, 0.658501f, 0.659222f, 0.659942f, 0.660663f,
0.661383f, 0.662104f, 0.662824f, 0.663545f, 0.664265f, 0.664986f,
0.665706f, 0.666427f, 0.667147f, 0.667867f, 0.668588f, 0.669308f,
0.670029f, 0.670749f, 0.671470f, 0.672190f, 0.672911f, 0.673631f,
0.674352f, 0.675072f, 0.675793f, 0.676513f, 0.677233f, 0.677954f,
0.678674f, 0.679395f, 0.680115f, 0.680836f, 0.681556f, 0.682277f,
0.682997f, 0.683718f, 0.684438f, 0.685158f, 0.685879f, 0.686599f,
0.687320f, 0.688040f, 0.688761f, 0.689481f, 0.690202f, 0.690922f,
0.691643f, 0.692363f, 0.693084f, 0.693804f, 0.694524f, 0.695245f,
0.695965f, 0.696686f, 0.697406f, 0.698127f, 0.698847f, 0.699568f,
0.700288f, 0.701009f, 0.701729f, 0.702450f, 0.703170f, 0.703891f,
0.704611f, 0.705331f, 0.706052f, 0.706772f, 0.707493f, 0.708213f,
0.708934f, 0.709654f, 0.710375f, 0.711095f, 0.711816f, 0.712536f,
0.713256f, 0.713977f, 0.714697f, 0.715418f, 0.716138f, 0.716859f,
0.717579f, 0.718300f, 0.719020f, 0.719741f, 0.720461f, 0.721182f,
0.721902f, 0.722622f, 0.723343f, 0.724063f, 0.724784f, 0.725504f,
0.726225f, 0.726945f, 0.727666f, 0.728386f, 0.729107f, 0.729827f,
0.730548f, 0.731268f, 0.731988f, 0.732709f, 0.733429f, 0.734150f,
0.734870f, 0.735591f, 0.736311f, 0.737032f, 0.737752f, 0.738473f,
0.739193f, 0.739914f, 0.740634f, 0.741354f, 0.742075f, 0.742795f,
0.743516f, 0.744236f, 0.744957f, 0.745677f, 0.746398f, 0.747118f,
0.747839f, 0.748559f, 0.749280f, 0.750000f, 0.750720f, 0.751441f,
0.752161f, 0.752882f, 0.753602f, 0.754323f, 0.755043f, 0.755764f,
0.756484f, 0.757205f, 0.757925f, 0.758646f, 0.759366f, 0.760086f,
0.760807f, 0.761527f, 0.762248f, 0.762968f, 0.763689f, 0.764409f,
0.765130f, 0.765850f, 0.766571f, 0.767291f, 0.768012f, 0.768732f,
0.769452f, 0.770173f, 0.770893f, 0.771614f, 0.772334f, 0.773055f,
0.773775f, 0.774496f, 0.775216f, 0.775937f, 0.776657f, 0.777378f,
0.778098f, 0.778818f, 0.779539f, 0.780259f, 0.780980f, 0.781700f,
0.782421f, 0.783141f, 0.783862f, 0.784582f, 0.785303f, 0.786023f,
0.786744f, 0.787464f, 0.788184f, 0.788905f, 0.789625f, 0.790346f,
0.791066f, 0.791787f, 0.792507f, 0.793228f, 0.793948f, 0.794669f,
0.795389f, 0.796109f, 0.796830f, 0.797550f, 0.798271f, 0.798991f,
0.799712f, 0.800432f, 0.801153f, 0.801873f, 0.802594f, 0.803314f,
0.804035f, 0.804755f, 0.805476f, 0.806196f, 0.806916f, 0.807637f,
0.808357f, 0.809078f, 0.809798f, 0.810519f, 0.811239f, 0.811960f,
0.812680f, 0.813401f, 0.814121f, 0.814842f, 0.815562f, 0.816282f,
0.817003f, 0.817723f, 0.818444f, 0.819164f, 0.819885f, 0.820605f,
0.821326f, 0.822046f, 0.822767f, 0.823487f, 0.824207f, 0.824928f,
0.825648f, 0.826369f, 0.827089f, 0.827810f, 0.828530f, 0.829251f,
0.829971f, 0.830692f, 0.831412f, 0.832133f, 0.832853f, 0.833573f,
0.834294f, 0.835014f, 0.835735f, 0.836455f, 0.837176f, 0.837896f,
0.838617f, 0.839337f, 0.840058f, 0.840778f, 0.841499f, 0.842219f,
0.842939f, 0.843660f, 0.844380f, 0.845101f, 0.845821f, 0.846542f,
0.847262f, 0.847983f, 0.848703f, 0.849424f, 0.850144f, 0.850865f,
0.851585f, 0.852305f, 0.853026f, 0.853746f, 0.854467f, 0.855187f,
0.855908f, 0.856628f, 0.857349f, 0.858069f, 0.858790f, 0.859510f,
0.860231f, 0.860951f, 0.861671f, 0.862392f, 0.863112f, 0.863833f,
0.864553f, 0.865274f, 0.865994f, 0.866715f, 0.867435f, 0.868156f,
0.868876f, 0.869597f, 0.870317f, 0.871037f, 0.871758f, 0.872478f,
0.873199f, 0.873919f, 0.874640f, 0.875360f, 0.876081f, 0.876801f,
0.877522f, 0.878242f, 0.878963f, 0.879683f, 0.880403f, 0.881124f,
0.881844f, 0.882565f, 0.883285f, 0.884006f, 0.884726f, 0.885447f,
0.886167f, 0.886888f, 0.887608f, 0.888329f, 0.889049f, 0.889769f,
0.890490f, 0.891210f, 0.891931f, 0.892651f, 0.893372f, 0.894092f,
0.894813f, 0.895533f, 0.896254f, 0.896974f, 0.897695f, 0.898415f,
0.899135f, 0.899856f, 0.900576f, 0.901297f, 0.902017f, 0.902738f,
0.903458f, 0.904179f, 0.904899f, 0.905620f, 0.906340f, 0.907061f,
0.907781f, 0.908501f, 0.909222f, 0.909942f, 0.910663f, 0.911383f,
0.912104f, 0.912824f, 0.913545f, 0.914265f, 0.914986f, 0.915706f,
0.916427f, 0.917147f, 0.917867f, 0.918588f, 0.919308f, 0.920029f,
0.920749f, 0.921470f, 0.922190f, 0.922911f, 0.923631f, 0.924352f,
0.925072f, 0.925793f, 0.926513f, 0.927233f, 0.927954f, 0.928674f,
0.929395f, 0.930115f, 0.930836f, 0.931556f, 0.932277f, 0.932997f,
0.933718f, 0.934438f, 0.935158f, 0.935879f, 0.936599f, 0.937320f,
0.938040f, 0.938761f, 0.939481f, 0.940202f, 0.940922f, 0.941643f,
0.942363f, 0.943084f, 0.943804f, 0.944524f, 0.945245f, 0.945965f,
0.946686f, 0.947406f, 0.948127f, 0.948847f, 0.949568f, 0.950288f,
0.951009f, 0.951729f, 0.952450f, 0.953170f, 0.953891f, 0.954611f,
0.955331f, 0.956052f, 0.956772f, 0.957493f, 0.958213f, 0.958934f,
0.959654f, 0.960375f, 0.961095f, 0.961816f, 0.962536f, 0.963256f,
0.963977f, 0.964697f, 0.965418f, 0.966138f, 0.966859f, 0.967579f,
0.968300f, 0.969020f, 0.969741f, 0.970461f, 0.971182f, 0.971902f,
0.972622f, 0.973343f, 0.974063f, 0.974784f, 0.975504f, 0.976225f,
0.976945f, 0.977666f, 0.978386f, 0.979107f, 0.979827f, 0.980548f,
0.981268f, 0.981988f, 0.982709f, 0.983429f, 0.984150f, 0.984870f,
0.985591f, 0.986311f, 0.987032f, 0.987752f, 0.988473f, 0.989193f,
0.989914f, 0.990634f, 0.991354f, 0.992075f, 0.992795f, 0.993516f,
0.994236f, 0.994957f, 0.995677f, 0.996398f, 0.997118f, 0.997839f,
0.998559f, 0.999280f, 1.000000f
};
CacheView
*image_view;
const char
*artifact;
IlluminantType
illuminant = D65Illuminant;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
i;
ssize_t
y;
TransformPacket
*y_map,
*x_map,
*z_map;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
artifact=GetImageArtifact(image,"color:illuminant");
if (artifact != (const char *) NULL)
{
illuminant=(IlluminantType) ParseCommandOption(MagickIlluminantOptions,
MagickFalse,artifact);
if ((ssize_t) illuminant < 0)
illuminant=UndefinedIlluminant;
}
status=MagickTrue;
progress=0;
switch (image->colorspace)
{
case CMYKColorspace:
{
PixelInfo
zero;
/*
Transform image from CMYK to sRGB.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
GetPixelInfo(image,&zero);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
pixel=zero;
for (x=0; x < (ssize_t) image->columns; x++)
{
GetPixelInfoPixel(image,q,&pixel);
ConvertCMYKToRGB(&pixel);
SetPixelViaPixelInfo(image,&pixel,q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LinearGRAYColorspace:
{
/*
Transform linear GRAY to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
MagickRealType
gray;
gray=0.212656*EncodePixelGamma(GetPixelRed(image,q))+0.715158*
EncodePixelGamma(GetPixelGreen(image,q))+0.072186*
EncodePixelGamma(GetPixelBlue(image,q));
SetPixelRed(image,ClampToQuantum(gray),q);
SetPixelGreen(image,ClampToQuantum(gray),q);
SetPixelBlue(image,ClampToQuantum(gray),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case GRAYColorspace:
{
/*
Transform GRAY to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
MagickRealType
gray;
gray=0.212656*GetPixelRed(image,q)+0.715158*GetPixelGreen(image,q)+
0.072186*GetPixelBlue(image,q);
SetPixelRed(image,ClampToQuantum(gray),q);
SetPixelGreen(image,ClampToQuantum(gray),q);
SetPixelBlue(image,ClampToQuantum(gray),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case Adobe98Colorspace:
case CMYColorspace:
case DisplayP3Colorspace:
case HCLColorspace:
case HCLpColorspace:
case HSBColorspace:
case HSIColorspace:
case HSLColorspace:
case HSVColorspace:
case HWBColorspace:
case JzazbzColorspace:
case LabColorspace:
case LCHColorspace:
case LCHabColorspace:
case LCHuvColorspace:
case LMSColorspace:
case LuvColorspace:
case ProPhotoColorspace:
case xyYColorspace:
case XYZColorspace:
case YCbCrColorspace:
case YDbDrColorspace:
case YIQColorspace:
case YPbPrColorspace:
case YUVColorspace:
{
const char
*value;
double
white_luminance;
/*
Transform image from source colorspace to sRGB.
*/
white_luminance=10000.0;
value=GetImageProperty(image,"white-luminance",exception);
if (value != (const char *) NULL)
white_luminance=StringToDouble(value,(char **) NULL);
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
blue,
green,
red,
X,
Y,
Z;
X=QuantumScale*GetPixelRed(image,q);
Y=QuantumScale*GetPixelGreen(image,q);
Z=QuantumScale*GetPixelBlue(image,q);
switch (image->colorspace)
{
case Adobe98Colorspace:
{
ConvertAdobe98ToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case CMYColorspace:
{
ConvertCMYToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case DisplayP3Colorspace:
{
ConvertDisplayP3ToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HCLColorspace:
{
ConvertHCLToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HCLpColorspace:
{
ConvertHCLpToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSBColorspace:
{
ConvertHSBToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSIColorspace:
{
ConvertHSIToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSLColorspace:
{
ConvertHSLToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HSVColorspace:
{
ConvertHSVToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case HWBColorspace:
{
ConvertHWBToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case JzazbzColorspace:
{
ConvertJzazbzToRGB(X,Y,Z,white_luminance,&red,&green,&blue);
break;
}
case LabColorspace:
{
ConvertLabToRGB(X,Y,Z,illuminant,&red,&green,&blue);
break;
}
case LCHColorspace:
case LCHabColorspace:
{
ConvertLCHabToRGB(X,Y,Z,illuminant,&red,&green,&blue);
break;
}
case LCHuvColorspace:
{
ConvertLCHuvToRGB(X,Y,Z,illuminant,&red,&green,&blue);
break;
}
case LMSColorspace:
{
ConvertLMSToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case LuvColorspace:
{
ConvertLuvToRGB(X,Y,Z,illuminant,&red,&green,&blue);
break;
}
case ProPhotoColorspace:
{
ConvertProPhotoToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case xyYColorspace:
{
ConvertxyYToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case XYZColorspace:
{
ConvertXYZToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YCbCrColorspace:
{
ConvertYCbCrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YDbDrColorspace:
{
ConvertYDbDrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YIQColorspace:
{
ConvertYIQToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YPbPrColorspace:
{
ConvertYPbPrToRGB(X,Y,Z,&red,&green,&blue);
break;
}
case YUVColorspace:
{
ConvertYUVToRGB(X,Y,Z,&red,&green,&blue);
break;
}
default:
{
red=QuantumRange*X;
green=QuantumRange*Y;
blue=QuantumRange*Z;
break;
}
}
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case LogColorspace:
{
const char
*value;
double
black,
density,
film_gamma,
gamma,
reference_black,
reference_white;
Quantum
*logmap;
/*
Transform Log to sRGB colorspace.
*/
density=DisplayGamma;
gamma=DisplayGamma;
value=GetImageProperty(image,"gamma",exception);
if (value != (const char *) NULL)
gamma=PerceptibleReciprocal(StringToDouble(value,(char **) NULL));
film_gamma=FilmGamma;
value=GetImageProperty(image,"film-gamma",exception);
if (value != (const char *) NULL)
film_gamma=StringToDouble(value,(char **) NULL);
reference_black=ReferenceBlack;
value=GetImageProperty(image,"reference-black",exception);
if (value != (const char *) NULL)
reference_black=StringToDouble(value,(char **) NULL);
reference_white=ReferenceWhite;
value=GetImageProperty(image,"reference-white",exception);
if (value != (const char *) NULL)
reference_white=StringToDouble(value,(char **) NULL);
logmap=(Quantum *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*logmap));
if (logmap == (Quantum *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
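/*
  Build a Cineon-style log-to-linear lookup table over 10-bit code values:
  entries at or below reference_black clamp to 0, entries at or above
  reference_white clamp to QuantumRange, and the span in between follows a
  base-10 exponential curve derived from the gamma, density and film-gamma
  settings, normalized so that reference_white maps to full scale.
*/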
black=pow(10.0,(reference_black-reference_white)*(gamma/density)*0.002*
PerceptibleReciprocal(film_gamma));
for (i=0; i <= (ssize_t) (reference_black*MaxMap/1024.0); i++)
logmap[i]=(Quantum) 0;
for ( ; i < (ssize_t) (reference_white*MaxMap/1024.0); i++)
logmap[i]=ClampToQuantum(QuantumRange/(1.0-black)*
(pow(10.0,(1024.0*i/MaxMap-reference_white)*(gamma/density)*0.002*
PerceptibleReciprocal(film_gamma))-black));
for ( ; i <= (ssize_t) MaxMap; i++)
logmap[i]=QuantumRange;
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
double
blue,
green,
red;
red=(double) logmap[ScaleQuantumToMap(GetPixelRed(image,q))];
green=(double) logmap[ScaleQuantumToMap(GetPixelGreen(image,q))];
blue=(double) logmap[ScaleQuantumToMap(GetPixelBlue(image,q))];
SetPixelRed(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
red)),q);
SetPixelGreen(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
green)),q);
SetPixelBlue(image,ClampToQuantum(EncodePixelGamma((MagickRealType)
blue)),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
logmap=(Quantum *) RelinquishMagickMemory(logmap);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
case RGBColorspace:
case scRGBColorspace:
{
/*
Transform linear RGB to sRGB colorspace.
*/
if (image->storage_class == PseudoClass)
{
if (SyncImage(image,exception) == MagickFalse)
return(MagickFalse);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=(ssize_t) image->columns; x != 0; x--)
{
double
blue,
green,
red;
red=EncodePixelGamma((MagickRealType) GetPixelRed(image,q));
green=EncodePixelGamma((MagickRealType) GetPixelGreen(image,q));
blue=EncodePixelGamma((MagickRealType) GetPixelBlue(image,q));
SetPixelRed(image,ClampToQuantum(red),q);
SetPixelGreen(image,ClampToQuantum(green),q);
SetPixelBlue(image,ClampToQuantum(blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(status);
}
default:
break;
}
/*
Allocate the tables.
*/
x_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*x_map));
y_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*y_map));
z_map=(TransformPacket *) AcquireQuantumMemory((size_t) MaxMap+1UL,
sizeof(*z_map));
if ((x_map == (TransformPacket *) NULL) ||
(y_map == (TransformPacket *) NULL) ||
(z_map == (TransformPacket *) NULL))
{
if (z_map != (TransformPacket *) NULL)
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
if (y_map != (TransformPacket *) NULL)
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
if (x_map != (TransformPacket *) NULL)
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
}
switch (image->colorspace)
{
case OHTAColorspace:
{
/*
Initialize OHTA tables:
I1 = 0.33333*R+0.33334*G+0.33333*B
I2 = 0.50000*R+0.00000*G-0.50000*B
I3 =-0.25000*R+0.50000*G-0.25000*B
R = I1+1.00000*I2-0.66668*I3
G = I1+0.00000*I2+1.33333*I3
B = I1-1.00000*I2-0.66668*I3
I2 and I3, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
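/*
  In the table below, 0.5*(2.0*i-MaxMap) recenters the index i from the range
  [0, MaxMap] onto [-MaxMap/2, +MaxMap/2], undoing the normalization of the
  signed I2/I3 channels described above.
*/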
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) (0.5*1.00000*(2.0*(double) i-MaxMap));
z_map[i].x=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap));
x_map[i].y=(MagickRealType) (1.0*(double) i);
y_map[i].y=(MagickRealType) (0.5*0.00000*(2.0*(double) i-MaxMap));
z_map[i].y=(MagickRealType) (0.5*1.33333*(2.0*(double) i-MaxMap));
x_map[i].z=(MagickRealType) (1.0*(double) i);
y_map[i].z=(MagickRealType) (-0.5*1.00000*(2.0*(double) i-MaxMap));
z_map[i].z=(MagickRealType) (-0.5*0.66668*(2.0*(double) i-MaxMap));
}
break;
}
case Rec601YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.402000*Cr
G = Y-0.344136*Cb-0.714136*Cr
B = Y+1.772000*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=0.99999999999914679361*(double) i;
y_map[i].x=0.5*(-1.2188941887145875e-06)*(2.00*(double) i-MaxMap);
z_map[i].x=0.5*1.4019995886561440468*(2.00*(double) i-MaxMap);
x_map[i].y=0.99999975910502514331*(double) i;
y_map[i].y=0.5*(-0.34413567816504303521)*(2.00*(double) i-MaxMap);
z_map[i].y=0.5*(-0.71413649331646789076)*(2.00*(double) i-MaxMap);
x_map[i].z=1.00000124040004623180*(double) i;
y_map[i].z=0.5*1.77200006607230409200*(2.00*(double) i-MaxMap);
z_map[i].z=0.5*2.1453384174593273e-06*(2.00*(double) i-MaxMap);
}
break;
}
case Rec709YCbCrColorspace:
{
/*
Initialize YCbCr tables:
R = Y +1.574800*Cr
G = Y-0.187324*Cb-0.468124*Cr
B = Y+1.855600*Cb
Cb and Cr, normally -0.5 through 0.5, must be normalized to the range 0
through QuantumRange.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*i);
y_map[i].x=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap));
z_map[i].x=(MagickRealType) (0.5*1.574800*(2.0*i-MaxMap));
x_map[i].y=(MagickRealType) (1.0*i);
y_map[i].y=(MagickRealType) (0.5*(-0.187324)*(2.0*i-MaxMap));
z_map[i].y=(MagickRealType) (0.5*(-0.468124)*(2.0*i-MaxMap));
x_map[i].z=(MagickRealType) (1.0*i);
y_map[i].z=(MagickRealType) (0.5*1.855600*(2.0*i-MaxMap));
z_map[i].z=(MagickRealType) (0.5*0.000000*(2.0*i-MaxMap));
}
break;
}
case YCCColorspace:
{
/*
Initialize YCC tables:
R = Y +1.340762*C2
G = Y-0.317038*C1-0.682243*C2
B = Y+1.632639*C1
YCC is scaled by 1.3584. C1 zero is 156 and C2 is at 137.
*/
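/*
  The coefficients below are the matrix entries above pre-multiplied by the
  1.3584 PhotoYCC scale factor, and the chroma channels are re-centered by
  subtracting the 8-bit zero points (156 for C1, 137 for C2) scaled up to the
  quantum range with ScaleCharToQuantum().
*/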
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.3584000*(double) i);
y_map[i].x=(MagickRealType) 0.0000000;
z_map[i].x=(MagickRealType) (1.8215000*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].y=(MagickRealType) (1.3584000*(double) i);
y_map[i].y=(MagickRealType) (-0.4302726*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].y=(MagickRealType) (-0.9271435*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(137))));
x_map[i].z=(MagickRealType) (1.3584000*(double) i);
y_map[i].z=(MagickRealType) (2.2179000*(1.0*(double) i-(double)
ScaleQuantumToMap(ScaleCharToQuantum(156))));
z_map[i].z=(MagickRealType) 0.0000000;
}
break;
}
default:
{
/*
Linear conversion tables.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static)
#endif
for (i=0; i <= (ssize_t) MaxMap; i++)
{
x_map[i].x=(MagickRealType) (1.0*(double) i);
y_map[i].x=(MagickRealType) 0.0;
z_map[i].x=(MagickRealType) 0.0;
x_map[i].y=(MagickRealType) 0.0;
y_map[i].y=(MagickRealType) (1.0*(double) i);
z_map[i].y=(MagickRealType) 0.0;
x_map[i].z=(MagickRealType) 0.0;
y_map[i].z=(MagickRealType) 0.0;
z_map[i].z=(MagickRealType) (1.0*(double) i);
}
break;
}
}
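/*
  Each TransformPacket holds the contribution of one input channel to the
  three output channels, pre-multiplied by the map index, so converting a
  pixel below reduces to three table lookups and two additions per channel
  instead of a full 3x3 matrix multiply per pixel.
*/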
/*
Convert to sRGB.
*/
switch (image->storage_class)
{
case DirectClass:
default:
{
/*
Convert DirectClass image.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
ssize_t
x;
Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
size_t
blue,
green,
red;
red=ScaleQuantumToMap(GetPixelRed(image,q));
green=ScaleQuantumToMap(GetPixelGreen(image,q));
blue=ScaleQuantumToMap(GetPixelBlue(image,q));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (image->colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red);
pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green);
pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue);
}
SetPixelRed(image,ClampToQuantum(pixel.red),q);
SetPixelGreen(image,ClampToQuantum(pixel.green),q);
SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,TransformsRGBImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
break;
}
case PseudoClass:
{
/*
Convert PseudoClass image.
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (i=0; i < (ssize_t) image->colors; i++)
{
PixelInfo
pixel;
size_t
blue,
green,
red;
red=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red));
green=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green));
blue=ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue));
pixel.red=x_map[red].x+y_map[green].x+z_map[blue].x;
pixel.green=x_map[red].y+y_map[green].y+z_map[blue].y;
pixel.blue=x_map[red].z+y_map[green].z+z_map[blue].z;
if (image->colorspace == YCCColorspace)
{
pixel.red=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.red/
(double) MaxMap)];
pixel.green=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.green/
(double) MaxMap)];
pixel.blue=QuantumRange*YCCMap[RoundToYCC(1024.0*pixel.blue/
(double) MaxMap)];
}
else
{
pixel.red=(MagickRealType) ScaleMapToQuantum(pixel.red);
pixel.green=(MagickRealType) ScaleMapToQuantum(pixel.green);
pixel.blue=(MagickRealType) ScaleMapToQuantum(pixel.blue);
}
image->colormap[i].red=(double) ClampToQuantum(pixel.red);
image->colormap[i].green=(double) ClampToQuantum(pixel.green);
image->colormap[i].blue=(double) ClampToQuantum(pixel.blue);
}
(void) SyncImage(image,exception);
break;
}
}
/*
Relinquish resources.
*/
z_map=(TransformPacket *) RelinquishMagickMemory(z_map);
y_map=(TransformPacket *) RelinquishMagickMemory(y_map);
x_map=(TransformPacket *) RelinquishMagickMemory(x_map);
if (SetImageColorspace(image,sRGBColorspace,exception) == MagickFalse)
return(MagickFalse);
return(MagickTrue);
}
|
GB_unaryop__lnot_uint32_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint32_int8
// op(A') function: GB_tran__lnot_uint32_int8
// C type: uint32_t
// A type: int8_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
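// For this kernel, GB_CAST_OP(p,p) expands (roughly) to:
//
//   {
//       int8_t aij = Ax [p] ;            // GB_GETA
//       uint32_t x = (uint32_t) aij ;    // GB_CASTING
//       Cx [p] = !(x != 0) ;             // GB_OP applied to GB_CX(p)
//   }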
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT32 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint32_int8
(
uint32_t *restrict Cx,
const int8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_uint32_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
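For orientation, a minimal standalone sketch of the element-wise operation the generated kernel above applies to its value array (illustrative only; the helper name and the plain-array interface below are not part of GraphBLAS):
#include <stdint.h>

/* Hypothetical helper: cast int8 values to uint32 and apply logical NOT,
   mirroring cij = !(aij != 0) from the generated kernel. */
static void lnot_uint32_from_int8(uint32_t *Cx, const int8_t *Ax, int64_t n)
{
    #pragma omp parallel for schedule(static)
    for (int64_t p = 0 ; p < n ; p++)
    {
        Cx[p] = (uint32_t) !(Ax[p] != 0) ;
    }
}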
GB_binop__gt_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__gt_int64)
// A.*B function (eWiseMult): GB (_AemultB_01__gt_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__gt_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__gt_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_int64)
// A*D function (colscale): GB (_AxD__gt_int64)
// D*A function (rowscale): GB (_DxB__gt_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__gt_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__gt_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_int64)
// C=scalar+B GB (_bind1st__gt_int64)
// C=scalar+B' GB (_bind1st_tran__gt_int64)
// C=A+scalar GB (_bind2nd__gt_int64)
// C=A'+scalar GB (_bind2nd_tran__gt_int64)
// C type: bool
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij > bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GT || GxB_NO_INT64 || GxB_NO_GT_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__gt_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__gt_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__gt_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__gt_int64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__gt_int64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__gt_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__gt_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__gt_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__gt_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__gt_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__gt_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__gt_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__gt_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__gt_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
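As a rough illustration of the scalar-binding variants generated above (plain arrays only; these helpers are not part of GraphBLAS), bind1st and bind2nd differ only in which argument of the non-commutative GT operator the scalar occupies:
#include <stdbool.h>
#include <stdint.h>

/* Sketch of the two scalar-binding variants for z = (x > y). */
static void gt_bind1st(bool *Cx, int64_t x, const int64_t *Bx, int64_t n)
{
    for (int64_t p = 0 ; p < n ; p++) Cx[p] = (x > Bx[p]) ;   /* scalar is the 1st argument */
}

static void gt_bind2nd(bool *Cx, const int64_t *Ax, int64_t y, int64_t n)
{
    for (int64_t p = 0 ; p < n ; p++) Cx[p] = (Ax[p] > y) ;   /* scalar is the 2nd argument */
}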
denseParallelJacobi.h | //
// Created by mbarb on 24/01/2018.
//
#ifndef PARALLELITERATIVE_DENSEPARALLELJACOBI_H
#define PARALLELITERATIVE_DENSEPARALLELJACOBI_H
#include <omp.h>
#include <algorithm>
#include <cmath>
#include <iostream>
#include <utility>
#include <vector>
// Eigen/Dense is assumed to provide Eigen::Matrix; Eigen::ColumnVector and the
// ulonglong/ulong typedefs are assumed to be aliases provided elsewhere in this project.
#include <Eigen/Dense>
namespace Iterative {
template <typename Scalar, long long SIZE>
class denseParallelJacobi {
public:
/**
* Dense Jacobi solver parallelized with OpenMP.
* @param A full-rank coefficient matrix of the linear system
* @param b right-hand-side (known terms) vector
* @param iterations maximum number of iterations
* @param tolerance error below which a component is considered converged
* @param workers number of OpenMP worker threads
*/
explicit denseParallelJacobi(
const Eigen::Matrix<Scalar, SIZE, SIZE>& A,
const Eigen::ColumnVector<Scalar, SIZE>& b,
const ulonglong iterations,
const Scalar tolerance,
const ulong workers=0L) :
A(A), b(b), iterations(iterations), tolerance(tolerance),
workers(workers), solution(b) {
solution.fill((Scalar)1/solution.size());
omp_set_num_threads(workers);
}
const Eigen::ColumnVector<Scalar, SIZE> solve() {
Eigen::ColumnVector<Scalar, SIZE> oldSolution(solution);
std::vector<ulonglong> index(solution.size());
for (ulonglong i = 0; i < solution.size(); ++i)
index[i]=i;
std::vector<ulonglong> remove;
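// Rows whose update falls within the tolerance are collected in `remove` and
// dropped from `index`, so later sweeps only recompute the components that
// have not yet converged; the loop exits early once `index` is empty.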
for (iteration = 0; iteration < iterations; ++iteration) {
//calculate solutions parallelizing on rows
#pragma omp parallel for schedule(dynamic)
for (auto i = 0; i < index.size(); ++i){
auto el = index[i];
solution[el] = solution_find(b[el], el, oldSolution);
Scalar error = std::abs(solution[el]-oldSolution[el]);
if(error <= tolerance){
#pragma omp critical
remove.emplace_back(i);
}
}
if(!remove.empty()){
std::sort(remove.rbegin(), remove.rend());
for (auto i : remove) {
index.erase(index.begin() + i);
}
remove.clear();
if (index.empty()) break;
}
std::swap(solution, oldSolution);
}
std::cout << iteration << std::endl;
return this->solution;
}
const Eigen::ColumnVector<Scalar, SIZE> &getSolution() const {
return solution;
}
const long getIteration() const {
return iteration;
}
protected:
const Eigen::Matrix<Scalar, SIZE, SIZE>& A;
const Eigen::ColumnVector<Scalar, SIZE>& b;
const ulonglong iterations;
const Scalar tolerance;
const ulong workers;
Eigen::ColumnVector<Scalar, SIZE> solution;
long iteration = 0L;
private:
/**
* Utility function implementing a single Jacobi update,
* x_i = (b_i - sum_{j != i} A(i,j) * x_j_old) / A(i,i).
* @param term right-hand-side component b_i
* @param index index i of the component being updated
* @param oldSolution solution vector from the previous sweep
* @return updated value of component i
*/
inline Scalar solution_find(Scalar term, const ulonglong index, Eigen::ColumnVector<Scalar,SIZE>& oldSolution) {
term -= A.row(index) * oldSolution;
return (term + A(index, index) * oldSolution[index]) / A(index, index);
}
};
};
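// Example usage (sketch only; Eigen::ColumnVector and the ulonglong/ulong
// typedefs are assumed to be aliases provided elsewhere in this project):
//
//   Eigen::Matrix<double, 4, 4> A;          // fill with a diagonally dominant matrix
//   Eigen::ColumnVector<double, 4> b;       // fill with the known terms
//   Iterative::denseParallelJacobi<double, 4> solver(A, b, 1000, 1e-10, 4);
//   auto x = solver.solve();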
#endif //PARALLELITERATIVE_DENSEPARALLELJACOBI_H
|
hvs_multi_moments.c | //
// Copyright (C) 2010-2011, Vitalii Ostrovskyi <vitalii@ostrovskyi.org.ua>
// Author: Vitalii Ostrovskyi <vitalii@ostrovskyi.org.ua>
//
#include <stdlib.h>  /* malloc, free */
#include <string.h>  /* memset, memcpy */
#include <omp.h>
/* The hvs_* types, the he/hb1/hb2/h1/h2 kernels, gmres() and the COMBS_/MOM_/
   COEF_INDEX macros are assumed to be declared in a project header included
   ahead of this file. */
#if HVS_DEBUG
#include <stdio.h>
#endif
#ifdef HVS_PROFILE
#include <time.h>
#endif
int init_ode_data(hvs_ode_data *data, hvs_state *input) {
data->ncenters = input->ncenters;
data->lambdasq = input->lambdasq;
data->moments = (hvs_moment *) malloc(input->ncenters*sizeof(hvs_moment));
if (data->moments != NULL) {
memset(data->moments, 0, input->ncenters*sizeof(hvs_moment));
} else {
return HVS_ERR;
}
data->centers = (hvs_center *) malloc(input->ncenters*sizeof(hvs_center));
if (data->centers != NULL) {
memset(data->centers, 0, input->ncenters*sizeof(hvs_center));
} else {
free(data->moments);
return HVS_ERR;
}
return HVS_OK;
}
int free_ode_data(hvs_ode_data *data) {
if (data->moments != NULL)
free(data->moments);
data->moments = NULL;
if (data->centers != NULL)
free(data->centers);
data->centers = NULL;
return HVS_OK;
}
int update_vorticity_field(hvs_state *state) {
int i,j,k,k1,k2;
FLOAT_TYPE sum;
#ifdef HVS_PROFILE
time_t starttime, endtime;
starttime = time(NULL);
#endif
#if HVS_DEBUG
printf("update_vorticity_field\n");
#endif
for (i=0; i<state->size; i++) {
sum = 0.0;
#pragma omp parallel for reduction(+:sum) private(j,k,k1,k2) shared(state)
for (j=0; j<state->ncenters; j++) {
for(k=0; k<NCOMBS; k++) {
k1 = COMBS_IND1(k);
k2 = COMBS_IND2(k);
sum += state->moments[j][k]*he(state->grid[i].x-state->centers[j].x,state->grid[i].y-state->centers[j].y,state->lambdasq,k1,k2);
}
}
state->vorticity_field[i] = sum;
}
#ifdef HVS_PROFILE
endtime = time(NULL);
timings.vorticity_update += (int)(endtime-starttime);
timings.vorticity_update = (int)(timings.vorticity_update/2);
#endif
return HVS_OK;
}
int init_moments(hvs_state *state) {
FLOAT_TYPE *A,*x;
UINT i,j,k;
int status;
#ifdef HVS_PROFILE
time_t starttime, endtime;
starttime = time(NULL);
#endif
#if HVS_DEBUG
printf("init_moments\n");
#endif
if (state->size!=state->ncenters) {
return HVS_ERR;
}
if ((A=malloc(sizeof(FLOAT_TYPE)*state->size*state->size))==NULL) {
return HVS_ERR;
}
if ((x=malloc(sizeof(FLOAT_TYPE)*state->size))==NULL) {
free(A);
return HVS_ERR;
}
memset(x,0,sizeof(FLOAT_TYPE)*state->size);
#pragma omp parallel for collapse(2) private(i,j) shared(A,state)
for(i=0;i<state->size;i++)
for(j=0;j<state->size;j++)
A[state->size*i+j] =
he(state->grid[i].x-state->centers[j].x,
state->grid[i].y-state->centers[j].y,
state->lambdasq,
0,0);
if ((status=gmres(A,x,state->vorticity_field,state->size,HVS_GMRES_PRECISION,HVS_GMRES_MAX_INNER_MATRIX,x))!=HVS_OK) {
free(A);
free(x);
return status;
}
// Get moments
for(i=0;i<state->ncenters;i++) {
state->moments[i][0]=x[i];
}
free(A);
free(x);
#ifdef HVS_PROFILE
endtime = time(NULL);
timings.init_moments = (int)(endtime-starttime);
#endif
return HVS_OK;
}
int eval_eq(hvs_ode_data *input, hvs_ode_data *output, hvs_coefs *coefs, FLOAT_TYPE time_val) {
FLOAT_TYPE lambdasq = input->lambdasq;
FLOAT_TYPE gamma1,gamma2;
int i0,j0,k,k1,k2,l,l1,l2,m,m1,m2,i,j;
#ifdef HVS_PROFILE
time_t starttime, endtime;
starttime = time(NULL);
#endif
output->lambdasq = input->lambdasq;
#if HVS_DEBUG
printf("eval_eq\n");
#endif
// Parallelize execution
#pragma omp parallel default(none) shared(input,output,coefs,time_val,lambdasq)
{
// First evaluate centers equation
#pragma omp for private(i0,j0,l,m,l1,l2,m1,m2)
for (i0=0; i0<input->ncenters; i0++) {
output->centers[i0].x=(FLOAT_TYPE)0.0;
output->centers[i0].y=(FLOAT_TYPE)0.0;
for(j0=0; j0<input->ncenters; j0++) {
if (i0==j0) continue;
for(l=0;l<NCOMBS;l++) {
l1=COMBS_IND1(l);
l2=COMBS_IND2(l);
for(m=0;m<NCOMBS;m++) {
m1=COMBS_IND1(m);
m2=COMBS_IND2(m);
output->centers[i0].x +=
input->moments[j0][MOM_INDEX(l1,l2)]*
input->moments[i0][MOM_INDEX(m1,m2)]*
POWN1(m1+m2)*
hb1(
input->centers[i0].x-input->centers[j0].x,
input->centers[i0].y-input->centers[j0].y,
input->lambdasq,
m1+l1,m2+l2);
output->centers[i0].y +=
input->moments[j0][MOM_INDEX(l1,l2)]*
input->moments[i0][MOM_INDEX(m1,m2)]*
POWN1(m1+m2)*
hb2(
input->centers[i0].x-input->centers[j0].x,
input->centers[i0].y-input->centers[j0].y,
input->lambdasq,
m1+l1,m2+l2);
}
}
}
output->centers[i0].x/=input->moments[i0][MOM_INDEX(0,0)];
output->centers[i0].y/=input->moments[i0][MOM_INDEX(0,0)];
}
// Now get the moments equations
#pragma omp for collapse(2) private(i0,j0,l,m,k,k1,k2,l1,l2,m1,m2,gamma1,gamma2,i,j)
for (i0=0; i0<input->ncenters; i0++) {
for(j0=0; j0<input->ncenters; j0++)
for(k=0;k<NCOMBS;k++) {
k1=COMBS_IND1(k);
k2=COMBS_IND2(k);
for(l=0;l<NCOMBS;l++) {
l1=COMBS_IND1(l);
l2=COMBS_IND2(l);
for(m=0;m<NCOMBS;m++) {
m1=COMBS_IND1(m);
m2=COMBS_IND2(m);
gamma1=0.0;
gamma2=0.0;
for (i=0;i<=NMOMENTS;i++)
for (j=0;j<=NMOMENTS;j++) {
if (j0==i0) {
// If j'==j gammas will come from A
if ( (i<=MIN(l1,k1-1)) && (j<=MIN(l2,k2)) )
gamma1+=coefs->gamma1[COEF_INDEX(k1,k2,l1,l2,m1,m2,i,j)]/
POW(lambdasq,i+j+1)*h1(m1+k1-i-1+l1-i,m2+k2-j+l2-j,lambdasq);
if ( (i<=MIN(l1,k1)) && (j<=MIN(l2,k2-1)) )
gamma2+=coefs->gamma2[COEF_INDEX(k1,k2,l1,l2,m1,m2,i,j)]/
POW(lambdasq,i+j+1)*h2(m1+k1-i+l1-i,m2+k2-j-1+l2-j,lambdasq);
} else {
// If j'<>j gammas will come from B
if ((i<=MIN(l1,k1-1)) && (j<=MIN(l2,k2)))
gamma1+=coefs->gamma1[COEF_INDEX(k1,k2,l1,l2,m1,m2,i,j)]/
POW(lambdasq,i+j+1)*hb1(input->centers[i0].x-input->centers[j0].x,
input->centers[i0].y-input->centers[j0].y,
lambdasq,m1+k1-i-1+l1-i,m2+k2-j+l2-j);
if ((i<=MIN(l1,k1)) && (j<=MIN(l2,k2-1)))
gamma2+=coefs->gamma2[COEF_INDEX(k1,k2,l1,l2,m1,m2,i,j)]/
POW(lambdasq,i+j+1)*hb2(input->centers[i0].x-input->centers[j0].x,
input->centers[i0].y-input->centers[j0].y,
lambdasq,m1+k1-i+l1-i,m2+k2-j-1+l2-j);
}
}
// Add A+B
output->moments[i0][MOM_INDEX(k1,k2)] += (gamma1+gamma2)*
input->moments[i0][MOM_INDEX(l1,l2)]*
input->moments[j0][MOM_INDEX(m1,m2)]*
POWN1(k1+k2)*POW(input->lambdasq,k1+k2)/
(POW2(k1+k2)*factorial(k1)*factorial(k2));
}
}
// Add C once
if (j0==0) {
if (k1>0)
output->moments[i0][MOM_INDEX(k1,k2)]+=
output->centers[i0].x*
input->moments[i0][MOM_INDEX(k1-1,k2)];
if (k2>0)
output->moments[i0][MOM_INDEX(k1,k2)]+=
output->centers[i0].y*
input->moments[i0][MOM_INDEX(k1,k2-1)];
}
}
} // end for
} // end pragma omp parallel
#ifdef HVS_PROFILE
endtime = time(NULL);
timings.eval_equation += (int)(endtime-starttime);
timings.eval_equation = (int)(timings.eval_equation/2);
#endif
return HVS_OK;
}
int rk4_hvs_solve(hvs_state *curdata, FLOAT_TYPE tn, FLOAT_TYPE timestep, FLOAT_TYPE nu) {
hvs_ode_data k1, k2, k3, k4, kt;
int status = HVS_OK;
int i0,i,i1,i2;
#ifdef HVS_PROFILE
time_t starttime, endtime;
starttime = time(NULL);
#endif
#if HVS_DEBUG
printf("rk4_hvs_solve\n");
#endif
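/*
  Classical fourth-order Runge-Kutta step for the coupled center/moment ODEs:
  y(t+h) = y(t) + (h/6)*(k1 + 2*k2 + 2*k3 + k4), with k1..k4 obtained from
  eval_eq() at t, t+h/2, t+h/2 and t+h respectively.
*/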
// Initialize temporary variable for moments and centers
if ((status=init_ode_data(&kt,curdata))!=HVS_OK) {
return status;
}
memcpy(kt.moments,curdata->moments,sizeof(hvs_moment)*curdata->ncenters);
memcpy(kt.centers,curdata->centers,sizeof(hvs_center)*curdata->ncenters);
// Initialize k1,k2,k3,k4 - rk4 function evaluations
if ((status=init_ode_data(&k1,curdata))!=HVS_OK) {
return status;
}
if ((status=init_ode_data(&k2,curdata))!=HVS_OK) {
return status;
}
if ((status=init_ode_data(&k3,curdata))!=HVS_OK) {
return status;
}
if ((status=init_ode_data(&k4,curdata))!=HVS_OK) {
return status;
}
// Get k1
if ((status=eval_eq(&kt, &k1, curdata->coefs, tn))!=HVS_OK) {
return status;
}
#if HVS_DEBUG>2
printf("m00=%f,m20=%f,m11=%f,m02=%f\n",k1.moments[0][0],k1.moments[0][3],k1.moments[0][4],k1.moments[0][5]);
printf("m00=%f,m20=%f,m11=%f,m02=%f\n",k1.moments[1][0],k1.moments[1][3],k1.moments[1][4],k1.moments[1][5]);
printf("y1=%.12f,y2=%.1f\n", k1.centers[0].y, k1.centers[1].y);
#endif
// Update kt
for (i0=0; i0<curdata->ncenters; i0++) {
for (i=0; i<NCOMBS; i++) {
i1=COMBS_IND1(i);
i2=COMBS_IND2(i);
kt.moments[i0][MOM_INDEX(i1,i2)] = curdata->moments[i0][MOM_INDEX(i1,i2)]+k1.moments[i0][MOM_INDEX(i1,i2)]*0.5*timestep;
}
kt.centers[i0].x = curdata->centers[i0].x+k1.centers[i0].x*0.5*timestep;
kt.centers[i0].y = curdata->centers[i0].y+k1.centers[i0].y*0.5*timestep;
}
kt.lambdasq += 4*0.5*timestep*nu;
// Get k2
if ((status=eval_eq(&kt, &k2, curdata->coefs, tn+0.5*timestep))!=HVS_OK) {
return status;
}
// Update kt
for (i0=0; i0<curdata->ncenters; i0++) {
for (i=0; i<NCOMBS; i++) {
i1=COMBS_IND1(i);
i2=COMBS_IND2(i);
kt.moments[i0][MOM_INDEX(i1,i2)] = curdata->moments[i0][MOM_INDEX(i1,i2)]+k2.moments[i0][MOM_INDEX(i1,i2)]*0.5*timestep;
}
kt.centers[i0].x = curdata->centers[i0].x+k2.centers[i0].x*0.5*timestep;
kt.centers[i0].y = curdata->centers[i0].y+k2.centers[i0].y*0.5*timestep;
}
// Get k3
if ((status=eval_eq(&kt, &k3, curdata->coefs, tn+0.5*timestep))!=HVS_OK) {
return status;
}
// Update kt
for (i0=0; i0<curdata->ncenters; i0++) {
for (i=0; i<NCOMBS; i++) {
i1=COMBS_IND1(i);
i2=COMBS_IND2(i);
kt.moments[i0][MOM_INDEX(i1,i2)] = curdata->moments[i0][MOM_INDEX(i1,i2)]+k3.moments[i0][MOM_INDEX(i1,i2)]*timestep;
}
kt.centers[i0].x = curdata->centers[i0].x+k3.centers[i0].x*timestep;
kt.centers[i0].y = curdata->centers[i0].y+k3.centers[i0].y*timestep;
}
kt.lambdasq += 4*0.5*timestep*nu;
// Get k4
if ((status=eval_eq(&kt, &k4, curdata->coefs, tn+timestep))!=HVS_OK) {
return status;
}
// Use k1,k2,k3,k4 to find rk4 value
for (i0=0; i0<curdata->ncenters; i0++) {
for (i=0; i<NCOMBS; i++) {
i1=COMBS_IND1(i);
i2=COMBS_IND2(i);
curdata->moments[i0][MOM_INDEX(i1,i2)] = curdata->moments[i0][MOM_INDEX(i1,i2)]+1.0/6*timestep*
(k1.moments[i0][MOM_INDEX(i1,i2)]+2*k2.moments[i0][MOM_INDEX(i1,i2)]+2*k3.moments[i0][MOM_INDEX(i1,i2)]+k4.moments[i0][MOM_INDEX(i1,i2)]);
}
curdata->centers[i0].x = curdata->centers[i0].x+1.0/6*timestep*(k1.centers[i0].x+2*k2.centers[i0].x+2*k3.centers[i0].x+k4.centers[i0].x);
curdata->centers[i0].y = curdata->centers[i0].y+1.0/6*timestep*(k1.centers[i0].y+2*k2.centers[i0].y+2*k3.centers[i0].y+k4.centers[i0].y);
}
curdata->lambdasq += 4.0*timestep*nu;
// Free all the Intermediate values
free_ode_data(&kt);
free_ode_data(&k1);
free_ode_data(&k2);
free_ode_data(&k3);
free_ode_data(&k4);
#if HVS_DEBUG>2
printf("m00=%f,m20=%f,m11=%f,m02=%f\n",curdata->moments[0][0],curdata->moments[0][3],curdata->moments[0][4],curdata->moments[0][5]);
printf("m00=%f,m20=%f,m11=%f,m02=%f\n",curdata->moments[1][0],curdata->moments[1][3],curdata->moments[1][4],curdata->moments[1][5]);
printf("y1=%f,y2=%f\n", curdata->centers[0].y, curdata->centers[1].y);
#endif
#ifdef HVS_PROFILE
endtime = time(NULL);
timings.rk_step += (int)(endtime-starttime);
timings.rk_step = (int)(timings.rk_step/2);
#endif
return HVS_OK;
}
int init_coefs(hvs_coefs *coefs) {
int k,k1,k2,m,m1,m2,l,l1,l2,i,j;
#ifdef HVS_PROFILE
time_t starttime, endtime;
starttime = time(NULL);
#endif
#if HVS_DEBUG
printf("init_coefs\n");
#endif
#pragma omp parallel for default(none) shared(coefs) private(k,k1,k2,l,l1,l2,m,m1,m2,i,j)
for(k=0;k<NCOMBS;k++) {
k1=COMBS_IND1(k);
k2=COMBS_IND2(k);
for(l=0;l<NCOMBS;l++) {
l1=COMBS_IND1(l);
l2=COMBS_IND2(l);
for(m=0;m<NCOMBS;m++) {
m1=COMBS_IND1(m);
m2=COMBS_IND2(m);
for(i=0;i<=NMOMENTS;i++)
for(j=0;j<=NMOMENTS;j++) {
if ((i<=MIN(l1,k1-1))&&(j<=MIN(l2,k2))) {
coefs->gamma1[COEF_INDEX(k1,k2,l1,l2,m1,m2,i,j)]=(FLOAT_TYPE)POWN1(l1+l2)*
binomial(l1,i)*binomial(l2,j)*(POW2(i+1)*factorial(k1))/factorial(k1-i-1)*
(POW2(j)*factorial(k2))/factorial(k2-j);
}
if ((i<=MIN(l1,k1))&&(j<=MIN(l2,k2-1))) {
coefs->gamma2[COEF_INDEX(k1,k2,l1,l2,m1,m2,i,j)]=(FLOAT_TYPE)POWN1(l1+l2)*
binomial(l1,i)*binomial(l2,j)*(POW2(i)*factorial(k1))/factorial(k1-i)*
(POW2(j+1)*factorial(k2))/factorial(k2-j-1);
}
}
}
}
}
#ifdef HVS_PROFILE
endtime = time(NULL);
timings.init_coefs = (int)(endtime-starttime);
#endif
return HVS_OK;
}
int step_solver(hvs_state *state, FLOAT_TYPE *tn, const hvs_params *params) {
int status;
#if HVS_DEBUG
printf("step_solver, t=%f\n",(float)(*tn));
#endif
if ((status = rk4_hvs_solve(state, (*tn), params->timestep, params->nu))!=HVS_OK) {
return status;
}
(*tn) = (*tn)+params->timestep;
#if HVS_DEBUG>2
printf("%f=%f\n",state->lambdasq,(*tn)*4.0*params->nu+params->lambda0*params->lambda0);
#endif
return HVS_OK;
}
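/* Illustrative driver loop (a sketch, not part of this file; the stopping
 * criterion 'params.tmax' and the surrounding setup are hypothetical):
 *
 *   FLOAT_TYPE t = 0.0;
 *   while (t < params.tmax) {
 *     if (step_solver(&state, &t, &params) != HVS_OK)
 *       break;
 *   }
 */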
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This makes
/// it possible to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function, as it stores a
/// function_ref; clients should make sure all calls to get() with the same
/// location happen while the function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
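// Illustrative flow (a sketch; "PreferredType" stands for whichever
// PreferredTypeBuilder instance the parser owns): before parsing a return
// expression the parser calls
//   PreferredType.enterReturn(S, Tok.getLocation());
// and code completion later queries
//   PreferredType.get(Tok.getLocation())
// to obtain the expected type tied to that token's start location.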
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// Source of additional semantic information.
ExternalSemaSource *ExternalSource;
/// Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
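// Illustrative use (a sketch; the label, value, and 'PragmaLoc' are
// hypothetical): a "#pragma pack(push, mylabel, 8)" handler would call
//   PackStack.Act(PragmaLoc, PSK_Push_Set, "mylabel", 8);
// and the matching "#pragma pack(pop, mylabel)" would use PSK_Pop with the
// same label.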
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispAttr::Mode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
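// Illustrative use (a sketch; 'NewDC' is a hypothetical DeclContext*): within
// a Sema member function,
//   ContextRAII SavedContext(*this, NewDC);
// switches CurContext to NewDC for the current scope and restores the previous
// context (and the delayed-diagnostics state) when SavedContext is destroyed
// or pop() is called explicitly.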
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before being declared. Rare; they may alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
std::unique_ptr<MangleNumberingContext> MangleNumbering;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
ExprContext(ExprContext) {}
/// Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
void addImplicitTypedef(StringRef Name, QualType T);
bool WarnedStackExhausted = false;
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Warn that the stack is nearly exhausted.
void warnStackExhausted(SourceLocation Loc);
/// Run some code with "sufficient" stack space. (Currently, at least 256K is
/// guaranteed). Produces a warning if we're low on stack space and allocates
/// more in that case. Use this in code that may recurse deeply (for example,
/// in template instantiation) to avoid stack overflow.
void runWithSufficientStackSpace(SourceLocation Loc,
llvm::function_ref<void()> Fn);
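// Illustrative use (a sketch; 'instantiateDeeply' is a hypothetical callee):
//   runWithSufficientStackSpace(Loc, [&] { instantiateDeeply(TL); });
// The callback runs on the current stack when enough space remains; otherwise
// a warning is emitted and more stack is allocated before running it.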
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
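// Illustrative call site (a sketch; the diagnostic ID and decl are
// hypothetical):
//   Diag(D->getLocation(), diag::err_some_problem) << D->getDeclName();
// The streamed arguments are forwarded to the underlying DiagnosticBuilder,
// and the template instantiation stack is printed automatically when the
// diagnostic originates inside an instantiation.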
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if it should find the top-most
/// lambda scope info, ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
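/// Illustrative sketch (not part of the original header): BoundTypeDiagnoser
/// works by expanding a parameter pack over a std::index_sequence so the bound
/// arguments are streamed into the diagnostic builder in order. The same idiom
/// in a self-contained form, using std::cout in place of the builder:
/// \code
///   #include <cstddef>
///   #include <iostream>
///   #include <tuple>
///   #include <utility>
///
///   template <typename... Ts> class BoundPrinter {
///     std::tuple<const Ts &...> Args;
///     template <std::size_t... Is>
///     void emit(std::index_sequence<Is...>) const {
///       // The dummy array forces left-to-right evaluation of the pack.
///       bool Dummy[] = {false, ((std::cout << std::get<Is>(Args) << ' '),
///                               false)...};
///       (void)Dummy;
///     }
///   public:
///     BoundPrinter(const Ts &...Args) : Args(Args...) {}
///     void print() const { emit(std::index_sequence_for<Ts...>()); }
///   };
///
///   // BoundPrinter<int, const char *>(42, "widgets").print();
/// \endcode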
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
keeping a container of all pending expressions and checking whether their
addresses are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
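// Illustrative sketch (not part of the original header): the checks above
// defer the 'noderef' diagnostic until we know whether the dereference is
// only used to form an address. For example:
//   int __attribute__((noderef)) *p;
//   int x = *p;    // diagnosed: the noderef pointer is actually loaded
//   int *q = &*p;  // not diagnosed: only the address is computed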
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T) {
return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
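// Illustrative sketch (not part of the original header): callers pass a
// diagnostic ID plus whatever extra arguments that diagnostic expects; the
// variadic overload forwards them through a BoundTypeDiagnoser. The
// diagnostic ID below is hypothetical:
//   if (RequireCompleteType(Loc, FieldTy, diag::err_incomplete_field_type, FD))
//     return true; // type was incomplete; a diagnostic has been emitted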
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown,
NC_Error,
NC_Keyword,
NC_Type,
NC_Expression,
NC_NestedNameSpecifier,
NC_TypeTemplate,
NC_VarTemplate,
NC_FunctionTemplate,
NC_UndeclaredTemplate,
};
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;
TemplateName Template;
ParsedType Type;
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike in C++, we actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
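// Illustrative sketch (not part of the original header): the final priority
// is the plain integer sum of the weights above. For example, an attribute
// applied via '#pragma clang attribute' to a platform that was itself
// inferred from another platform gets priority
//   AP_PragmaClangAttribute + AP_InferredFromOtherPlatform = 1 + 2 = 3,
// so it will not override an attribute written explicitly on the
// declaration (priority AP_Explicit = 0).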
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL);
bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions,
bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr,
QualType ObjectType = QualType(),
Expr::Classification
ObjectClassification = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
QualType DestType = QualType(),
bool TakingAddress = false);
// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
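// A minimal sketch (identifiers chosen purely for illustration) of how two of
// these lookup kinds can resolve the same identifier differently: in C, tag
// names and ordinary names live in separate name spaces.
//
//   struct stat { int mode; };
//   int stat;                 // ordinary declaration of 'stat'
//   struct stat s;            // LookupTagName finds the struct,
//   int x = stat;             // LookupOrdinaryName finds the variable.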
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
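// For illustration, a minimal sketch of the literal-operator forms these
// results correspond to (the suffix names are made up for the example; the
// string form is a Clang/GNU extension):
//
//   unsigned operator""_cooked(unsigned long long);          // LOLR_Cooked
//   unsigned operator""_raw(const char *);                   // LOLR_Raw
//   template <char...> unsigned operator""_digits();         // LOLR_Template
//   template <typename T, T...> unsigned operator""_chars(); // LOLR_StringTemplate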
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
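// A minimal usage sketch for the CorrectDelayedTyposInExpr overloads above
// (the filter shown simply accepts every rebuilt expression, mirroring the
// default behavior):
//
//   ExprResult Checked = CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr,
//       [](Expr *Rebuilt) -> ExprResult { return Rebuilt; });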
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar
/// which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other of a
/// user-declared setter and getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class and warns each time
/// an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and the
/// parameter CheckTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, the function returns false; otherwise,
/// it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
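// A minimal usage sketch for FunctionScopeRAII (the surrounding code is
// hypothetical): the function scope is popped on destruction unless the pop
// is handed off elsewhere via disable().
//
//   {
//     FunctionScopeRAII FnScope(SemaRef);
//     if (SomeoneElseWillPopIt)
//       FnScope.disable();
//   }   // otherwise PopFunctionScopeInfo() runs here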
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
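// For reference, the range-based for statement these callbacks help build is
// roughly equivalent to the following rewrite (simplified sketch; since C++17
// the begin and end iterators may have different types):
//
//   for (auto x : range) body;
//   // ~> auto &&__range = range;
//   //    for (auto __begin = begin-expr, __end = end-expr;
//   //         __begin != __end; ++__begin) { auto x = *__begin; body; }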
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
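// A rough sketch of what the flags above allow as copy-elision / implicit-move
// candidates (function names chosen for illustration):
//
//   std::string f() { std::string s; return s; }  // local: a candidate even
//                                                 // under CES_Strict
//   std::string g(std::string p) { return p; }    // parameter: needs
//                                                 // CES_AllowParameters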
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if the statement located at \p StmtLoc has a suspicious null
/// statement as its \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
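// A minimal sketch of the lambda capture forms these kinds correspond to
// (variable names chosen for illustration):
//
//   int n = 0;
//   auto byVal = [n]  { return n; };  // TryCapture_ExplicitByVal
//   auto byRef = [&n] { return n; };  // TryCapture_ExplicitByRef
//   auto impl  = [=]  { return n; };  // TryCapture_Implicit (via a default)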
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
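// For reference, the C11 generic selection these callbacks handle has the
// form below (minimal sketch); exactly one association is chosen based on the
// type of the controlling expression:
//
//   _Generic(x, int: "int", double: "double", default: "other")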
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
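// A minimal sketch of the recovery described above (type and member names are
// made up for the example): given
//
//   struct Handle { Widget *operator->(); };
//   Handle h;
//   h.frobnicate();   // error: no member named 'frobnicate' in 'Handle'
//
// the access can be retried as 'h->frobnicate()' using the arguments
// preserved in this struct.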
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
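// Illustrative sketch (not part of this class): the GNU labels-as-values
// extension that ActOnAddrLabel models, paired with a computed goto.
//
//   void dispatch(int i) {
//     static void *targets[] = { &&handle_even, &&handle_odd };
//     goto *targets[i & 1];
//   handle_even: return;
//   handle_odd:  return;
//   }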
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
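// Illustrative sketch (not part of this class): the source-level expressions
// the offsetof callbacks above model. The struct name is hypothetical.
//
//   struct Point { int x; int y[4]; };
//   static_assert(__builtin_offsetof(Point, x) == 0, "first member at offset 0");
//   size_t off = __builtin_offsetof(Point, y[2]);   // designator with an array index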
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
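// Illustrative sketch (not part of this class): __builtin_choose_expr picks
// one operand at compile time; the unselected operand is neither evaluated
// nor considered for the result type.
//
//   int    i = __builtin_choose_expr(1, 7, 2.5);   // selects 7, type int
//   double d = __builtin_choose_expr(0, 7, 2.5);   // selects 2.5, type double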
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
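// Illustrative sketch (not part of this class): these builtins are typically
// used as default arguments, so they capture the caller's location rather
// than the declaration's.
//
//   void log(const char *msg,
//            const char *file = __builtin_FILE(),
//            int line = __builtin_LINE());
//   // log("hi") records the file and line of the call site.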
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
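// Illustrative sketch (not part of this class): the Microsoft __if_exists /
// __if_not_exists statements these checks service (requires -fms-extensions).
// The struct and member names are hypothetical.
//
//   struct S { int member; };
//   void touch(S &s) {
//     __if_exists(S::member)    { s.member = 0; }   // compiled: symbol exists
//     __if_not_exists(S::other) { /* compiled: symbol does not exist */ }
//   }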
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
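// Illustrative sketch (not part of this class): a block literal of the form
// these callbacks parse (compile with -fblocks).
//
//   int (^square)(int) = ^(int x) { return x * x; };
//   int nine = square(3);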
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
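// Illustrative sketch (not part of this class): element-wise conversion with
// __builtin_convertvector; both vector types must have the same number of
// elements.
//
//   typedef float float4 __attribute__((ext_vector_type(4)));
//   typedef int   int4   __attribute__((ext_vector_type(4)));
//   int4 truncate(float4 v) { return __builtin_convertvector(v, int4); }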
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing whether we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
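// Illustrative sketch (not part of this class): an initializer-list
// constructor in the sense of [dcl.init.list]p2; list-initialization prefers
// it over other constructors. The type name is hypothetical.
//
//   struct Widget {
//     Widget(std::initializer_list<int>);   // initializer-list constructor
//     Widget(int, int);
//   };
//   Widget w{1, 2};   // selects the initializer_list overload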
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
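// Illustrative sketch (not part of this class): an inheriting constructor.
// The using-declaration makes Base's constructors usable to construct
// Derived; the derived-class constructor is synthesized on first use.
//
//   struct Base    { Base(int); };
//   struct Derived : Base { using Base::Base; };
//   Derived d(42);   // uses the inherited Base(int)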
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
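// Illustrative sketch (not part of this class): the language rule this
// machinery computes. A defaulted special member is noexcept exactly when
// everything it implicitly calls is non-throwing.
//
//   struct MayThrow { MayThrow() noexcept(false); };
//   struct NoThrow  { };
//   struct A { NoThrow  n; };   // implicit A() is noexcept
//   struct B { MayThrow m; };   // implicit B() is noexcept(false)
//   static_assert(noexcept(A()), "");
//   static_assert(!noexcept(B()), "");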
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
                                         CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
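// Illustrative sketch (not part of this class): a C++17 binary fold
// expression of the shape ActOnCXXFoldExpr parses; the trailing "+ 0" also
// covers an empty pack (BuildEmptyCXXFoldExpr above).
//
//   template <typename... Ts> constexpr auto sum(Ts... ts) { return (ts + ... + 0); }
//   static_assert(sum(1, 2, 3) == 6, "fold over the pack");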
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns 'true' on failure, 'false' on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
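// Illustrative sketch (not part of this class): the three forms described in
// the comment above. The class name is hypothetical.
//
//   struct ClassType { ClassType(int, int); };
//   double d = 3.9;
//   int       i = int(d);            // function-style cast
//   ClassType c = ClassType(1, 2);   // class type construction
//   int       z = int();             // value-initialized to 0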
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
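// Illustrative sketch (not part of this class): the noexcept operator these
// callbacks handle yields a bool constant without evaluating its operand.
//
//   void may_throw();
//   void wont_throw() noexcept;
//   static_assert(!noexcept(may_throw()), "");
//   static_assert(noexcept(wont_throw()), "");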
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, do not emit an error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *
startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange,
TypeSourceInfo *MethodType, SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Optional<std::pair<unsigned, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
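// Illustrative sketch (not part of this class): a C++14 init-capture of the
// kind the two declarations above analyze; moving into the capture is the
// typical use.
//
//   auto owner = std::make_unique<int>(42);
//   auto take  = [p = std::move(owner)] { return *p; };   // init-capture by move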
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
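// Illustrative sketch (not part of this class): a C++2a lambda with an
// explicit template parameter list, as parsed by the callback above.
//
//   auto add = []<typename T>(T a, T b) { return a + b; };
//   int three = add(1, 2);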
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
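// Illustrative sketch (not part of this class): the conversion this routine
// backs; only a captureless lambda converts to a plain function pointer.
//
//   int (*inc)(int) = [](int x) { return x + 1; };
//   int two = inc(1);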
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
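// For illustration, the attribute being checked is written as, e.g.:
//
//   struct [[clang::trivial_abi]] Handle {
//     ~Handle();   // non-trivial destructor; the attribute only takes effect
//     int *Ptr;    // if the documented conditions hold
//   };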
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
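// For illustration, the declarations handled here have the form:
//
//   static_assert(sizeof(void *) >= 4, "unexpectedly small pointers");
//   static_assert(true);   // C++17: the message is optional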
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
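// For illustration, the friend declarations these entry points act on (names
// are illustrative):
//
//   class Matrix {
//     friend class Solver;                                       // friend type
//     friend Matrix operator+(const Matrix &, const Matrix &);   // friend function
//   };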
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
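// For illustration, a covariant return type accepted by this check
// (C++ [class.virtual]p5):
//
//   struct Base { virtual Base *clone() const; };
//   struct Derived : Base { Derived *clone() const override; };  // OK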
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member
/// function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
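// For illustration, the ill-formed case this check diagnoses
// (C++11 [class.virtual]p4):
//
//   struct A { virtual void f() final; };
//   struct B : A { void f() override; };  // error: overrides a 'final' function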
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
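// For illustration, a typical call to the variadic overload above; the
// diagnostic ID shown is only a placeholder:
//
//   if (RequireNonAbstractType(Loc, ReturnType,
//                              diag::err_placeholder_abstract_type, // placeholder
//                              AbstractReturnType))
//     return true;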
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
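// For illustration, a case where ordinary lookup finds nothing but the name is
// still assumed to be a template (C++20 ADL with explicit template arguments):
//
//   namespace N { struct X {}; template <typename T> void g(X); }
//   void call(N::X x) { g<int>(x); }   // 'g' is found only via ADL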
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation(),
AssumedTemplateKind *ATK = nullptr);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
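// For illustration, 'Box' below is a deduction-guide name of the kind this
// predicate recognizes (the type name is illustrative):
//
//   template <typename T> struct Box { Box(T); };
//   template <typename T> Box(T) -> Box<T>;   // C++17 deduction guide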
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
ConceptDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
// Concepts
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// A block expression.
UPPC_Block
};
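// For illustration, an unexpanded parameter pack that would be diagnosed in
// the UPPC_Expression context:
//
//   template <typename ...Ts> void f(Ts ...ts) {
//     use(ts);   // error: 'ts' is unexpanded; should be use(ts...)
//   }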
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
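// For illustration, the pattern/ellipsis pairs these routines model:
//
//   template <typename ...Ts> void wrap(Ts &&...args) {
//     consume(static_cast<Ts &&>(args)...);   // expression pack expansion
//   }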
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
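// For illustration, a call whose deduction fails with TDK_Inconsistent:
//
//   template <typename T> void f(T, T);
//   void g() { f(1, 2.5); }   // T deduced as both 'int' and 'double'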
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
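// For illustration, the deductions these entry points perform:
//
//   auto i = 42;         // deduced as int
//   auto &r = i;         // deduced as int&
//   auto e = {1, 2.5};   // fails: conflicting element types in the initializer list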
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template argument determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
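// For illustration, a usage sketch inside a pack-substitution loop
// (the surrounding names are illustrative):
//
//   for (unsigned I = 0; I != *NumExpansions; ++I) {
//     Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
//     // ... transform the expansion pattern using element I of each pack ...
//   }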
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and isInvalid() will return true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
  /// Otherwise, contains a pointer that, if non-NULL, points to the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
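  // Illustrative usage (a minimal, hedged sketch; SemaRef, T, TemplateArgs,
  // Loc and Entity are placeholders). Deduction code is assumed to wrap a
  // substitution step in a trap and query it afterwards:
  //
  //   SFINAETrap Trap(SemaRef);
  //   QualType Substituted = SemaRef.SubstType(T, TemplateArgs, Loc, Entity);
  //   if (Trap.hasErrorOccurred() || Substituted.isNull()) {
  //     // ... report a substitution failure instead of emitting diagnostics ...
  //   }
  //
  // Errors raised while the trap is alive are counted instead of emitted, and
  // the previous SFINAE state is restored by the destructor.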
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
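  // Illustrative usage (a minimal, hedged sketch; SemaRef, E and TemplateArgs
  // are placeholders). Code that merely probes whether a construct would be
  // valid is assumed to wrap the probe in this scope:
  //
  //   TentativeAnalysisScope Tentative(SemaRef);
  //   ExprResult Probe = SemaRef.SubstExpr(E, TemplateArgs);
  //   bool WouldBeValid = !Probe.isInvalid();
  //
  // The embedded SFINAETrap suppresses diagnostics in the immediate context,
  // and typo correction is re-enabled when the scope is destroyed.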
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
  /// namespace-scope declaration that triggered this implicit instantiation).
  /// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
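  // Illustrative usage (a minimal, hedged sketch): callers that may enqueue
  // eager instantiations are assumed to bracket the work with this scope and
  // call perform() before it is destroyed:
  //
  //   GlobalEagerInstantiationScope GlobalInstantiations(S, /*Enabled=*/true);
  //   // ... work that appends to S.PendingInstantiations and S.VTableUses ...
  //   GlobalInstantiations.perform();
  //
  // perform() defines used vtables and flushes the pending queue; the
  // destructor then asserts both are empty and swaps the saved state back.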
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
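  // Illustrative usage (a minimal, hedged sketch): when instantiating a
  // function body that may contain local classes, the caller is assumed to
  // drain the local queue within the same scope:
  //
  //   LocalEagerInstantiationScope LocalInstantiations(S);
  //   // ... instantiate the body; members of local classes are queued ...
  //   LocalInstantiations.perform(); // PerformPendingInstantiations(/*LocalOnly=*/true)
  //
  // so that those members can still see function-local types, static
  // variables, and enumerators before the destructor swaps the queues back.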
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
    /// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
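  // Illustrative usage (a minimal, hedged sketch; NumParams, InfoForParam and
  // EPI are placeholders). Substitution code is assumed to record each
  // parameter's ExtParameterInfo and only materialize the array if any entry
  // was interesting:
  //
  //   ExtParameterInfoBuilder ParamInfos;
  //   for (unsigned i = 0; i != NumParams; ++i)
  //     ParamInfos.set(i, InfoForParam(i));
  //   EPI.ExtParameterInfos = ParamInfos.getPointerOrNull(NumParams);
  //
  // getPointerOrNull() returns nullptr when every entry was the default, so
  // the resulting ExtProtoInfo carries no extra payload in that case.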
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
VarDecl *getVarTemplateSpecialization(
VarTemplateDecl *VarTempl, const TemplateArgumentListInfo *TemplateArgs,
const DeclarationNameInfo &MemberNameInfo, SourceLocation TemplateKWLoc);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
  /// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
      // from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
  /// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
  /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext;
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckForDelayedContext = true);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPHostFunction(SourceLocation Loc, FunctionDecl *Callee,
bool CheckCaller = true);
/// Check if the expression is allowed to be used in expressions for the
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis();
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Marks all the functions that might be required for the currently active
/// OpenMP context.
void markOpenMPDeclareVariantFuncsReferenced(SourceLocation Loc,
FunctionDecl *Func,
bool MightBeOdrUse);
public:
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported);
  /// Tries to capture a lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
  /// Return true if the provided declaration \a D should be captured by
/// reference.
  /// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// Check if the specified variable is used in 'private' clause.
  /// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
  /// \param Level Relative level of nested OpenMP construct for which the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on Requires directive
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
  /// Called at the end of target region i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \returns None if the function and variant function are not compatible with
/// the pragma, otherwise the pair of original function and variant ref
/// expression.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which the declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
SourceRange SR);
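/// A hedged sketch of the two-step 'declare variant' flow implied by the two
/// declarations above; S is a Sema instance and DG, VariantRef and SR are
/// hypothetical locals of the caller:
///
///   if (Optional<std::pair<FunctionDecl *, Expr *>> DeclVarData =
///           S.checkOpenMPDeclareVariantFunction(DG, VariantRef, SR))
///     S.ActOnOpenMPDeclareVariantDirective(DeclVarData->first,
///                                          DeclVarData->second, SR);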
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The value kind of the result is given by \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
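/// A minimal sketch (assumption, not from the original header) of combining
/// the variadic-call helpers above; S is a Sema instance and FDecl, Proto,
/// Fn and ArgExpr are hypothetical:
///
///   VariadicCallType CallType = S.getVariadicCallType(FDecl, Proto, Fn);
///   ExprResult Arg =
///       S.DefaultVariadicArgumentPromotion(ArgExpr, CallType, FDecl);
///   if (Arg.isInvalid())
///     return ExprError();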
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
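/// A minimal illustrative sketch (assumption) of combining the assignment
/// checks above with DiagnoseAssignmentResult; S is a Sema instance,
/// LHSType, RHS and Loc are hypothetical, and AA_Assigning stands for
/// whichever AssignmentAction applies:
///
///   AssignConvertType ConvTy =
///       S.CheckSingleAssignmentConstraints(LHSType, RHS);
///   if (RHS.isInvalid() ||
///       S.DiagnoseAssignmentResult(ConvTy, Loc, LHSType,
///                                  RHS.get()->getType(), RHS.get(),
///                                  AA_Assigning))
///     return ExprError();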
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// The following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// Type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// Returns true if the cast is invalid.
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// Returns the cast expr.
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
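/// Illustrative sketch (assumption, not part of the original header) of how
/// a ConditionResult produced by ActOnCondition below is typically consumed;
/// S is a Sema instance, Loc and CondExpr are hypothetical:
///
///   ConditionResult Cond = S.ActOnCondition(S.getCurScope(), Loc, CondExpr,
///                                           ConditionKind::ConstexprIf);
///   if (Cond.isInvalid())
///     return StmtError();
///   if (llvm::Optional<bool> Known = Cond.getKnownValue()) {
///     // Constant condition: only the selected branch needs attention.
///   }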
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return the converted condition, or ExprError() if there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Returns ExprError() if conversion to bool is
/// invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns ExprError() on failure.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
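/// A hedged sketch of pairing a custom VerifyICEDiagnoser with
/// VerifyIntegerConstantExpression above; S is a Sema instance, E is a
/// hypothetical expression, and diag::err_not_ice is a hypothetical
/// diagnostic ID:
///
///   struct MyICEDiagnoser : VerifyICEDiagnoser {
///     void diagnoseNotICE(Sema &S, SourceLocation Loc,
///                         SourceRange SR) override {
///       S.Diag(Loc, diag::err_not_ice) << SR;
///     }
///   } Diagnoser;
///   llvm::APSInt Value;
///   if (S.VerifyIntegerConstantExpression(E, &Value, Diagnoser).isInvalid())
///     return ExprError();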
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns ExprError() on failure.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
DeviceCallGraph;
/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
SourceLocation OrigLoc,
const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
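///
/// Example usage (illustrative sketch):
///
/// // Bail out if this call can never be valid in CUDA.
/// if (!CheckCUDACall(Loc, Callee))
/// return ExprError();
/// // Otherwise, keep building the call expression as normal.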
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure the kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
QualType BaseType, QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check whether a field declaration shadows a field inherited from a base class.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check whether the given expression contains a 'break' or 'continue'
/// statement that produces control flow different from GCC's.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// To be used for checking whether the arguments being passed to
/// a function exceed the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedDllExportClasses.empty() &&
"there shouldn't be any pending delayed DLL export classes");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
decltype(DelayedDllExportClasses) SavedDllExportClasses;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
SavedDllExportClasses.swap(S.DelayedDllExportClasses);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks whether the expression is in the set of potentially
/// misaligned members and is converted to some pointer type T with lower
/// or equal alignment requirements. If so, it removes the expression from
/// the set. This is used when we do not want to diagnose such misaligned
/// access (e.g. in conversions to void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
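///
/// Example usage (illustrative sketch; the lambda body is a placeholder):
///
/// RefersToMemberWithReducedAlignment(
/// E, [&](Expr *Sub, RecordDecl *RD, FieldDecl *FD, CharUnits Align) {
/// // Emit a context-specific diagnostic about the misaligned member.
/// });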
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
};
/// RAII object that enters a new expression evaluation context.
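///
/// Example usage (illustrative sketch; ParseOperand() stands for whatever
/// work happens inside the new context):
///
/// {
/// EnterExpressionEvaluationContext Unevaluated(
/// Actions, Sema::ExpressionEvaluationContext::Unevaluated);
/// ExprResult Operand = ParseOperand();
/// } // Context is popped automatically here.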
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
validate.c | /* Copyright (C) 2010-2011 The Trustees of Indiana University. */
/* */
/* Use, modification and distribution is subject to the Boost Software */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt) */
/* */
/* Authors: Jeremiah Willcock */
/* Andrew Lumsdaine */
#include "onesided.h"
#include "common.h"
#include <mpi.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
/* This code assumes signed shifts are arithmetic, which they are on
* practically all modern systems but is not guaranteed by C. */
static inline int64_t get_pred_from_pred_entry(int64_t val) {
return (val << 16) >> 16;
}
static inline uint16_t get_depth_from_pred_entry(int64_t val) {
return (val >> 48) & 0xFFFF;
}
static inline void write_pred_entry_depth(int64_t* loc, uint16_t depth) {
*loc = (*loc & INT64_C(0xFFFFFFFFFFFF)) | ((int64_t)(depth & 0xFFFF) << 48);
}
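/* Illustrative sketch (not used below): the inverse of the two getters above,
 * packing a predecessor vertex into the low 48 bits and a depth into the high
 * 16 bits of a single pred entry. */
static inline int64_t make_pred_entry_sketch(int64_t pred, uint16_t depth) {
  return (pred & INT64_C(0xFFFFFFFFFFFF)) | ((int64_t)(depth & 0xFFFF) << 48);
}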
/* Returns true if all values are in range. */
static int check_value_ranges(const int64_t nglobalverts, const size_t nlocalverts, const int64_t* const pred) {
int any_range_errors = 0;
{
size_t ii;
for (ii = 0; ii < nlocalverts; ii += CHUNKSIZE) {
ptrdiff_t i_start = ii;
ptrdiff_t i_end = ptrdiff_min(ii + CHUNKSIZE, nlocalverts);
ptrdiff_t i;
assert (i_start >= 0 && i_start <= (ptrdiff_t)nlocalverts);
assert (i_end >= 0 && i_end <= (ptrdiff_t)nlocalverts);
#pragma omp parallel for reduction(||:any_range_errors)
for (i = i_start; i < i_end; ++i) {
int64_t p = get_pred_from_pred_entry(pred[i]);
if (p < -1 || p >= nglobalverts) {
fprintf(stderr, "%d: Validation error: parent of vertex %" PRId64 " is out-of-range value %" PRId64 ".\n", rank, vertex_to_global_for_pred(rank, i), p);
any_range_errors = 1;
}
}
}
}
MPI_Allreduce(MPI_IN_PLACE, &any_range_errors, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD);
return !any_range_errors;
}
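/* Illustrative single-rank sketch of the depth fixed-point computed by
 * build_bfs_depth_map() below.  The real code keeps each depth packed in the
 * high 16 bits of pred and gathers remote predecessor entries over MPI; here
 * depth is a separate local array and pred[] holds plain predecessor indices
 * (-1 meaning "no predecessor").  Assumes pred encodes a tree rooted at root,
 * so at most nverts passes are needed. */
static inline void bfs_depth_fixed_point_sketch(size_t nverts,
                                                const int64_t* pred,
                                                int64_t root,
                                                uint16_t* depth) {
  size_t v, pass;
  for (v = 0; v < nverts; ++v) depth[v] = UINT16_MAX;
  depth[root] = 0;
  for (pass = 0; pass < nverts; ++pass) {
    int any_changes = 0;
    for (v = 0; v < nverts; ++v) {
      int64_t p = pred[v];
      if ((int64_t)v == root || p < 0 || depth[p] == UINT16_MAX) continue;
      uint16_t want = (uint16_t)(depth[p] + 1);
      if (depth[v] != want) { depth[v] = want; any_changes = 1; }
    }
    if (!any_changes) break;
  }
}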
/* Use the predecessors in the given map to write the BFS levels to the high 16
* bits of each element in pred; this also catches some problems in pred
* itself. Returns true if the predecessor map is valid. */
static int build_bfs_depth_map(const int64_t nglobalverts, const size_t nlocalverts, const size_t maxlocalverts, const int64_t root, int64_t* const pred) {
(void)nglobalverts;
int validation_passed = 1;
int root_owner;
size_t root_local;
get_vertex_distribution_for_pred(1, &root, &root_owner, &root_local);
int root_is_mine = (root_owner == rank);
if (root_is_mine) assert (root_local < nlocalverts);
{
ptrdiff_t i;
#pragma omp parallel for
for (i = 0; i < (ptrdiff_t)nlocalverts; ++i) write_pred_entry_depth(&pred[i], UINT16_MAX);
if (root_is_mine) write_pred_entry_depth(&pred[root_local], 0);
}
int64_t* restrict pred_pred = (int64_t*)xMPI_Alloc_mem(size_min(CHUNKSIZE, nlocalverts) * sizeof(int64_t)); /* Predecessor info of predecessor vertex for each local vertex */
gather* pred_win = init_gather((void*)pred, nlocalverts, sizeof(int64_t), pred_pred, size_min(CHUNKSIZE, nlocalverts), size_min(CHUNKSIZE, nlocalverts), MPI_INT64_T);
int64_t* restrict pred_vtx = (int64_t*)xmalloc(size_min(CHUNKSIZE, nlocalverts) * sizeof(int64_t)); /* Vertex (not depth) part of pred map */
int* restrict pred_owner = (int*)xmalloc(size_min(CHUNKSIZE, nlocalverts) * sizeof(int));
size_t* restrict pred_local = (size_t*)xmalloc(size_min(CHUNKSIZE, nlocalverts) * sizeof(size_t));
int iter_number = 0;
{
/* Iteratively update depth[v] = min(depth[v], depth[pred[v]] + 1) [saturating at UINT16_MAX] until no changes. */
while (1) {
++iter_number;
int any_changes = 0;
ptrdiff_t ii;
for (ii = 0; ii < (ptrdiff_t)maxlocalverts; ii += CHUNKSIZE) {
ptrdiff_t i_start = ptrdiff_min(ii, nlocalverts);
ptrdiff_t i_end = ptrdiff_min(ii + CHUNKSIZE, nlocalverts);
begin_gather(pred_win);
ptrdiff_t i;
assert (i_start >= 0 && i_start <= (ptrdiff_t)nlocalverts);
assert (i_end >= 0 && i_end <= (ptrdiff_t)nlocalverts);
#pragma omp parallel for
for (i = i_start; i < i_end; ++i) {
pred_vtx[i - i_start] = get_pred_from_pred_entry(pred[i]);
}
get_vertex_distribution_for_pred(i_end - i_start, pred_vtx, pred_owner, pred_local);
#pragma omp parallel for
for (i = i_start; i < i_end; ++i) {
if (pred[i] != -1) {
add_gather_request(pred_win, i - i_start, pred_owner[i - i_start], pred_local[i - i_start], i - i_start);
} else {
pred_pred[i - i_start] = -1;
}
}
end_gather(pred_win);
#pragma omp parallel for reduction(&&:validation_passed) reduction(||:any_changes)
for (i = i_start; i < i_end; ++i) {
if (rank == root_owner && (size_t)i == root_local) continue;
if (get_depth_from_pred_entry(pred_pred[i - i_start]) != UINT16_MAX) {
if (get_depth_from_pred_entry(pred[i]) != UINT16_MAX && get_depth_from_pred_entry(pred[i]) != get_depth_from_pred_entry(pred_pred[i - i_start]) + 1) {
fprintf(stderr, "%d: Validation error: BFS predecessors do not form a tree; see vertices %" PRId64 " (depth %" PRIu16 ") and %" PRId64 " (depth %" PRIu16 ").\n", rank, vertex_to_global_for_pred(rank, i), get_depth_from_pred_entry(pred[i]), get_pred_from_pred_entry(pred[i]), get_depth_from_pred_entry(pred_pred[i - i_start]));
validation_passed = 0;
} else if (get_depth_from_pred_entry(pred[i]) == get_depth_from_pred_entry(pred_pred[i - i_start]) + 1) {
/* Nothing to do */
} else {
write_pred_entry_depth(&pred[i], get_depth_from_pred_entry(pred_pred[i - i_start]) + 1);
any_changes = 1;
}
}
}
}
MPI_Allreduce(MPI_IN_PLACE, &any_changes, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD);
if (!any_changes) break;
}
}
destroy_gather(pred_win);
MPI_Free_mem(pred_pred);
free(pred_owner);
free(pred_local);
free(pred_vtx);
return validation_passed;
}
/* Check the BFS levels in pred against the predecessors given there. Returns
* true if the maps are valid. */
static int check_bfs_depth_map_using_predecessors(const tuple_graph* const tg, const int64_t nglobalverts, const size_t nlocalverts, const size_t maxlocalverts, const int64_t root, const int64_t* const pred) {
(void)nglobalverts; /* Avoid warning */
assert (tg->edgememory_size >= 0 && tg->max_edgememory_size >= tg->edgememory_size && tg->max_edgememory_size <= tg->nglobaledges);
assert (root >= 0 && root < nglobalverts);
assert (nglobalverts >= 0);
assert (pred);
int validation_passed = 1;
int root_owner;
size_t root_local;
get_vertex_distribution_for_pred(1, &root, &root_owner, &root_local);
int root_is_mine = (root_owner == rank);
if (root_is_mine) assert (root_local < nlocalverts);
{
ptrdiff_t i;
if (root_is_mine && get_depth_from_pred_entry(pred[root_local]) != 0) {
fprintf(stderr, "%d: Validation error: depth of root vertex %" PRId64 " is %" PRIu16 ", not 0.\n", rank, root, get_depth_from_pred_entry(pred[root_local]));
validation_passed = 0;
}
#pragma omp parallel for reduction(&&:validation_passed)
for (i = 0; i < (ptrdiff_t)nlocalverts; ++i) {
if (get_pred_from_pred_entry(pred[i]) == -1 &&
get_depth_from_pred_entry(pred[i]) != UINT16_MAX) {
fprintf(stderr, "%d: Validation error: depth of vertex %" PRId64 " with no predecessor is %" PRIu16 ", not UINT16_MAX.\n", rank, vertex_to_global_for_pred(rank, i), get_depth_from_pred_entry(pred[i]));
validation_passed = 0;
} else if (get_pred_from_pred_entry(pred[i]) != -1 &&
get_depth_from_pred_entry(pred[i]) == UINT16_MAX) {
fprintf(stderr, "%d: Validation error: predecessor of claimed unreachable vertex %" PRId64 " is %" PRId64 ", not -1.\n", rank, vertex_to_global_for_pred(rank, i), get_pred_from_pred_entry(pred[i]));
validation_passed = 0;
}
}
}
int64_t* restrict pred_pred = (int64_t*)xMPI_Alloc_mem(size_min(CHUNKSIZE, nlocalverts) * sizeof(int64_t)); /* Predecessor info of predecessor vertex for each local vertex */
gather* pred_win = init_gather((void*)pred, nlocalverts, sizeof(int64_t), pred_pred, size_min(CHUNKSIZE, nlocalverts), size_min(CHUNKSIZE, nlocalverts), MPI_INT64_T);
int64_t* restrict pred_vtx = (int64_t*)xmalloc(size_min(CHUNKSIZE, nlocalverts) * sizeof(int64_t)); /* Vertex (not depth) part of pred map */
int* restrict pred_owner = (int*)xmalloc(size_min(CHUNKSIZE, nlocalverts) * sizeof(int));
size_t* restrict pred_local = (size_t*)xmalloc(size_min(CHUNKSIZE, nlocalverts) * sizeof(size_t));
size_t ii;
for (ii = 0; ii < maxlocalverts; ii += CHUNKSIZE) {
ptrdiff_t i_start = ptrdiff_min(ii, nlocalverts);
ptrdiff_t i_end = ptrdiff_min(ii + CHUNKSIZE, nlocalverts);
begin_gather(pred_win);
ptrdiff_t i;
assert (i_start >= 0 && i_start <= (ptrdiff_t)nlocalverts);
assert (i_end >= 0 && i_end <= (ptrdiff_t)nlocalverts);
assert (i_end >= i_start);
assert (i_end - i_start >= 0 && i_end - i_start <= (ptrdiff_t)size_min(CHUNKSIZE, nlocalverts));
#pragma omp parallel for
for (i = i_start; i < i_end; ++i) {
pred_vtx[i - i_start] = get_pred_from_pred_entry(pred[i]);
}
get_vertex_distribution_for_pred(i_end - i_start, pred_vtx, pred_owner, pred_local);
#pragma omp parallel for
for (i = i_start; i < i_end; ++i) {
if (pred[i] != -1) {
add_gather_request(pred_win, i - i_start, pred_owner[i - i_start], pred_local[i - i_start], i - i_start);
} else {
pred_pred[i - i_start] = -1;
}
}
end_gather(pred_win);
#pragma omp parallel for reduction(&&:validation_passed)
for (i = i_start; i < i_end; ++i) {
if (rank == root_owner && (size_t)i == root_local) continue;
if (get_pred_from_pred_entry(pred[i]) == -1) continue; /* Already checked */
if (get_depth_from_pred_entry(pred_pred[i - i_start]) == UINT16_MAX) {
fprintf(stderr, "%d: Validation error: predecessor %" PRId64 " of vertex %" PRId64 " (depth %" PRIu16 ") is marked as unreachable.\n", rank, get_pred_from_pred_entry(pred[i]), vertex_to_global_for_pred(rank, i), get_depth_from_pred_entry(pred[i]));
validation_passed = 0;
}
if (get_depth_from_pred_entry(pred[i]) != get_depth_from_pred_entry(pred_pred[i - i_start]) + 1) {
fprintf(stderr, "%d: Validation error: BFS predecessors do not form a tree; see vertices %" PRId64 " (depth %" PRIu16 ") and %" PRId64 " (depth %" PRIu16 ").\n", rank, vertex_to_global_for_pred(rank, i), get_depth_from_pred_entry(pred[i]), get_pred_from_pred_entry(pred[i]), get_depth_from_pred_entry(pred_pred[i - i_start]));
validation_passed = 0;
}
}
}
destroy_gather(pred_win);
MPI_Free_mem(pred_pred);
free(pred_owner);
free(pred_local);
free(pred_vtx);
return validation_passed;
}
/* Returns true if result is valid. Also, updates high 16 bits of each element
* of pred to contain the BFS level number (or -1 if not visited) of each
* vertex; this is based on the predecessor map if the user didn't provide it.
* */
int validate_bfs_result(const tuple_graph* const tg, const int64_t nglobalverts, const size_t nlocalverts, const int64_t root, int64_t* const pred, int64_t* const edge_visit_count_ptr) {
assert (tg->edgememory_size >= 0 && tg->max_edgememory_size >= tg->edgememory_size && tg->max_edgememory_size <= tg->nglobaledges);
assert (pred);
*edge_visit_count_ptr = 0; /* Ensure it is a valid pointer */
int ranges_ok = check_value_ranges(nglobalverts, nlocalverts, pred);
if (root < 0 || root >= nglobalverts) {
fprintf(stderr, "%d: Validation error: root vertex %" PRId64 " is invalid.\n", rank, root);
ranges_ok = 0;
}
if (!ranges_ok) return 0; /* Fail */
assert (tg->edgememory_size >= 0 && tg->max_edgememory_size >= tg->edgememory_size && tg->max_edgememory_size <= tg->nglobaledges);
assert (pred);
int validation_passed = 1;
int root_owner;
size_t root_local;
get_vertex_distribution_for_pred(1, &root, &root_owner, &root_local);
int root_is_mine = (root_owner == rank);
/* Get maximum values so loop counts are consistent across ranks. */
uint64_t maxlocalverts_ui = nlocalverts;
MPI_Allreduce(MPI_IN_PLACE, &maxlocalverts_ui, 1, MPI_UINT64_T, MPI_MAX, MPI_COMM_WORLD);
size_t maxlocalverts = (size_t)maxlocalverts_ui;
ptrdiff_t max_bufsize = tuple_graph_max_bufsize(tg);
ptrdiff_t edge_chunk_size = ptrdiff_min(HALF_CHUNKSIZE, max_bufsize);
assert (tg->edgememory_size >= 0 && tg->max_edgememory_size >= tg->edgememory_size && tg->max_edgememory_size <= tg->nglobaledges);
assert (pred);
/* Check that root is its own parent. */
if (root_is_mine) {
assert (root_local < nlocalverts);
if (get_pred_from_pred_entry(pred[root_local]) != root) {
fprintf(stderr, "%d: Validation error: parent of root vertex %" PRId64 " is %" PRId64 ", not the root itself.\n", rank, root, get_pred_from_pred_entry(pred[root_local]));
validation_passed = 0;
}
}
assert (tg->edgememory_size >= 0 && tg->max_edgememory_size >= tg->edgememory_size && tg->max_edgememory_size <= tg->nglobaledges);
assert (pred);
/* Check that nothing else is its own parent. */
{
int* restrict pred_owner = (int*)xmalloc(size_min(CHUNKSIZE, nlocalverts) * sizeof(int));
size_t* restrict pred_local = (size_t*)xmalloc(size_min(CHUNKSIZE, nlocalverts) * sizeof(size_t));
int64_t* restrict pred_vtx = (int64_t*)xmalloc(size_min(CHUNKSIZE, nlocalverts) * sizeof(int64_t)); /* Vertex (not depth) part of pred map */
ptrdiff_t ii;
for (ii = 0; ii < (ptrdiff_t)nlocalverts; ii += CHUNKSIZE) {
ptrdiff_t i_start = ii;
ptrdiff_t i_end = ptrdiff_min(ii + CHUNKSIZE, nlocalverts);
ptrdiff_t i;
assert (i_start >= 0 && i_start <= (ptrdiff_t)nlocalverts);
assert (i_end >= 0 && i_end <= (ptrdiff_t)nlocalverts);
#pragma omp parallel for
for (i = i_start; i < i_end; ++i) {
pred_vtx[i - i_start] = get_pred_from_pred_entry(pred[i]);
}
get_vertex_distribution_for_pred(i_end - i_start, pred_vtx, pred_owner, pred_local);
#pragma omp parallel for reduction(&&:validation_passed)
for (i = i_start; i < i_end; ++i) {
if ((!root_is_mine || (size_t)i != root_local) &&
get_pred_from_pred_entry(pred[i]) != -1 &&
pred_owner[i - i_start] == rank &&
pred_local[i - i_start] == (size_t)i) {
fprintf(stderr, "%d: Validation error: parent of non-root vertex %" PRId64 " is itself.\n", rank, vertex_to_global_for_pred(rank, i));
validation_passed = 0;
}
}
}
free(pred_owner);
free(pred_local);
free(pred_vtx);
}
assert (tg->edgememory_size >= 0 && tg->max_edgememory_size >= tg->edgememory_size && tg->max_edgememory_size <= tg->nglobaledges);
assert (pred);
if (bfs_writes_depth_map()) {
int check_ok = check_bfs_depth_map_using_predecessors(tg, nglobalverts, nlocalverts, maxlocalverts, root, pred);
if (!check_ok) validation_passed = 0;
} else {
/* Create a vertex depth map to use for later validation. */
int pred_ok = build_bfs_depth_map(nglobalverts, nlocalverts, maxlocalverts, root, pred);
if (!pred_ok) validation_passed = 0;
}
{
/* Check that all edges connect vertices whose depths differ by at most
* one, and check that there is an edge from each vertex to its claimed
* predecessor. Also, count visited edges (including duplicates and
* self-loops). */
unsigned char* restrict pred_valid = (unsigned char*)xMPI_Alloc_mem(nlocalverts * sizeof(unsigned char));
memset(pred_valid, 0, nlocalverts * sizeof(unsigned char));
int64_t* restrict edge_endpoint = (int64_t*)xmalloc(2 * edge_chunk_size * sizeof(int64_t));
int* restrict edge_owner = (int*)xmalloc(2 * edge_chunk_size * sizeof(int));
size_t* restrict edge_local = (size_t*)xmalloc(2 * edge_chunk_size * sizeof(size_t));
int64_t* restrict edge_preds = (int64_t*)xMPI_Alloc_mem(2 * edge_chunk_size * sizeof(int64_t));
gather* pred_win = init_gather((void*)pred, nlocalverts, sizeof(int64_t), edge_preds, 2 * edge_chunk_size, 2 * edge_chunk_size, MPI_INT64_T);
unsigned char one = 1;
scatter_constant* pred_valid_win = init_scatter_constant((void*)pred_valid, nlocalverts, sizeof(unsigned char), &one, 2 * edge_chunk_size, MPI_UNSIGNED_CHAR);
int64_t edge_visit_count = 0;
ITERATE_TUPLE_GRAPH_BEGIN(tg, buf, bufsize) {
ptrdiff_t ii;
for (ii = 0; ii < max_bufsize; ii += HALF_CHUNKSIZE) {
ptrdiff_t i_start = ptrdiff_min(ii, bufsize);
ptrdiff_t i_end = ptrdiff_min(ii + HALF_CHUNKSIZE, bufsize);
assert (i_end - i_start <= edge_chunk_size);
ptrdiff_t i;
#pragma omp parallel for
for (i = i_start; i < i_end; ++i) {
int64_t v0 = get_v0_from_edge(&buf[i]);
int64_t v1 = get_v1_from_edge(&buf[i]);
edge_endpoint[(i - i_start) * 2 + 0] = v0;
edge_endpoint[(i - i_start) * 2 + 1] = v1;
}
get_vertex_distribution_for_pred(2 * (i_end - i_start), edge_endpoint, edge_owner, edge_local);
begin_gather(pred_win);
#pragma omp parallel for
for (i = i_start; i < i_end; ++i) {
add_gather_request(pred_win, (i - i_start) * 2 + 0, edge_owner[(i - i_start) * 2 + 0], edge_local[(i - i_start) * 2 + 0], (i - i_start) * 2 + 0);
add_gather_request(pred_win, (i - i_start) * 2 + 1, edge_owner[(i - i_start) * 2 + 1], edge_local[(i - i_start) * 2 + 1], (i - i_start) * 2 + 1);
}
end_gather(pred_win);
begin_scatter_constant(pred_valid_win);
#pragma omp parallel for reduction(&&:validation_passed) reduction(+:edge_visit_count)
for (i = i_start; i < i_end; ++i) {
int64_t src = get_v0_from_edge(&buf[i]);
int64_t tgt = get_v1_from_edge(&buf[i]);
uint16_t src_depth = get_depth_from_pred_entry(edge_preds[(i - i_start) * 2 + 0]);
uint16_t tgt_depth = get_depth_from_pred_entry(edge_preds[(i - i_start) * 2 + 1]);
if (src_depth != UINT16_MAX && tgt_depth == UINT16_MAX) {
fprintf(stderr, "%d: Validation error: edge connects vertex %" PRId64 " in the BFS tree (depth %" PRIu16 ") to vertex %" PRId64 " outside the tree.\n", rank, src, src_depth, tgt);
validation_passed = 0;
} else if (src_depth == UINT16_MAX && tgt_depth != UINT16_MAX) {
fprintf(stderr, "%d: Validation error: edge connects vertex %" PRId64 " in the BFS tree (depth %" PRIu16 ") to vertex %" PRId64 " outside the tree.\n", rank, tgt, tgt_depth, src);
validation_passed = 0;
} else if (src_depth - tgt_depth < -1 ||
src_depth - tgt_depth > 1) {
fprintf(stderr, "%d: Validation error: depths of edge endpoints %" PRId64 " (depth %" PRIu16 ") and %" PRId64 " (depth %" PRIu16 ") are too far apart (abs. val. > 1).\n", rank, src, src_depth, tgt, tgt_depth);
validation_passed = 0;
} else if (src_depth != UINT16_MAX) {
++edge_visit_count;
}
if (get_pred_from_pred_entry(edge_preds[(i - i_start) * 2 + 0]) == tgt) {
add_scatter_constant_request(pred_valid_win, edge_owner[(i - i_start) * 2 + 0], edge_local[(i - i_start) * 2 + 0], (i - i_start) * 2 + 0);
}
if (get_pred_from_pred_entry(edge_preds[(i - i_start) * 2 + 1]) == src) {
add_scatter_constant_request(pred_valid_win, edge_owner[(i - i_start) * 2 + 1], edge_local[(i - i_start) * 2 + 1], (i - i_start) * 2 + 1);
}
}
end_scatter_constant(pred_valid_win);
}
} ITERATE_TUPLE_GRAPH_END;
destroy_gather(pred_win);
MPI_Free_mem(edge_preds);
free(edge_owner);
free(edge_local);
free(edge_endpoint);
destroy_scatter_constant(pred_valid_win);
ptrdiff_t i;
#pragma omp parallel for reduction(&&:validation_passed)
for (i = 0; i < (ptrdiff_t)nlocalverts; ++i) {
int64_t p = get_pred_from_pred_entry(pred[i]);
if (p == -1) continue;
int found_pred_edge = pred_valid[i];
if (root_owner == rank && root_local == (size_t)i) found_pred_edge = 1; /* Root vertex */
if (!found_pred_edge) {
int64_t v = vertex_to_global_for_pred(rank, i);
fprintf(stderr, "%d: Validation error: no graph edge from vertex %" PRId64 " to its parent %" PRId64 ".\n", rank, v, get_pred_from_pred_entry(pred[i]));
validation_passed = 0;
}
}
MPI_Free_mem(pred_valid);
MPI_Allreduce(MPI_IN_PLACE, &edge_visit_count, 1, MPI_INT64_T, MPI_SUM, MPI_COMM_WORLD);
*edge_visit_count_ptr = edge_visit_count;
}
/* Collect the global validation result. */
MPI_Allreduce(MPI_IN_PLACE, &validation_passed, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD);
return validation_passed;
}
|
DRB050-functionparameter-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
/*
Arrays passed as function parameters
*/
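/*
Note on the intended race: main() calls foo1(&o1[1], &o1[0], 100), so inside
foo1 the two parameters point into the same array one element apart.  The
write o1[i] in iteration i and the read c[i+1] in iteration i+1 both touch
main's o1[i+1], a loop-carried dependence that becomes a data race once the
loop is parallelized.  Passing two non-overlapping arrays instead would make
the loop race-free.
*/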
void foo1(double o1[], double c[], int len)
{
int i ;
#pragma omp parallel for firstprivate(c, o1, i, len)
for (i = 0; i < len; ++i) {
double volnew_o8 = 0.5 * c[i];
o1[i] = volnew_o8;
}
}
int main()
{
double o1[101];
double c[101];
int i;
int len = 100;
#pragma omp parallel for private(i)
for (i = 0; i < len; ++i) {
c[i] = i + 1.01;
o1[i] = i + 1.01;
}
foo1 (&o1[1], &o1[0], 100);
for (i = 0; i < len; ++i) {
printf("%lf\n",o1[i]);
}
return 0;
}
|
sse.h | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2017-2020 Evan Nemerson <evan@nemerson.com>
* 2015-2017 John W. Ratcliff <jratcliffscarab@gmail.com>
* 2015 Brandon Rowlett <browlett@nvidia.com>
* 2015 Ken Fast <kfast@gdeb.com>
*/
#if !defined(SIMDE_X86_SSE_H)
#define SIMDE_X86_SSE_H
#include "mmx.h"
#if defined(_WIN32)
#include <windows.h>
#endif
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
SIMDE_ALIGN_TO_16 int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN_TO_16 simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#endif
SIMDE_ALIGN_TO_16 simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN_TO_16 uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
SIMDE_ALIGN_TO_16 int8_t i8[16];
SIMDE_ALIGN_TO_16 int16_t i16[8];
SIMDE_ALIGN_TO_16 int32_t i32[4];
SIMDE_ALIGN_TO_16 int64_t i64[2];
SIMDE_ALIGN_TO_16 uint8_t u8[16];
SIMDE_ALIGN_TO_16 uint16_t u16[8];
SIMDE_ALIGN_TO_16 uint32_t u32[4];
SIMDE_ALIGN_TO_16 uint64_t u64[2];
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN_TO_16 simde_int128 i128[1];
SIMDE_ALIGN_TO_16 simde_uint128 u128[1];
#endif
SIMDE_ALIGN_TO_16 simde_float32 f32[4];
SIMDE_ALIGN_TO_16 int_fast32_t i32f[16 / sizeof(int_fast32_t)];
SIMDE_ALIGN_TO_16 uint_fast32_t u32f[16 / sizeof(uint_fast32_t)];
#endif
SIMDE_ALIGN_TO_16 simde__m64_private m64_private[2];
SIMDE_ALIGN_TO_16 simde__m64 m64[2];
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_ALIGN_TO_16 __m128 n;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN_TO_16 int8x16_t neon_i8;
SIMDE_ALIGN_TO_16 int16x8_t neon_i16;
SIMDE_ALIGN_TO_16 int32x4_t neon_i32;
SIMDE_ALIGN_TO_16 int64x2_t neon_i64;
SIMDE_ALIGN_TO_16 uint8x16_t neon_u8;
SIMDE_ALIGN_TO_16 uint16x8_t neon_u16;
SIMDE_ALIGN_TO_16 uint32x4_t neon_u32;
SIMDE_ALIGN_TO_16 uint64x2_t neon_u64;
SIMDE_ALIGN_TO_16 float32x4_t neon_f32;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_ALIGN_TO_16 float64x2_t neon_f64;
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_ALIGN_TO_16 v128_t wasm_v128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32;
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64;
SIMDE_ALIGN_TO_16 SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64;
#endif
#endif
} simde__m128_private;
#if defined(SIMDE_X86_SSE_NATIVE)
typedef __m128 simde__m128;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
typedef float32x4_t simde__m128;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
typedef v128_t simde__m128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128;
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
typedef simde_float32 simde__m128 SIMDE_ALIGN_TO_16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
typedef simde__m128_private simde__m128;
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
typedef simde__m128 __m128;
#endif
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128), "simde__m128 size incorrect");
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128_private), "simde__m128_private size incorrect");
#if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF)
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128) == 16, "simde__m128 is not 16-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128_private) == 16, "simde__m128_private is not 16-byte aligned");
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_private(simde__m128_private v) {
simde__m128 r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128_private
simde__m128_to_private(simde__m128 v) {
simde__m128_private r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
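/* Illustrative sketch (not part of the SIMDe API): portable fallbacks can be
 * written by round-tripping a value through the private view, e.g.
 *
 *   static simde__m128 negate_ps_sketch(simde__m128 a) {
 *     simde__m128_private a_ = simde__m128_to_private(a);
 *     for (int i = 0; i < 4; i++) a_.f32[i] = -a_.f32[i];
 *     return simde__m128_from_private(a_);
 *   }
 */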
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int8x16_t, neon, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int16x8_t, neon, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int32x4_t, neon, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int64x2_t, neon, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint8x16_t, neon, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint16x8_t, neon, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint32x4_t, neon, u32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint64x2_t, neon, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float32x4_t, neon, f32)
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float64x2_t, neon, f64)
#endif
#endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed char), altivec, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed short), altivec, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed int), altivec, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), altivec, u32)
#if defined(SIMDE_BUG_GCC_95782)
SIMDE_FUNCTION_ATTRIBUTES
SIMDE_POWER_ALTIVEC_VECTOR(float)
simde__m128_to_altivec_f32(simde__m128 value) {
simde__m128_private r_ = simde__m128_to_private(value);
return r_.altivec_f32;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_altivec_f32(SIMDE_POWER_ALTIVEC_VECTOR(float) value) {
simde__m128_private r_;
r_.altivec_f32 = value;
return simde__m128_from_private(r_);
}
#else
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(float), altivec, f32)
#endif
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64)
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, v128_t, wasm, v128)
#endif /* defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) */
enum {
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_MM_ROUND_NEAREST = _MM_ROUND_NEAREST,
SIMDE_MM_ROUND_DOWN = _MM_ROUND_DOWN,
SIMDE_MM_ROUND_UP = _MM_ROUND_UP,
SIMDE_MM_ROUND_TOWARD_ZERO = _MM_ROUND_TOWARD_ZERO
#else
SIMDE_MM_ROUND_NEAREST = 0x0000,
SIMDE_MM_ROUND_DOWN = 0x2000,
SIMDE_MM_ROUND_UP = 0x4000,
SIMDE_MM_ROUND_TOWARD_ZERO = 0x6000
#endif
};
#if defined(_MM_FROUND_TO_NEAREST_INT)
# define SIMDE_MM_FROUND_TO_NEAREST_INT _MM_FROUND_TO_NEAREST_INT
# define SIMDE_MM_FROUND_TO_NEG_INF _MM_FROUND_TO_NEG_INF
# define SIMDE_MM_FROUND_TO_POS_INF _MM_FROUND_TO_POS_INF
# define SIMDE_MM_FROUND_TO_ZERO _MM_FROUND_TO_ZERO
# define SIMDE_MM_FROUND_CUR_DIRECTION _MM_FROUND_CUR_DIRECTION
# define SIMDE_MM_FROUND_RAISE_EXC _MM_FROUND_RAISE_EXC
# define SIMDE_MM_FROUND_NO_EXC _MM_FROUND_NO_EXC
#else
# define SIMDE_MM_FROUND_TO_NEAREST_INT 0x00
# define SIMDE_MM_FROUND_TO_NEG_INF 0x01
# define SIMDE_MM_FROUND_TO_POS_INF 0x02
# define SIMDE_MM_FROUND_TO_ZERO 0x03
# define SIMDE_MM_FROUND_CUR_DIRECTION 0x04
# define SIMDE_MM_FROUND_RAISE_EXC 0x00
# define SIMDE_MM_FROUND_NO_EXC 0x08
#endif
#define SIMDE_MM_FROUND_NINT \
(SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_FLOOR \
(SIMDE_MM_FROUND_TO_NEG_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_CEIL \
(SIMDE_MM_FROUND_TO_POS_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_TRUNC \
(SIMDE_MM_FROUND_TO_ZERO | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_RINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_NEARBYINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_NO_EXC)
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) && !defined(_MM_FROUND_TO_NEAREST_INT)
# define _MM_FROUND_TO_NEAREST_INT SIMDE_MM_FROUND_TO_NEAREST_INT
# define _MM_FROUND_TO_NEG_INF SIMDE_MM_FROUND_TO_NEG_INF
# define _MM_FROUND_TO_POS_INF SIMDE_MM_FROUND_TO_POS_INF
# define _MM_FROUND_TO_ZERO SIMDE_MM_FROUND_TO_ZERO
# define _MM_FROUND_CUR_DIRECTION SIMDE_MM_FROUND_CUR_DIRECTION
# define _MM_FROUND_RAISE_EXC SIMDE_MM_FROUND_RAISE_EXC
# define _MM_FROUND_NINT SIMDE_MM_FROUND_NINT
# define _MM_FROUND_FLOOR SIMDE_MM_FROUND_FLOOR
# define _MM_FROUND_CEIL SIMDE_MM_FROUND_CEIL
# define _MM_FROUND_TRUNC SIMDE_MM_FROUND_TRUNC
# define _MM_FROUND_RINT SIMDE_MM_FROUND_RINT
# define _MM_FROUND_NEARBYINT SIMDE_MM_FROUND_NEARBYINT
#endif
SIMDE_FUNCTION_ATTRIBUTES
unsigned int
SIMDE_MM_GET_ROUNDING_MODE(void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _MM_GET_ROUNDING_MODE();
#elif defined(SIMDE_HAVE_FENV_H)
unsigned int vfe_mode;
switch (fegetround()) {
#if defined(FE_TONEAREST)
case FE_TONEAREST:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case FE_TOWARDZERO:
vfe_mode = SIMDE_MM_ROUND_TOWARD_ZERO;
break;
#endif
#if defined(FE_UPWARD)
case FE_UPWARD:
vfe_mode = SIMDE_MM_ROUND_UP;
break;
#endif
#if defined(FE_DOWNWARD)
case FE_DOWNWARD:
vfe_mode = SIMDE_MM_ROUND_DOWN;
break;
#endif
default:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
}
return vfe_mode;
#else
return SIMDE_MM_ROUND_NEAREST;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_GET_ROUNDING_MODE() SIMDE_MM_GET_ROUNDING_MODE()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
SIMDE_MM_SET_ROUNDING_MODE(unsigned int a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_MM_SET_ROUNDING_MODE(a);
#elif defined(SIMDE_HAVE_FENV_H)
int fe_mode = FE_TONEAREST;
switch (a) {
#if defined(FE_TONEAREST)
case SIMDE_MM_ROUND_NEAREST:
fe_mode = FE_TONEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case SIMDE_MM_ROUND_TOWARD_ZERO:
fe_mode = FE_TOWARDZERO;
break;
#endif
#if defined(FE_DOWNWARD)
case SIMDE_MM_ROUND_DOWN:
fe_mode = FE_DOWNWARD;
break;
#endif
#if defined(FE_UPWARD)
case SIMDE_MM_ROUND_UP:
fe_mode = FE_UPWARD;
break;
#endif
default:
return;
}
fesetround(fe_mode);
#else
(void) a;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_ROUNDING_MODE(a) SIMDE_MM_SET_ROUNDING_MODE(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_mm_getcsr (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_getcsr();
#else
return SIMDE_MM_GET_ROUNDING_MODE();
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_getcsr() simde_mm_getcsr()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_setcsr (uint32_t a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_setcsr(a);
#else
SIMDE_MM_SET_ROUNDING_MODE(HEDLEY_STATIC_CAST(unsigned int, a));
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_setcsr(a) simde_mm_setcsr(a)
#endif
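/* Illustrative usage sketch (not part of the upstream sources): the helpers
 * above can be used like the native MXCSR accessors to switch the rounding
 * mode around a block of conversions.  On non-x86 targets only the rounding
 * bits are modelled (via fenv); exception masks and flags are not emulated.
 *
 *   unsigned int saved = SIMDE_MM_GET_ROUNDING_MODE();
 *   SIMDE_MM_SET_ROUNDING_MODE(SIMDE_MM_ROUND_TOWARD_ZERO);
 *   ...conversions that honour the current direction now truncate...
 *   SIMDE_MM_SET_ROUNDING_MODE(saved);
 */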
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_round_ps (simde__m128 a, int rounding)
SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
/* For architectures which lack a current direction SIMD instruction.
*
* Note that NEON actually has a current rounding mode instruction,
* but in ARMv8+ the rounding mode is ignored and nearest is always
* used, so we treat ARMv7 as having a rounding mode but ARMv8 as
* not. */
#if \
defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || \
defined(SIMDE_ARM_NEON_A32V8_NATIVE)
if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION)
rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) >> 13;
#endif
switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) {
case SIMDE_MM_FROUND_CUR_DIRECTION:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0
r_.neon_f32 = vrndiq_f32(a_.neon_f32);
#elif defined(simde_math_nearbyintf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_nearbyintf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_NEAREST_INT:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0
r_.neon_f32 = vrndaq_f32(a_.neon_f32);
#elif defined(simde_math_roundf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_roundf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_NEG_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_floor(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0
r_.neon_f32 = vrndmq_f32(a_.neon_f32);
#elif defined(simde_math_floorf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_floorf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_POS_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_ceil(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0
r_.neon_f32 = vrndpq_f32(a_.neon_f32);
#elif defined(simde_math_ceilf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_ceilf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
case SIMDE_MM_FROUND_TO_ZERO:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_trunc(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0
r_.neon_f32 = vrndq_f32(a_.neon_f32);
#elif defined(simde_math_truncf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_truncf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
#endif
break;
default:
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd());
}
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE4_1_NATIVE)
#define simde_mm_round_ps(a, rounding) _mm_round_ps(a, rounding)
#endif
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES)
#define _mm_round_ps(a, rounding) simde_mm_round_ps(a, rounding)
#endif
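/* Illustrative usage sketch (not part of the upstream sources): rounding a
 * vector with an explicit mode, independent of the current FP environment.
 *
 *   simde__m128 v = simde_mm_set_ps(2.5f, -1.5f, 0.4f, 1.7f);
 *   simde__m128 n = simde_mm_round_ps(v, SIMDE_MM_FROUND_TO_NEAREST_INT);
 *   simde__m128 z = simde_mm_round_ps(v, SIMDE_MM_FROUND_TO_ZERO);
 *
 * Note that the SIMDE_MM_FROUND_TO_NEAREST_INT fallback above uses
 * simde_math_roundf, which rounds halfway cases away from zero, whereas the
 * native SSE4.1 instruction rounds them to even.
 */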
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps(e3, e2, e1, e0);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN_TO_16 simde_float32 data[4] = { e0, e1, e2, e3 };
r_.neon_f32 = vld1q_f32(data);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_make(e0, e1, e2, e3);
#else
r_.f32[0] = e0;
r_.f32[1] = e1;
r_.f32[2] = e2;
r_.f32[3] = e3;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps(e3, e2, e1, e0) simde_mm_set_ps(e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps1 (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps1(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
(void) a;
return vec_splats(a);
#else
return simde_mm_set_ps(a, a, a, a);
#endif
}
#define simde_mm_set1_ps(a) simde_mm_set_ps1(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps1(a) simde_mm_set_ps1(a)
# define _mm_set1_ps(a) simde_mm_set1_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_move_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_move_ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(b_.neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) m = {
16, 17, 18, 19,
4, 5, 6, 7,
8, 9, 10, 11,
12, 13, 14, 15
};
r_.altivec_f32 = vec_perm(a_.altivec_f32, b_.altivec_f32, m);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v8x16_shuffle(b_.wasm_v128, a_.wasm_v128, 0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 4, 1, 2, 3);
#else
r_.f32[0] = b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_move_ss(a, b) simde_mm_move_ss((a), (b))
#endif
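/* Illustrative note (not part of the upstream sources): move_ss takes lane 0
 * from b and lanes 1..3 from a, which is why many of the scalar *_ss
 * operations below are implemented as
 *
 *   simde_mm_move_ss(a, simde_mm_add_ps(a, b));
 *
 * i.e. compute the full-width result and splice only its low lane into a.
 */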
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vaddq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_add(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 + b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] + b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ps(a, b) simde_mm_add_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_add_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t b0 = vgetq_lane_f32(b_.neon_f32, 0);
float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
// the upper lanes of the result must be taken unchanged from a, so only
// lane 0 of b is added (the other addends are zero).
r_.neon_f32 = vaddq_f32(a_.neon_f32, value);
#else
r_.f32[0] = a_.f32[0] + b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ss(a, b) simde_mm_add_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_and_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_and_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vandq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 & b_.i32;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_and(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_and_ps(a, b) simde_mm_and_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_andnot_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_andnot_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_andnot(b_.wasm_v128, a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_andc(b_.altivec_f32, a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32 & b_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]) & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_andnot_ps(a, b) simde_mm_andnot_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_xor_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_xor_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = veorq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_xor(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_xor(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f ^ b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] ^ b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_xor_ps(a, b) simde_mm_xor_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_or_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_or_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vorrq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_or(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f | b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] | b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_or_ps(a, b) simde_mm_or_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_not_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
/* Note: we use ints instead of floats because we don't want cmpeq
* to return false for (NaN, NaN) */
__m128i ai = _mm_castps_si128(a);
return _mm_castsi128_ps(_mm_andnot_si128(ai, _mm_cmpeq_epi32(ai, ai)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vmvnq_s32(a_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_not(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
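/* Illustrative note (not part of the upstream sources): the SSE2 branch above
 * compares the register against itself as integers because _mm_cmpeq_epi32(x, x)
 * always yields all-ones, whereas a floating-point x == x comparison yields
 * zero in any lane holding NaN.  For example, an all-bits-set mask can be
 * obtained portably with
 *
 *   simde__m128 ones = simde_x_mm_not_ps(simde_mm_set1_ps(0.0f));
 */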
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_select_ps(simde__m128 a, simde__m128 b, simde__m128 mask) {
/* This function is for when you want to blend two elements together
* according to a mask. It is similar to _mm_blendv_ps, except that
* it is undefined whether the blend is based on the highest bit in
* each lane (like blendv) or just bitwise operations. This allows
* us to implement the function efficiently everywhere.
*
* Basically, you promise that all the lanes in mask are either 0 or
* ~0. */
#if defined(SIMDE_X86_SSE4_1_NATIVE)
return _mm_blendv_ps(a, b, mask);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b),
mask_ = simde__m128_to_private(mask);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbslq_s32(mask_.neon_u32, b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(b_.wasm_v128, a_.wasm_v128, mask_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_sel(a_.altivec_i32, b_.altivec_i32, mask_.altivec_u32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 ^ ((a_.i32 ^ b_.i32) & mask_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] ^ ((a_.i32[i] ^ b_.i32[i]) & mask_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
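/* Illustrative usage sketch (not part of the upstream sources): the mask for
 * simde_x_mm_select_ps typically comes from one of the comparison functions
 * below, which already produce all-zeros or all-ones per lane.  A branch-free
 * "clamp to an upper limit" looks like
 *
 *   simde__m128 mask = simde_mm_cmplt_ps(x, limit);
 *   simde__m128 y    = simde_x_mm_select_ps(limit, x, mask);
 *
 * which keeps x in the lanes where x < limit and substitutes limit elsewhere.
 */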
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vrhadd_u16(b_.neon_u16, a_.neon_u16);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
uint32_t wa SIMDE_VECTOR(16);
uint32_t wb SIMDE_VECTOR(16);
uint32_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u16);
SIMDE_CONVERT_VECTOR_(wb, b_.u16);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u16, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = (a_.u16[i] + b_.u16[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu16(a, b) simde_mm_avg_pu16(a, b)
# define _m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vrhadd_u8(b_.neon_u8, a_.neon_u8);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
uint16_t wa SIMDE_VECTOR(16);
uint16_t wb SIMDE_VECTOR(16);
uint16_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u8);
SIMDE_CONVERT_VECTOR_(wb, b_.u8);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u8, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] + b_.u8[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu8(a, b) simde_mm_avg_pu8(a, b)
# define _m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#endif
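/* Illustrative note (not part of the upstream sources): both pavg fallbacks
 * above widen the operands and compute (a + b + 1) >> 1, an average that
 * rounds halves up, matching the PAVGB/PAVGW definition.  Widening matters
 * because the +1 must not overflow; e.g. for 8-bit lanes
 *
 *   a = 250, b = 251  ->  (250 + 251 + 1) >> 1 = 251, where the intermediate
 *   sum 502 would not fit in 8 bits.
 */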
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_abs_ps(simde__m128 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,1,0))
return _mm512_castps512_ps128(_mm512_abs_ps(_mm512_castps128_ps512(a)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vabsq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_abs(a_.altivec_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_abs(a_.wasm_v128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_fabsf(a_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vceqq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_eq(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), a_.f32 == b_.f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ps(a, b) simde_mm_cmpeq_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpeq_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ss(a, b) simde_mm_cmpeq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpge_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgeq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpge(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ps(a, b) simde_mm_cmpge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpge_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpge_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ss(a, b) simde_mm_cmpge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpgt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgtq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ps(a, b) simde_mm_cmpgt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpgt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpgt_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ss(a, b) simde_mm_cmpgt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcleq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_le(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmple(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ps(a, b) simde_mm_cmple_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmple_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ss(a, b) simde_mm_cmple_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcltq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmplt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ps(a, b) simde_mm_cmplt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmplt_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ss(a, b) simde_mm_cmplt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ne(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) && SIMDE_ARCH_POWER_CHECK(900) && !defined(HEDLEY_IBM_VERSION)
/* vec_cmpne(SIMDE_POWER_ALTIVEC_VECTOR(float), SIMDE_POWER_ALTIVEC_VECTOR(float))
is missing from XL C/C++ v16.1.1,
though the documentation (table 89 on page 432 of the IBM XL C/C++ for
Linux Compiler Reference, Version 16.1.1) shows that it should be
present. Both GCC and clang support it. */
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpne(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_nor(r_.altivec_f32, r_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ps(a, b) simde_mm_cmpneq_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpneq_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ss(a, b) simde_mm_cmpneq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmplt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ps(a, b) simde_mm_cmpnge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmplt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ss(a, b) simde_mm_cmpnge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmple_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ps(a, b) simde_mm_cmpngt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmple_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ss(a, b) simde_mm_cmpngt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmpgt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ps(a, b) simde_mm_cmpnle_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmpgt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ss(a, b) simde_mm_cmpnle_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmpge_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ps(a, b) simde_mm_cmpnlt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmpge_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ss(a, b) simde_mm_cmpnlt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_and(wasm_f32x4_eq(a, a), wasm_f32x4_eq(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON has no ordered-compare builtin, so compare a == a and b == b to
   detect NaN lanes, then AND the two results (see the sketch after this
   function). */
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vandq_u32(ceqaa, ceqbb);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(wasm_f32x4_eq(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_eq(b_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_and(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? UINT32_C(0) : ~UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ps(a, b) simde_mm_cmpord_ps((a), (b))
#endif
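/* Illustrative note (not part of the upstream sources): "ordered" means
 * neither operand is NaN, and the fallbacks above rely on NaN != NaN.  The
 * per-lane logic is equivalent to the scalar expression
 *
 *   uint32_t ordered = (a == a && b == b) ? ~UINT32_C(0) : UINT32_C(0);
 *
 * and simde_mm_cmpunord_ps below is simply its complement.
 */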
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpunord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_or(wasm_f32x4_ne(a, a), wasm_f32x4_ne(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vmvnq_u32(vandq_u32(ceqaa, ceqbb));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(wasm_f32x4_ne(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_ne(b_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_nand(vec_cmpeq(a_.altivec_f32, a_.altivec_f32), vec_cmpeq(b_.altivec_f32, b_.altivec_f32)));
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ps(a, b) simde_mm_cmpunord_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpunord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpunord_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0])) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ss(a, b) simde_mm_cmpunord_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#else
return a_.f32[0] == b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comieq_ss(a, b) simde_mm_comieq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#else
return a_.f32[0] >= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comige_ss(a, b) simde_mm_comige_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#else
return a_.f32[0] > b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comigt_ss(a, b) simde_mm_comigt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#else
return a_.f32[0] <= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comile_ss(a, b) simde_mm_comile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#else
return a_.f32[0] < b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comilt_ss(a, b) simde_mm_comilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#else
return a_.f32[0] != b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comineq_ss(a, b) simde_mm_comineq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_copysign_ps(simde__m128 dest, simde__m128 src) {
simde__m128_private
r_,
dest_ = simde__m128_to_private(dest),
src_ = simde__m128_to_private(src);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint32x4_t sign_pos = vreinterpretq_u32_f32(vdupq_n_f32(-SIMDE_FLOAT32_C(0.0)));
r_.neon_u32 = vbslq_u32(sign_pos, src_.neon_u32, dest_.neon_u32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
const v128_t sign_pos = wasm_f32x4_splat(-0.0f);
r_.wasm_v128 = wasm_v128_bitselect(src_.wasm_v128, dest_.wasm_v128, sign_pos);
#elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE)
#if !defined(HEDLEY_IBM_VERSION)
r_.altivec_f32 = vec_cpsgn(dest_.altivec_f32, src_.altivec_f32);
#else
r_.altivec_f32 = vec_cpsgn(src_.altivec_f32, dest_.altivec_f32);
#endif
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) sign_pos = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_splats(-0.0f));
r_.altivec_f32 = vec_sel(dest_.altivec_f32, src_.altivec_f32, sign_pos);
#elif defined(SIMDE_IEEE754_STORAGE)
(void) src_;
(void) dest_;
simde__m128 sign_pos = simde_mm_set1_ps(-0.0f);
r_ = simde__m128_to_private(simde_mm_xor_ps(dest, simde_mm_and_ps(simde_mm_xor_ps(dest, src), sign_pos)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_copysignf(dest_.f32[i], src_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
}
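/* Illustrative note (not part of the upstream sources): the
 * SIMDE_IEEE754_STORAGE branch above builds copysign from bitwise ops only.
 * With sign_pos = -0.0f (just the sign bit set),
 *
 *   dest ^ ((dest ^ src) & sign_pos)
 *
 * keeps every bit of dest except the sign bit, which is taken from src; this
 * is the same select-by-mask identity used by simde_x_mm_select_ps above.
 */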
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_xorsign_ps(simde__m128 dest, simde__m128 src) {
return simde_mm_xor_ps(simde_mm_and_ps(simde_mm_set1_ps(-0.0f), src), dest);
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_pi2ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_pi2ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_pi2ps(a, b) simde_mm_cvt_pi2ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_) && !defined(__clang__) && 0
SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32);
#else
a_ = simde__m128_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, simde_math_nearbyintf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ps2pi(a) simde_mm_cvt_ps2pi((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_si2ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_si2ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float, b), a_.neon_f32, 0);
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
r_.i32[1] = a_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_si2ss(a, b) simde_mm_cvt_si2ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_ss2si(a);
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
return vgetq_lane_s32(vcvtnq_s32_f32(simde__m128_to_neon_f32(a)), 0);
#else
simde__m128_private a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ss2si(a) simde_mm_cvt_ss2si((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && 0 /* TODO */
r_.neon_f32 = vmovl_s16(vget_low_s16(vuzp1q_s16(a_.neon_i16, vmovq_n_s16(0))));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
simde_float32 v = a_.i16[i];
r_.f32[i] = v;
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi16_ps(a) simde_mm_cvtpi16_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32_ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32_ps(a, b) simde_mm_cvtpi32_ps((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32x2_ps (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32x2_ps(a, b);
#else
simde__m128_private r_;
simde__m64_private
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vcombine_s32(a_.neon_i32, b_.neon_i32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, a_.i32);
SIMDE_CONVERT_VECTOR_(r_.m64_private[1].f32, b_.i32);
#else
r_.f32[0] = (simde_float32) a_.i32[0];
r_.f32[1] = (simde_float32) a_.i32[1];
r_.f32[2] = (simde_float32) b_.i32[0];
r_.f32[3] = (simde_float32) b_.i32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32x2_ps(a, b) simde_mm_cvtpi32x2_ps(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(a_.neon_i8))));
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[0]);
r_.f32[1] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[1]);
r_.f32[2] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[2]);
r_.f32[3] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[3]);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi8_ps(a) simde_mm_cvtpi8_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi16 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi16(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i16 = vmovn_s32(vcvtq_s32_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = SIMDE_CONVERT_FTOI(int16_t, simde_math_roundf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi16(a) simde_mm_cvtps_pi16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi32(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, simde_math_roundf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi32(a) simde_mm_cvtps_pi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi8 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi8(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95471)
/* Clamp the input to [INT8_MIN, INT8_MAX], round, convert to i32, narrow to
* i16, combine with an all-zero vector of i16 (which will become the upper
* half), narrow to i8. */
float32x4_t max = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MAX));
float32x4_t min = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MIN));
float32x4_t values = vrndnq_f32(vmaxq_f32(vminq_f32(max, a_.neon_f32), min));
r_.neon_i8 = vmovn_s16(vcombine_s16(vmovn_s32(vcvtq_s32_f32(values)), vdup_n_s16(0)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) {
if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, INT8_MAX))
r_.i8[i] = INT8_MAX;
else if (a_.f32[i] < HEDLEY_STATIC_CAST(simde_float32, INT8_MIN))
r_.i8[i] = INT8_MIN;
else
r_.i8[i] = SIMDE_CONVERT_FTOI(int8_t, simde_math_roundf(a_.f32[i]));
}
/* Note: the upper half is undefined */
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi8(a) simde_mm_cvtps_pi8((a))
#endif
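/* Illustrative note (not part of the upstream sources): unlike most of the
 * cvt* helpers, simde_mm_cvtps_pi8 saturates to the int8_t range before
 * rounding.  For example
 *
 *   simde_mm_cvtps_pi8(simde_mm_set_ps(1.5f, -200.0f, 300.0f, 2.0f))
 *
 * produces 2, 127, -128, 2 in the low four 8-bit lanes of the simde__m64
 * result; the upper four lanes are undefined, as noted above.
 */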
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(a_.neon_u16));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (simde_float32) a_.u16[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu16_ps(a) simde_mm_cvtpu16_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(a_.neon_u8))));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.u8[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu8_ps(a) simde_mm_cvtpu8_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi32_ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtsi32_ss(a, b);
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtsi32_ss(a, b) simde_mm_cvtsi32_ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi64_ss (simde__m128 a, int64_t b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtsi64_ss(a, b);
#else
return _mm_cvtsi64x_ss(a, b);
#endif
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtsi64_ss(a, b) simde_mm_cvtsi64_ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32
simde_mm_cvtss_f32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtss_f32(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vgetq_lane_f32(a_.neon_f32, 0);
#else
return a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_f32(a) simde_mm_cvtss_f32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtss_si32 (simde__m128 a) {
return simde_mm_cvt_ss2si(a);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_si32(a) simde_mm_cvtss_si32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvtss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtss_si64(a);
#else
return _mm_cvtss_si64x(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(vgetq_lane_f32(a_.neon_f32, 0)));
#else
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(a_.f32[0]));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_si64(a) simde_mm_cvtss_si64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, a_.f32[i]);
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_mm_cvttps_pi32(a) simde_mm_cvtt_ps2pi(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ps2pi(a) simde_mm_cvtt_ps2pi((a))
# define _mm_cvttps_pi32(a) simde_mm_cvttps_pi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtt_ss2si(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int32_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]);
#endif
#endif
}
#define simde_mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ss2si(a) simde_mm_cvtt_ss2si((a))
# define _mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvttss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) && !defined(_MSC_VER)
#if defined(__PGI)
return _mm_cvttss_si64x(a);
#else
return _mm_cvttss_si64(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
return SIMDE_CONVERT_FTOI(int64_t, a_.f32[0]);
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvttss_si64(a) simde_mm_cvttss_si64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpord_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(simde_mm_cvtss_f32(a)) || simde_math_isnanf(simde_mm_cvtss_f32(b))) ? UINT32_C(0) : ~UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ss(a, b) simde_mm_cmpord_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vdivq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t recip0 = vrecpeq_f32(b_.neon_f32);
float32x4_t recip1 = vmulq_f32(recip0, vrecpsq_f32(recip0, b_.neon_f32));
r_.neon_f32 = vmulq_f32(a_.neon_f32, recip1);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_div(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 / b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] / b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ps(a, b) simde_mm_div_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_div_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_div_ps(a, b)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = a_.f32[0] / b_.f32[0];
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ss(a, b) simde_mm_div_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_mm_extract_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private a_ = simde__m64_to_private(a);
return a_.i16[imm8];
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION)
# if HEDLEY_HAS_WARNING("-Wvector-conversion")
/* https://bugs.llvm.org/show_bug.cgi?id=44589 */
# define simde_mm_extract_pi16(a, imm8) ( \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16((a), (imm8))) \
HEDLEY_DIAGNOSTIC_POP \
)
# else
# define simde_mm_extract_pi16(a, imm8) HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16(a, imm8))
# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
# define simde_mm_extract_pi16(a, imm8) vget_lane_s16(simde__m64_to_private(a).neon_i16, imm8)
#endif
#define simde_m_pextrw(a, imm8) simde_mm_extract_pi16(a, imm8)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_extract_pi16(a, imm8) simde_mm_extract_pi16((a), (imm8))
# define _m_pextrw(a, imm8) simde_mm_extract_pi16((a), (imm8))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_insert_pi16 (simde__m64 a, int16_t i, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private
r_,
a_ = simde__m64_to_private(a);
r_.i64[0] = a_.i64[0];
r_.i16[imm8] = i;
return simde__m64_from_private(r_);
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# if HEDLEY_HAS_WARNING("-Wvector-conversion")
/* https://bugs.llvm.org/show_bug.cgi?id=44589 */
#    define simde_mm_insert_pi16(a, i, imm8) ( \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
(_mm_insert_pi16((a), (i), (imm8))) \
HEDLEY_DIAGNOSTIC_POP \
)
# else
# define simde_mm_insert_pi16(a, i, imm8) _mm_insert_pi16(a, i, imm8)
# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
# define simde_mm_insert_pi16(a, i, imm8) simde__m64_from_neon_i16(vset_lane_s16((i), simde__m64_to_neon_i16(a), (imm8)))
#endif
#define simde_m_pinsrw(a, i, imm8) (simde_mm_insert_pi16(a, i, imm8))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_insert_pi16(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
# define _m_pinsrw(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_ld(0, mem_addr);
#else
simde_memcpy(&r_, SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128), sizeof(r_));
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps(mem_addr) simde_mm_load_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load1_ps (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps1(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_dup_f32(mem_addr);
#else
r_ = simde__m128_to_private(simde_mm_set1_ps(*mem_addr));
#endif
return simde__m128_from_private(r_);
#endif
}
#define simde_mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps1(mem_addr) simde_mm_load1_ps(mem_addr)
# define _mm_load1_ps(mem_addr) simde_mm_load1_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ss (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ss(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(*mem_addr, vdupq_n_f32(0), 0);
#else
r_.f32[0] = *mem_addr;
r_.i32[1] = 0;
r_.i32[2] = 0;
r_.i32[3] = 0;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ss(mem_addr) simde_mm_load_ss(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadh_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_loadh_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vget_low_f32(a_.neon_f32), vld1_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)));
#else
simde__m64_private b_ = *HEDLEY_REINTERPRET_CAST(simde__m64_private const*, mem_addr);
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), (simde__m64 const*) (mem_addr))
#endif
/* The SSE documentation says that there are no alignment requirements
for mem_addr. Unfortunately they used the __m64 type for the argument
which is supposed to be 8-byte aligned, so some compilers (like clang
with -Wcast-align) will generate a warning if you try to cast, say,
a simde_float32* to a simde__m64* for this function.
I think the choice of argument type is unfortunate, but I do think we
need to stick to it here. If there is demand I can always add something
like simde_x_mm_loadl_f32(simde__m128, simde_float32 mem_addr[2]) */
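/* Illustrative only: one way a caller can sidestep the cast-align warning is to
   go through a properly typed temporary (assuming two packed floats):
     simde_float32 lo[2] = { 1.0f, 2.0f };
     simde__m64 tmp;
     simde_memcpy(&tmp, lo, sizeof(tmp));
     r = simde_mm_loadl_pi(a, &tmp);
   which is essentially what the scalar fallback below does internally. */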
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadl_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadl_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vld1_f32(
HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)), vget_high_f32(a_.neon_f32));
#else
simde__m64_private b_;
simde_memcpy(&b_, mem_addr, sizeof(b_));
r_.i32[0] = b_.i32[0];
r_.i32[1] = b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), (simde__m64 const*) (mem_addr))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadr_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadr_ps(mem_addr);
#else
simde__m128_private
r_,
v_ = simde__m128_to_private(simde_mm_load_ps(mem_addr));
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrev64q_f32(v_.neon_f32);
r_.neon_f32 = vextq_f32(r_.neon_f32, r_.neon_f32, 2);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__)
r_.altivec_f32 = vec_reve(v_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, v_.f32, v_.f32, 3, 2, 1, 0);
#else
r_.f32[0] = v_.f32[3];
r_.f32[1] = v_.f32[2];
r_.f32[2] = v_.f32[1];
r_.f32[3] = v_.f32[0];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadr_ps(mem_addr) simde_mm_loadr_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadu_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadu_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_load(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) && defined(__PPC64__)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#else
simde_memcpy(&r_, mem_addr, sizeof(r_));
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadu_ps(mem_addr) simde_mm_loadu_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_maskmove_si64 (simde__m64 a, simde__m64 mask, int8_t* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_maskmove_si64(a, mask, HEDLEY_REINTERPRET_CAST(char*, mem_addr));
#else
simde__m64_private
a_ = simde__m64_to_private(a),
mask_ = simde__m64_to_private(mask);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++)
if (mask_.i8[i] < 0)
mem_addr[i] = a_.i8[i];
#endif
}
#define simde_m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64(a, mask, mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_maskmove_si64(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
# define _m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmax_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pi16(a, b) simde_mm_max_pi16(a, b)
# define _m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && defined(SIMDE_FAST_NANS)
r_.neon_f32 = vmaxq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vbslq_f32(vcgtq_f32(a_.neon_f32, b_.neon_f32), a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE) && defined(SIMDE_FAST_NANS)
r_.wasm_v128 = wasm_f32x4_max(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) && defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_max(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] > b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ps(a, b) simde_mm_max_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmax_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pu8(a, b) simde_mm_max_pu8(a, b)
# define _m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_max_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      float32_t value = vgetq_lane_f32(vmaxq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] > b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ss(a, b) simde_mm_max_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmin_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminsw(a, b) simde_mm_min_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pi16(a, b) simde_mm_min_pi16(a, b)
# define _m_pminsw(a, b) simde_mm_min_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ps(a, b);
#elif defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return simde__m128_from_neon_f32(vminq_f32(simde__m128_to_neon_f32(a), simde__m128_to_neon_f32(b)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS)
r_.wasm_v128 = wasm_f32x4_min(a_.wasm_v128, b_.wasm_v128);
#else
r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128));
#endif
return simde__m128_from_private(r_);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_min(a_.altivec_f32, b_.altivec_f32);
#else
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(b_.altivec_f32, a_.altivec_f32));
#endif
return simde__m128_from_private(r_);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
simde__m128 mask = simde_mm_cmplt_ps(a, b);
return simde_mm_or_ps(simde_mm_and_ps(mask, a), simde_mm_andnot_ps(mask, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] < b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ps(a, b) simde_mm_min_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmin_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] < b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminub(a, b) simde_mm_min_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pu8(a, b) simde_mm_min_pu8(a, b)
# define _m_pminub(a, b) simde_mm_min_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_min_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(vminq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] < b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ss(a, b) simde_mm_min_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movehl_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movehl_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a32 = vget_high_f32(a_.neon_f32);
float32x2_t b32 = vget_high_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(b32, a32);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_mergel(b_.altivec_i64, a_.altivec_i64));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 6, 7, 2, 3);
#else
r_.f32[0] = b_.f32[2];
r_.f32[1] = b_.f32[3];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movehl_ps(a, b) simde_mm_movehl_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movelh_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movelh_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a10 = vget_low_f32(a_.neon_f32);
float32x2_t b10 = vget_low_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(a10, b10);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 1, 4, 5);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float),
vec_mergeh(a_.altivec_i64, b_.altivec_i64));
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movelh_ps(a, b) simde_mm_movelh_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_pi8 (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_pi8(a);
#else
simde__m64_private a_ = simde__m64_to_private(a);
int r = 0;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
uint8x8_t input = a_.neon_u8;
const int8_t xr[8] = {-7, -6, -5, -4, -3, -2, -1, 0};
const uint8x8_t mask_and = vdup_n_u8(0x80);
const int8x8_t mask_shift = vld1_s8(xr);
const uint8x8_t mask_result = vshl_u8(vand_u8(input, mask_and), mask_shift);
uint8x8_t lo = mask_result;
r = vaddv_u8(lo);
#else
const size_t nmemb = sizeof(a_.i8) / sizeof(a_.i8[0]);
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < nmemb ; i++) {
r |= (a_.u8[nmemb - 1 - i] >> 7) << (nmemb - 1 - i);
}
#endif
return r;
#endif
}
#define simde_m_pmovmskb(a) simde_mm_movemask_pi8(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_pi8(a) simde_mm_movemask_pi8(a)
# define _m_pmovmskb(a) simde_mm_movemask_pi8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_ps(a);
#else
int r = 0;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
static const int32_t shift_amount[] = { 0, 1, 2, 3 };
const int32x4_t shift = vld1q_s32(shift_amount);
uint32x4_t tmp = vshrq_n_u32(a_.neon_u32, 31);
return HEDLEY_STATIC_CAST(int, vaddvq_u32(vshlq_u32(tmp, shift)));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
// Shift out everything but the sign bits with a 32-bit unsigned shift right.
uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(a_.neon_u32, 31));
// Merge the two pairs together with a 64-bit unsigned shift right + add.
uint8x16_t paired = vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
// Extract the result.
return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
#else
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < sizeof(a_.u32) / sizeof(a_.u32[0]) ; i++) {
r |= (a_.u32[i] >> ((sizeof(a_.u32[i]) * CHAR_BIT) - 1)) << i;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_ps(a) simde_mm_movemask_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vmulq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_mul(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 * b_.f32;
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_mul(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] * b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ps(a, b) simde_mm_mul_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_mul_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] * b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ss(a, b) simde_mm_mul_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_mulhi_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_mulhi_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint32x4_t t1 = vmull_u16(a_.neon_u16, b_.neon_u16);
const uint32x4_t t2 = vshrq_n_u32(t1, 16);
const uint16x4_t t3 = vmovn_u32(t2);
r_.neon_u16 = t3;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, ((HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) * HEDLEY_STATIC_CAST(uint32_t, b_.u16[i])) >> UINT32_C(16)));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mulhi_pu16(a, b) simde_mm_mulhi_pu16(a, b)
# define _m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_prefetch (char const* p, int i) {
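  /* Portable fallback: the locality hint in i cannot be forwarded here, so it is
     ignored; __builtin_prefetch(p) defaults to a read prefetch with high temporal
     locality.  When SSE is available the macro below passes the hint straight
     through to _mm_prefetch. */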
#if defined(HEDLEY_GCC_VERSION)
__builtin_prefetch(p);
#else
(void) p;
#endif
(void) i;
}
#if defined(SIMDE_X86_SSE_NATIVE)
# define simde_mm_prefetch(p, i) _mm_prefetch(p, i)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_prefetch(p, i) simde_mm_prefetch(p, i)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_negate_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return simde_mm_xor_ps(a, _mm_set1_ps(SIMDE_FLOAT32_C(-0.0)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
r_.altivec_f32 = vec_neg(a_.altivec_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vnegq_f32(a_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_neg(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_NEGATE)
r_.f32 = -a_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = -a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
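      /* vrecpeq_f32 only gives a low-precision initial estimate; each pass through
         the loop below is one Newton-Raphson refinement, since
         vrecpsq_f32(x, a) computes (2 - a*x) and multiplying by the current
         estimate yields x' = x * (2 - a*x). */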
float32x4_t recip = vrecpeq_f32(a_.neon_f32);
#if SIMDE_ACCURACY_PREFERENCE > 0
for (int i = 0; i < SIMDE_ACCURACY_PREFERENCE ; ++i) {
recip = vmulq_f32(recip, vrecpsq_f32(recip, a_.neon_f32));
}
#endif
r_.neon_f32 = recip;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_div(simde__m128_to_private(simde_mm_set1_ps(1.0f)).wasm_v128, a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_re(a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.f32 = 1.0f / a_.f32;
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://stackoverflow.com/questions/12227126/division-as-multiply-and-lut-fast-float-division-reciprocal/12228234#12228234 */
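      /* Sketch of the trick used below: reinterpret the float's bits as an integer,
         subtract them from the magic constant 0x7EF311C3 to get a crude estimate of
         1/x, then apply a single Newton-Raphson step r = r * (2 - x*r) to refine it.
         The result is an approximation roughly in line with the hardware RCPPS
         estimate, not a full-precision divide. */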
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
int32_t ix;
simde_float32 fx = a_.f32[i];
simde_memcpy(&ix, &fx, sizeof(ix));
int32_t x = INT32_C(0x7EF311C3) - ix;
simde_float32 temp;
simde_memcpy(&temp, &x, sizeof(temp));
r_.f32[i] = temp * (SIMDE_FLOAT32_C(2.0) - temp * fx);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ps(a) simde_mm_rcp_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rcp_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
r_.f32[0] = 1.0f / a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ss(a) simde_mm_rcp_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrsqrteq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_rsqrte(a_.altivec_f32);
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf
Pages 100 - 103 */
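      /* This is the classic fast inverse square root: seed an estimate from the
         float's bit pattern with a magic constant (which constant depends on
         SIMDE_ACCURACY_PREFERENCE), then refine with Newton-Raphson steps of the
         form x = x * (1.5 - 0.5*a*x*x); the 1.5008909 constant used below is a
         slightly tuned variant of that 1.5. */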
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[i] = INT32_C(0x5F37624F) - (a_.i32[i] >> 1);
#else
simde_float32 x = a_.f32[i];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[i] = x;
#endif
}
#elif defined(simde_math_sqrtf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ps(a) simde_mm_rsqrt_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rsqrt_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(simde__m128_to_private(simde_mm_rsqrt_ps(a)).neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_IEEE754_STORAGE)
{
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[0] = INT32_C(0x5F37624F) - (a_.i32[0] >> 1);
#else
simde_float32 x = a_.f32[0];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[0] = x;
#endif
}
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#elif defined(simde_math_sqrtf)
r_.f32[0] = 1.0f / simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ss(a) simde_mm_rsqrt_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_sad_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_sad_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint16x4_t t = vpaddl_u8(vabd_u8(a_.neon_u8, b_.neon_u8));
uint16_t r0 = t[0] + t[1] + t[2] + t[3];
r_.neon_u16 = vset_lane_u16(r0, vdup_n_u16(0), 0);
#else
uint16_t sum = 0;
#if defined(SIMDE_HAVE_STDLIB_H)
SIMDE_VECTORIZE_REDUCTION(+:sum)
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
sum += HEDLEY_STATIC_CAST(uint8_t, abs(a_.u8[i] - b_.u8[i]));
}
r_.i16[0] = HEDLEY_STATIC_CAST(int16_t, sum);
r_.i16[1] = 0;
r_.i16[2] = 0;
r_.i16[3] = 0;
#else
HEDLEY_UNREACHABLE();
#endif
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sad_pu8(a, b) simde_mm_sad_pu8(a, b)
# define _m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ss (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ss(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsetq_lane_f32(a, vdupq_n_f32(SIMDE_FLOAT32_C(0.0)), 0);
#else
return simde_mm_set_ps(SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ss(a) simde_mm_set_ss(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setr_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setr_ps(e3, e2, e1, e0);
#else
return simde_mm_set_ps(e0, e1, e2, e3);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setr_ps(e3, e2, e1, e0) simde_mm_setr_ps(e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setzero_ps (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setzero_ps();
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(SIMDE_FLOAT32_C(0.0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_splats(SIMDE_FLOAT32_C(0.0));
#else
simde__m128 r;
simde_memset(&r, 0, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setzero_ps() simde_mm_setzero_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_undefined_ps (void) {
simde__m128_private r_;
#if defined(SIMDE_HAVE_UNDEFINED128)
r_.n = _mm_undefined_ps();
#elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
r_ = simde__m128_to_private(simde_mm_setzero_ps());
#endif
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_undefined_ps() simde_mm_undefined_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_POP
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_setone_ps (void) {
simde__m128 t = simde_mm_setzero_ps();
return simde_mm_cmpeq_ps(t, t);
}
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_sfence (void) {
/* TODO: Use Hedley. */
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_sfence();
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif !defined(__INTEL_COMPILER) && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
#if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 9)
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#else
atomic_thread_fence(memory_order_seq_cst);
#endif
#elif defined(_MSC_VER)
MemoryBarrier();
#elif HEDLEY_HAS_EXTENSION(c_atomic)
__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
__sync_synchronize();
#elif defined(_OPENMP)
#pragma omp critical(simde_mm_sfence_)
{ }
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sfence() simde_mm_sfence()
#endif
#define SIMDE_MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_SHUFFLE(z, y, x, w) SIMDE_MM_SHUFFLE(z, y, x, w)
#endif
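/* SIMDE_MM_SHUFFLE packs four 2-bit lane selectors into one 8-bit immediate, with
   the first argument landing in the top two bits, so SIMDE_MM_SHUFFLE(3, 2, 1, 0)
   == 0xE4 is the identity selection. Illustrative use with the portable names:
     simde__m128 v = simde_mm_setr_ps(1.0f, 2.0f, 3.0f, 4.0f);
     simde__m128 r = simde_mm_shuffle_ps(v, v, SIMDE_MM_SHUFFLE(0, 1, 2, 3));
   which reverses the lane order of v. */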
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_pi16(a, imm8) _mm_shuffle_pi16(a, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
# define simde_mm_shuffle_pi16(a, imm8) (__extension__ ({ \
const simde__m64_private simde__tmp_a_ = simde__m64_to_private(a); \
simde__m64_from_private((simde__m64_private) { .i16 = \
SIMDE_SHUFFLE_VECTOR_(16, 8, \
(simde__tmp_a_).i16, \
(simde__tmp_a_).i16, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3), \
(((imm8) >> 6) & 3)) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_shuffle_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
for (size_t i = 0 ; i < sizeof(r_.i16) / sizeof(r_.i16[0]) ; i++) {
r_.i16[i] = a_.i16[(imm8 >> (i * 2)) & 3];
}
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wconditional-uninitialized")
# pragma clang diagnostic ignored "-Wconditional-uninitialized"
#endif
return simde__m64_from_private(r_);
HEDLEY_DIAGNOSTIC_POP
}
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_m_pshufw(a, imm8) _m_pshufw(a, imm8)
#else
# define simde_m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_pi16(a, imm8) simde_mm_shuffle_pi16(a, imm8)
# define _m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_ps(a, b, imm8) _mm_shuffle_ps(a, b, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_) && 0
# define simde_mm_shuffle_ps(a, b, imm8) (__extension__ ({ \
simde__m128_from_private((simde__m128_private) { .f32 = \
SIMDE_SHUFFLE_VECTOR_(32, 16, \
simde__m128_to_private(a).f32, \
simde__m128_to_private(b).f32, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3) + 4, \
(((imm8) >> 6) & 3) + 4) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_shuffle_ps (simde__m128 a, simde__m128 b, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[(imm8 >> 0) & 3];
r_.f32[1] = a_.f32[(imm8 >> 2) & 3];
r_.f32[2] = b_.f32[(imm8 >> 4) & 3];
r_.f32[3] = b_.f32[(imm8 >> 6) & 3];
return simde__m128_from_private(r_);
}
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_ps(a, b, imm8) simde_mm_shuffle_ps((a), (b), imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vsqrtq_f32(a_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t est = vrsqrteq_f32(a_.neon_f32);
for (int i = 0 ; i <= SIMDE_ACCURACY_PREFERENCE ; i++) {
est = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a_.neon_f32, est), est), est);
}
r_.neon_f32 = vmulq_f32(a_.neon_f32, est);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sqrt(a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_sqrt(a_.altivec_f32);
    #elif defined(simde_math_sqrtf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < sizeof(r_.f32) / sizeof(r_.f32[0]) ; i++) {
r_.f32[i] = simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ps(a) simde_mm_sqrt_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sqrt_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_sqrt_ps(a)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#elif defined(simde_math_sqrtf)
r_.f32[0] = simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ss(a) simde_mm_sqrt_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P5_NATIVE)
vec_st(a_.altivec_f32, 0, mem_addr);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
wasm_v128_store(mem_addr, a_.wasm_v128);
#else
simde_memcpy(mem_addr, &a_, sizeof(a));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps(mem_addr, a) simde_mm_store_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store1_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_float32* mem_addr_ = SIMDE_ALIGN_ASSUME_LIKE(mem_addr, simde__m128);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps1(mem_addr_, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr_, vdupq_lane_f32(vget_low_f32(a_.neon_f32), 0));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
wasm_v128_store(mem_addr_, wasm_v32x4_shuffle(a_.wasm_v128, a_.wasm_v128, 0, 0, 0, 0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(vec_splat(a_.altivec_f32, 0), 0, mem_addr_);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
simde__m128_private tmp_;
tmp_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 0, 0, 0, 0);
      simde_mm_store_ps(mem_addr_, simde__m128_from_private(tmp_));
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr_:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr_[i] = a_.f32[0];
}
#endif
#endif
}
#define simde_mm_store_ps1(mem_addr, a) simde_mm_store1_ps(mem_addr, a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps1(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
# define _mm_store1_ps(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ss (simde_float32* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ss(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_lane_f32(mem_addr, a_.neon_f32, 0);
#else
*mem_addr = a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ss(mem_addr, a) simde_mm_store_ss(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeh_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeh_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1_f32(HEDLEY_REINTERPRET_CAST(float32_t*, mem_addr), vget_high_f32(a_.neon_f32));
#else
simde_memcpy(mem_addr, &(a_.m64[1]), sizeof(a_.m64[1]));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeh_pi(mem_addr, a) simde_mm_storeh_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storel_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storel_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr);
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest_->neon_f32 = vget_low_f32(a_.neon_f32);
#else
dest_->f32[0] = a_.f32[0];
dest_->f32[1] = a_.f32[1];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storel_pi(mem_addr, a) simde_mm_storel_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storer_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storer_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
vec_st(vec_reve(a_.altivec_f32), 0, mem_addr);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t tmp = vrev64q_f32(a_.neon_f32);
vst1q_f32(mem_addr, vextq_f32(tmp, tmp, 2));
#elif defined(SIMDE_SHUFFLE_VECTOR_)
a_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 3, 2, 1, 0);
simde_mm_store_ps(mem_addr, simde__m128_from_private(a_));
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr[i] = a_.f32[((sizeof(a_.f32) / sizeof(a_.f32[0])) - 1) - i];
}
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storer_ps(mem_addr, a) simde_mm_storer_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeu_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeu_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
vec_vsx_st(a_.altivec_f32, 0, mem_addr);
#else
simde_memcpy(mem_addr, &a_, sizeof(a_));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeu_ps(mem_addr, a) simde_mm_storeu_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsubq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sub(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_sub(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 - b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] - b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ps(a, b) simde_mm_sub_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sub_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] - b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ss(a, b) simde_mm_sub_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
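      /* The fenv dance below saves the floating-point environment, performs the
         compare, and then restores the saved environment, so that an
         invalid-operation flag raised by comparing a NaN is discarded; ucomiss is
         a quiet compare and must not signal on quiet NaNs. */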
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] == b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] == b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomieq_ss(a, b) simde_mm_ucomieq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] >= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] >= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomige_ss(a, b) simde_mm_ucomige_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] > b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] > b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomigt_ss(a, b) simde_mm_ucomigt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] <= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] <= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomile_ss(a, b) simde_mm_ucomile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] < b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] < b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomilt_ss(a, b) simde_mm_ucomilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] != b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] != b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomineq_ss(a, b) simde_mm_ucomineq_ss((a), (b))
#endif
#if defined(SIMDE_X86_SSE_NATIVE)
# if defined(__has_builtin)
# if __has_builtin(__builtin_ia32_undef128)
# define SIMDE_HAVE_UNDEFINED128
# endif
# elif !defined(__PGI) && !defined(SIMDE_BUG_GCC_REV_208793) && !defined(_MSC_VER)
# define SIMDE_HAVE_UNDEFINED128
# endif
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpackhi_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpackhi_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip2q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_high_f32(a_.neon_f32);
float32x2_t b1 = vget_high_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 2, 6, 3, 7);
#else
r_.f32[0] = a_.f32[2];
r_.f32[1] = b_.f32[2];
r_.f32[2] = a_.f32[3];
r_.f32[3] = b_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpackhi_ps(a, b) simde_mm_unpackhi_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpacklo_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpacklo_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip1q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_mergeh(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 4, 1, 5);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_low_f32(a_.neon_f32);
float32x2_t b1 = vget_low_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = b_.f32[0];
r_.f32[2] = a_.f32[1];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpacklo_ps(a, b) simde_mm_unpacklo_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_pi (simde__m64* mem_addr, simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_stream_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private*
dest = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr),
a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest->i64[0] = vget_lane_s64(a_.neon_i64, 0);
#else
dest->i64[0] = a_.i64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_pi(mem_addr, a) simde_mm_stream_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_stream_ps(mem_addr, a);
#elif HEDLEY_HAS_BUILTIN(__builtin_nontemporal_store) && defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
simde__m128_private a_ = simde__m128_to_private(a);
__builtin_nontemporal_store(a_.f32, SIMDE_ALIGN_CAST(__typeof__(a_.f32)*, mem_addr));
#else
simde_mm_store_ps(mem_addr, a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_ps(mem_addr, a) simde_mm_stream_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \
float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \
row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), \
vget_low_f32(ROW23.val[0])); \
row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), \
vget_low_f32(ROW23.val[1])); \
row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), \
vget_high_f32(ROW23.val[0])); \
row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), \
vget_high_f32(ROW23.val[1])); \
} while (0)
#else
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
simde__m128 tmp3, tmp2, tmp1, tmp0; \
tmp0 = simde_mm_unpacklo_ps((row0), (row1)); \
tmp2 = simde_mm_unpacklo_ps((row2), (row3)); \
tmp1 = simde_mm_unpackhi_ps((row0), (row1)); \
tmp3 = simde_mm_unpackhi_ps((row2), (row3)); \
row0 = simde_mm_movelh_ps(tmp0, tmp2); \
row1 = simde_mm_movehl_ps(tmp2, tmp0); \
row2 = simde_mm_movelh_ps(tmp1, tmp3); \
row3 = simde_mm_movehl_ps(tmp3, tmp1); \
} while (0)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3)
#endif
#if defined(_MM_EXCEPT_INVALID)
# define SIMDE_MM_EXCEPT_INVALID _MM_EXCEPT_INVALID
#else
# define SIMDE_MM_EXCEPT_INVALID (0x0001)
#endif
#if defined(_MM_EXCEPT_DENORM)
# define SIMDE_MM_EXCEPT_DENORM _MM_EXCEPT_DENORM
#else
# define SIMDE_MM_EXCEPT_DENORM (0x0002)
#endif
#if defined(_MM_EXCEPT_DIV_ZERO)
# define SIMDE_MM_EXCEPT_DIV_ZERO _MM_EXCEPT_DIV_ZERO
#else
# define SIMDE_MM_EXCEPT_DIV_ZERO (0x0004)
#endif
#if defined(_MM_EXCEPT_OVERFLOW)
# define SIMDE_MM_EXCEPT_OVERFLOW _MM_EXCEPT_OVERFLOW
#else
# define SIMDE_MM_EXCEPT_OVERFLOW (0x0008)
#endif
#if defined(_MM_EXCEPT_UNDERFLOW)
# define SIMDE_MM_EXCEPT_UNDERFLOW _MM_EXCEPT_UNDERFLOW
#else
# define SIMDE_MM_EXCEPT_UNDERFLOW (0x0010)
#endif
#if defined(_MM_EXCEPT_INEXACT)
# define SIMDE_MM_EXCEPT_INEXACT _MM_EXCEPT_INEXACT
#else
# define SIMDE_MM_EXCEPT_INEXACT (0x0020)
#endif
#if defined(_MM_EXCEPT_MASK)
# define SIMDE_MM_EXCEPT_MASK _MM_EXCEPT_MASK
#else
# define SIMDE_MM_EXCEPT_MASK \
(SIMDE_MM_EXCEPT_INVALID | SIMDE_MM_EXCEPT_DENORM | \
SIMDE_MM_EXCEPT_DIV_ZERO | SIMDE_MM_EXCEPT_OVERFLOW | \
SIMDE_MM_EXCEPT_UNDERFLOW | SIMDE_MM_EXCEPT_INEXACT)
#endif
#if defined(_MM_MASK_INVALID)
# define SIMDE_MM_MASK_INVALID _MM_MASK_INVALID
#else
# define SIMDE_MM_MASK_INVALID (0x0080)
#endif
#if defined(_MM_MASK_DENORM)
# define SIMDE_MM_MASK_DENORM _MM_MASK_DENORM
#else
# define SIMDE_MM_MASK_DENORM (0x0100)
#endif
#if defined(_MM_MASK_DIV_ZERO)
# define SIMDE_MM_MASK_DIV_ZERO _MM_MASK_DIV_ZERO
#else
# define SIMDE_MM_MASK_DIV_ZERO (0x0200)
#endif
#if defined(_MM_MASK_OVERFLOW)
# define SIMDE_MM_MASK_OVERFLOW _MM_MASK_OVERFLOW
#else
# define SIMDE_MM_MASK_OVERFLOW (0x0400)
#endif
#if defined(_MM_MASK_UNDERFLOW)
# define SIMDE_MM_MASK_UNDERFLOW _MM_MASK_UNDERFLOW
#else
# define SIMDE_MM_MASK_UNDERFLOW (0x0800)
#endif
#if defined(_MM_MASK_INEXACT)
# define SIMDE_MM_MASK_INEXACT _MM_MASK_INEXACT
#else
# define SIMDE_MM_MASK_INEXACT (0x1000)
#endif
#if defined(_MM_MASK_MASK)
# define SIMDE_MM_MASK_MASK _MM_MASK_MASK
#else
# define SIMDE_MM_MASK_MASK \
(SIMDE_MM_MASK_INVALID | SIMDE_MM_MASK_DENORM | \
SIMDE_MM_MASK_DIV_ZERO | SIMDE_MM_MASK_OVERFLOW | \
SIMDE_MM_MASK_UNDERFLOW | SIMDE_MM_MASK_INEXACT)
#endif
#if defined(_MM_FLUSH_ZERO_MASK)
# define SIMDE_MM_FLUSH_ZERO_MASK _MM_FLUSH_ZERO_MASK
#else
# define SIMDE_MM_FLUSH_ZERO_MASK (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_ON)
# define SIMDE_MM_FLUSH_ZERO_ON _MM_FLUSH_ZERO_ON
#else
# define SIMDE_MM_FLUSH_ZERO_ON (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_OFF)
# define SIMDE_MM_FLUSH_ZERO_OFF _MM_FLUSH_ZERO_OFF
#else
# define SIMDE_MM_FLUSH_ZERO_OFF (0x0000)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_SSE_H) */
|
NQueensParallel.c | /**
* Author: https://www.dreamincode.net/forums/topic/336580-recursive-algorithm-for-n-queens-problem/
*/
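/*
 * Usage (editorial note): ./NQueensParallel <board size> <number of threads>.
 * For example, a board size of 8 should report 92 solutions.
 */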
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
unsigned int solutions;
void setQueen(int queens[], int row, int col, int size)
{
//check all previously placed rows for attacks
for(int i = 0; i < row; i++) {
// vertical clashes
if (queens[i] == col) {
return;
}
// diagonal clashes
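        // e.g. a queen at (row 1, col 2) attacks (row 3, col 4), since |2 - 4| == 3 - 1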
if (abs(queens[i] - col) == (row - i)) {
return;
}
}
    // no clashes found, so place the queen
queens[row] = col;
// if we're at the end of the rows
if(row == size - 1) {
#pragma omp atomic
solutions++; // found a solution
}
// else we'll try to fill next row
else {
for(int i = 0; i < size; i++) {
setQueen(queens, row + 1, i, size);
}
}
}
// find all solutions: parallelize over the column of the queen placed in row 0;
// each OpenMP thread explores its own subtree and counts hits via the atomic increment in setQueen
void solve(int size)
{
#pragma omp parallel for
for(int i = 0; i < size; i++) {
// array representing queens placed on a chess board. Index is row, value is column.
int *queens = malloc(sizeof(int)*size);
setQueen(queens, 0, i, size);
free(queens);
}
}
int main(int argc, char* argv[])
{
double start_time, end_time;
int num_threads;
if (argc != 3){
printf("ERROR! Usage: ./executable size numThreads\n");
return EXIT_FAILURE;
}
num_threads = atoi(argv[2]);
int size = atoi(argv[1]);
omp_set_num_threads(num_threads);
start_time = omp_get_wtime();
solve(size);
// get end time
end_time = omp_get_wtime();
// print results
printf("Sequential Solution with a size of n = %d and %d Threads:\n", size, num_threads);
printf("The execution time is %g sec\n", end_time - start_time);
printf("Number of found solutions is %d\n", solutions);
return EXIT_SUCCESS;
} |
RaghavanVorpMaterial.c | /* This file is part of redbKIT.
* Copyright (c) 2016, Ecole Polytechnique Federale de Lausanne (EPFL)
* Author: Federico Negri <federico.negri@epfl.ch>
*/
#include "RaghavanVorpMaterial.h"
/*************************************************************************/
void RaghavanVorpMaterial_forces(mxArray* plhs[], const mxArray* prhs[])
{
double* dim_ptr = mxGetPr(prhs[0]);
int dim = (int)(dim_ptr[0]);
int noe = mxGetN(prhs[4]);
double* nln_ptr = mxGetPr(prhs[5]);
int nln = (int)(nln_ptr[0]);
int numRowsElements = mxGetM(prhs[4]);
int nln2 = nln*nln;
plhs[0] = mxCreateDoubleMatrix(nln*noe*dim,1, mxREAL);
plhs[1] = mxCreateDoubleMatrix(nln*noe*dim,1, mxREAL);
double* myRrows = mxGetPr(plhs[0]);
double* myRcoef = mxGetPr(plhs[1]);
int k,l;
int q;
int NumQuadPoints = mxGetN(prhs[6]);
int NumNodes = (int)(mxGetM(prhs[3]) / dim);
double* U_h = mxGetPr(prhs[3]);
double* w = mxGetPr(prhs[6]);
double* invjac = mxGetPr(prhs[7]);
double* detjac = mxGetPr(prhs[8]);
double* phi = mxGetPr(prhs[9]);
double* gradrefphi = mxGetPr(prhs[10]);
double* elements = mxGetPr(prhs[4]);
double Id[dim][dim];
int d1,d2;
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
Id[d1][d2] = 0;
if (d1==d2)
{
Id[d1][d2] = 1;
}
}
}
double* material_param = mxGetPr(prhs[2]);
double alpha = material_param[0];
double beta = material_param[1];
double bulk = material_param[2];
/*
double mu = Young / (2.0 + 2.0 * Poisson);
double lambda = Young * Poisson /( (1.0 + Poisson) * (1.0-2.0*Poisson) );
double bulk = ( 2.0 / 3.0 ) * mu + lambda;
*/
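    /*
     * Editorial note: material_param appears to hold the Raghavan-Vorp
     * parameters in the order [alpha, beta, bulk]; alpha and beta weight the
     * isochoric strain-energy terms alpha*(I1_bar - 3) + beta*(I1_bar - 3)^2,
     * while bulk penalizes volume changes. The tensor P_Uh assembled below is
     * the first Piola-Kirchhoff stress, consistent with
     * Sigma = 1/det(F) * P * F^T used in RaghavanVorpMaterial_stress.
     */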
/* Assembly: loop over the elements */
int ie;
#pragma omp parallel for shared(invjac,detjac,elements,myRrows,myRcoef,U_h) private(ie,k,l,q,d1,d2) firstprivate(phi,gradrefphi,w,NumQuadPoints,numRowsElements,nln2,nln,NumNodes,Id,alpha,beta,bulk,noe,dim)
for (ie = 0; ie < noe; ie = ie + 1 )
{
double I_C[NumQuadPoints];
double detF[NumQuadPoints];
double logdetF[NumQuadPoints];
double pow23detF[NumQuadPoints];
double pow2detF[NumQuadPoints];
double F[NumQuadPoints][dim][dim];
double invFT[NumQuadPoints][dim][dim];
double C[NumQuadPoints][dim][dim];
double dP[dim][dim];
double P_Uh[dim][dim];
double GradV[dim][dim];
double GradUh[NumQuadPoints][dim][dim];
double gradphi[dim][nln][NumQuadPoints];
for (q = 0; q < NumQuadPoints; q = q + 1 )
{
/* Compute Gradient of Basis functions*/
for (k = 0; k < nln; k = k + 1 )
{
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
gradphi[d1][k][q] = 0;
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
gradphi[d1][k][q] = gradphi[d1][k][q] + INVJAC(ie,d1,d2)*GRADREFPHI(k,q,d2);
}
}
}
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
GradUh[q][d1][d2] = 0;
for (k = 0; k < nln; k = k + 1 )
{
int e_k;
e_k = (int)(elements[ie*numRowsElements + k] + d1*NumNodes - 1);
GradUh[q][d1][d2] = GradUh[q][d1][d2] + U_h[e_k] * gradphi[d2][k][q];
}
F[q][d1][d2] = Id[d1][d2] + GradUh[q][d1][d2];
}
}
detF[q] = MatrixDeterminant(dim, F[q]);
MatrixInvT(dim, F[q], invFT[q] );
MatrixProductAlphaT1(dim, 1.0, F[q], F[q], C[q] );
logdetF[q] = log( detF[q] );
pow23detF[q] = pow(detF[q], -2.0 / 3.0);
pow2detF[q] = pow(detF[q], 2.0);
I_C[q] = Trace(dim, C[q]);
}
int iii = 0;
int ii = 0;
int a, b, i_c, j_c;
/* loop over test functions --> a */
for (a = 0; a < nln; a = a + 1 )
{
/* loop over test components --> i_c */
for (i_c = 0; i_c < dim; i_c = i_c + 1 )
{
/* set gradV to zero*/
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
GradV[d1][d2] = 0;
}
}
double rloc = 0;
for (q = 0; q < NumQuadPoints; q = q + 1 )
{
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
GradV[i_c][d2] = gradphi[d2][a][q];
}
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
P_Uh[d1][d2] = 2.0 * ( alpha + 2.0 * beta * ( pow23detF[q] * I_C[q] - 3.0 ) )
* pow23detF[q] * ( F[q][d1][d2] - 1.0 / 3.0 * I_C[q] * invFT[q][d1][d2] )
+ 1.0 / 2.0 * bulk * ( pow2detF[q] - detF[q] + logdetF[q] ) * invFT[q][d1][d2];
}
}
rloc = rloc + Mdot( dim, GradV, P_Uh) * w[q];
}
myRrows[ie*nln*dim+ii] = elements[a+ie*numRowsElements] + i_c * NumNodes;
myRcoef[ie*nln*dim+ii] = rloc*detjac[ie];
ii = ii + 1;
}
}
}
}
/*************************************************************************/
/*************************************************************************/
void RaghavanVorpMaterial_jacobian(mxArray* plhs[], const mxArray* prhs[])
{
double* dim_ptr = mxGetPr(prhs[0]);
int dim = (int)(dim_ptr[0]);
int noe = mxGetN(prhs[4]);
double* nln_ptr = mxGetPr(prhs[5]);
int nln = (int)(nln_ptr[0]);
int numRowsElements = mxGetM(prhs[4]);
int nln2 = nln*nln;
plhs[0] = mxCreateDoubleMatrix(nln2*noe*dim*dim,1, mxREAL);
plhs[1] = mxCreateDoubleMatrix(nln2*noe*dim*dim,1, mxREAL);
plhs[2] = mxCreateDoubleMatrix(nln2*noe*dim*dim,1, mxREAL);
double* myArows = mxGetPr(plhs[0]);
double* myAcols = mxGetPr(plhs[1]);
double* myAcoef = mxGetPr(plhs[2]);
int k,l;
int q;
int NumQuadPoints = mxGetN(prhs[6]);
int NumNodes = (int)(mxGetM(prhs[3]) / dim);
double* U_h = mxGetPr(prhs[3]);
double* w = mxGetPr(prhs[6]);
double* invjac = mxGetPr(prhs[7]);
double* detjac = mxGetPr(prhs[8]);
double* phi = mxGetPr(prhs[9]);
double* gradrefphi = mxGetPr(prhs[10]);
double* elements = mxGetPr(prhs[4]);
double Id[dim][dim];
int d1,d2;
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
Id[d1][d2] = 0;
if (d1==d2)
{
Id[d1][d2] = 1;
}
}
}
double* material_param = mxGetPr(prhs[2]);
double alpha = material_param[0];
double beta = material_param[1];
double bulk = material_param[2];
/* Assembly: loop over the elements */
int ie;
#pragma omp parallel for shared(invjac,detjac,elements,myAcols,myArows,myAcoef,U_h) private(ie,k,l,q,d1,d2) firstprivate(phi,gradrefphi,w,numRowsElements,nln2,nln,NumNodes,Id,alpha,beta,bulk)
for (ie = 0; ie < noe; ie = ie + 1 )
{
double I_C[NumQuadPoints];
double detF[NumQuadPoints];
double logdetF[NumQuadPoints];
double pow23detF[NumQuadPoints];
double pow2detF[NumQuadPoints];
double F[NumQuadPoints][dim][dim];
double invFT[NumQuadPoints][dim][dim];
double C[NumQuadPoints][dim][dim];
double dP[dim][dim];
double P_Uh[dim][dim];
double GradV[dim][dim];
double GradU[dim][dim];
double GradUh[NumQuadPoints][dim][dim];
double gradphi[dim][nln][NumQuadPoints];
for (q = 0; q < NumQuadPoints; q = q + 1 )
{
/* Compute Gradient of Basis functions*/
for (k = 0; k < nln; k = k + 1 )
{
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
gradphi[d1][k][q] = 0;
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
gradphi[d1][k][q] = gradphi[d1][k][q] + INVJAC(ie,d1,d2)*GRADREFPHI(k,q,d2);
}
}
}
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
GradUh[q][d1][d2] = 0;
for (k = 0; k < nln; k = k + 1 )
{
int e_k;
e_k = (int)(elements[ie*numRowsElements + k] + d1*NumNodes - 1);
GradUh[q][d1][d2] = GradUh[q][d1][d2] + U_h[e_k] * gradphi[d2][k][q];
}
F[q][d1][d2] = Id[d1][d2] + GradUh[q][d1][d2];
}
}
detF[q] = MatrixDeterminant(dim, F[q]);
MatrixInvT(dim, F[q], invFT[q] );
MatrixProductAlphaT1(dim, 1.0, F[q], F[q], C[q] );
logdetF[q] = log( detF[q] );
pow23detF[q] = pow(detF[q], -2.0 / 3.0);
pow2detF[q] = pow(detF[q], 2.0);
I_C[q] = Trace(dim, C[q]);
}
int iii = 0;
int ii = 0;
int a, b, i_c, j_c;
/* loop over test functions --> a */
for (a = 0; a < nln; a = a + 1 )
{
/* loop over test components --> i_c */
for (i_c = 0; i_c < dim; i_c = i_c + 1 )
{
/* set gradV to zero*/
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
GradV[d1][d2] = 0;
}
}
/* loop over trial functions --> b */
for (b = 0; b < nln; b = b + 1 )
{
/* loop over trial components --> j_c */
for (j_c = 0; j_c < dim; j_c = j_c + 1 )
{
/* set gradU to zero*/
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
GradU[d1][d2] = 0;
}
}
double aloc = 0;
for (q = 0; q < NumQuadPoints; q = q + 1 )
{
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
GradV[i_c][d2] = gradphi[d2][a][q];
GradU[j_c][d2] = gradphi[d2][b][q];
}
/* volumetric part */
double dP_vol[dim][dim];
double dP_vol1[dim][dim];
double dP_vol2_tmp[dim][dim];
double dP_vol2[dim][dim];
MatrixScalar(dim, 0.5*bulk * (2.0*pow2detF[q] -detF[q] + 1.0)*Mdot(dim, invFT[q], GradU),
invFT[q], dP_vol);
MatrixProductAlphaT2(dim, 0.5*bulk * ( - pow2detF[q] + detF[q] - logdetF[q]), invFT[q], GradU, dP_vol2_tmp);
MatrixProductAlpha(dim, 1.0, dP_vol2_tmp, invFT[q], dP_vol2);
MatrixSum(dim, dP_vol, dP_vol2);
/* isochoric part */
double dP_iso[dim][dim];
double dP_iso1[dim][dim];
double dP_iso24[dim][dim];
double dP_iso3[dim][dim];
double dP_iso5[dim][dim];
double dP_iso5_tmp[dim][dim];
double dP_iso5_tmp2[dim][dim];
double mu_q = 2.0 * ( alpha + 2.0 * beta * ( pow23detF[q] * I_C[q] - 3.0 ) );
MatrixScalar(dim, -2.0 / 3.0 * mu_q * pow23detF[q] * Mdot(dim, invFT[q], GradU),
F[q], dP_iso1);
MatrixScalar(dim, mu_q * pow23detF[q] *
( 2.0 / 9.0 * I_C[q] * Mdot(dim, invFT[q], GradU)
-2.0 / 3.0 * Mdot(dim, F[q], GradU) ),
invFT[q], dP_iso24);
MatrixScalar(dim, mu_q * pow23detF[q], GradU, dP_iso3);
MatrixProductAlphaT2(dim, 1.0, invFT[q], GradU, dP_iso5_tmp);
MatrixProductAlpha(dim, 1.0, dP_iso5_tmp, invFT[q], dP_iso5_tmp2);
MatrixScalar(dim, 1.0 / 3.0 * mu_q * pow23detF[q] * I_C[q] , dP_iso5_tmp2, dP_iso5);
/* multiplicative factor: */
double dP_iso6[dim][dim];
double dP_6_tmp[dim][dim];
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
dP_6_tmp[d1][d2] = F[q][d1][d2] - 1.0 / 3.0 * I_C[q] * invFT[q][d1][d2] ;
}
}
double scalar = 2.0 * pow23detF[q] * 2.0 * beta * 2.0 * pow23detF[q] * Mdot(dim, dP_6_tmp, GradU);
MatrixScalar(dim, scalar , dP_6_tmp, dP_iso6);
/* Sum all contributes */
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
dP[d1][d2] = dP_vol[d1][d2]
+ dP_iso1[d1][d2]
+ dP_iso24[d1][d2]
+ dP_iso3[d1][d2]
+ dP_iso5[d1][d2]
+ dP_iso6[d1][d2];
}
}
aloc = aloc + Mdot( dim, GradV, dP) * w[q];
}
myArows[ie*nln2*dim*dim+iii] = elements[a+ie*numRowsElements] + i_c * NumNodes;
myAcols[ie*nln2*dim*dim+iii] = elements[b+ie*numRowsElements] + j_c * NumNodes;
myAcoef[ie*nln2*dim*dim+iii] = aloc*detjac[ie];
iii = iii + 1;
}
}
}
}
}
}
/*************************************************************************/
void RaghavanVorpMaterial_jacobianFast(mxArray* plhs[], const mxArray* prhs[])
{
double* dim_ptr = mxGetPr(prhs[0]);
int dim = (int)(dim_ptr[0]);
int noe = mxGetN(prhs[4]);
double* nln_ptr = mxGetPr(prhs[5]);
int nln = (int)(nln_ptr[0]);
int numRowsElements = mxGetM(prhs[4]);
int nln2 = nln*nln;
plhs[0] = mxCreateDoubleMatrix(nln2*noe*dim*dim,1, mxREAL);
plhs[1] = mxCreateDoubleMatrix(nln2*noe*dim*dim,1, mxREAL);
plhs[2] = mxCreateDoubleMatrix(nln2*noe*dim*dim,1, mxREAL);
double* myArows = mxGetPr(plhs[0]);
double* myAcols = mxGetPr(plhs[1]);
double* myAcoef = mxGetPr(plhs[2]);
int k,l;
int q;
int NumQuadPoints = mxGetN(prhs[6]);
int NumNodes = (int)(mxGetM(prhs[3]) / dim);
double* U_h = mxGetPr(prhs[3]);
double* w = mxGetPr(prhs[6]);
double* invjac = mxGetPr(prhs[7]);
double* detjac = mxGetPr(prhs[8]);
double* phi = mxGetPr(prhs[9]);
double* gradrefphi = mxGetPr(prhs[10]);
double* elements = mxGetPr(prhs[4]);
double Id[dim][dim];
int d1,d2;
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
Id[d1][d2] = 0;
if (d1==d2)
{
Id[d1][d2] = 1;
}
}
}
double* material_param = mxGetPr(prhs[2]);
double alpha = material_param[0];
double beta = material_param[1];
double bulk = material_param[2];
/* Assembly: loop over the elements */
int ie;
#pragma omp parallel for shared(invjac,detjac,elements,myAcols,myArows,myAcoef,U_h) private(ie,k,l,q,d1,d2) firstprivate(phi,gradrefphi,w,numRowsElements,nln2,nln,NumNodes,Id,alpha,beta,bulk)
for (ie = 0; ie < noe; ie = ie + 1 )
{
double I_C[NumQuadPoints];
double detF[NumQuadPoints];
double logdetF[NumQuadPoints];
double pow23detF[NumQuadPoints];
double pow2detF[NumQuadPoints];
double F[NumQuadPoints][dim][dim];
double invFT[NumQuadPoints][dim][dim];
double C[NumQuadPoints][dim][dim];
double GradUh[NumQuadPoints][dim][dim];
double gradphi[NumQuadPoints][dim][nln];
for (q = 0; q < NumQuadPoints; q = q + 1 )
{
/* Compute Gradient of Basis functions*/
for (k = 0; k < nln; k = k + 1 )
{
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
gradphi[q][d1][k] = 0;
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
gradphi[q][d1][k] = gradphi[q][d1][k] + INVJAC(ie,d1,d2)*GRADREFPHI(k,q,d2);
}
}
}
for (d1 = 0; d1 < 3; d1 = d1 + 1 )
{
for (d2 = 0; d2 < 3; d2 = d2 + 1 )
{
GradUh[q][d1][d2] = 0;
for (k = 0; k < nln; k = k + 1 )
{
int e_k;
e_k = (int)(elements[ie*numRowsElements + k] + d1*NumNodes - 1);
GradUh[q][d1][d2] = GradUh[q][d1][d2] + U_h[e_k] * gradphi[q][d2][k];
}
F[q][d1][d2] = Id[d1][d2] + GradUh[q][d1][d2];
}
}
detF[q] = MatrixDeterminant3(dim, F[q]);
MatrixInvT3(dim, F[q], invFT[q] );
MatrixProductAlphaT1(dim, 1.0, F[q], F[q], C[q] );
logdetF[q] = log( detF[q] );
pow23detF[q] = pow(detF[q], -2.0 / 3.0);
pow2detF[q] = pow(detF[q], 2.0);
I_C[q] = Trace(dim, C[q]);
}
int iii = 0;
int a, b, i_c, j_c;
double aloc[nln][dim][nln][dim];
/* loop over test functions --> a */
for (a = 0; a < nln; a = a + 1 )
{
/* loop over test components --> i_c */
for (i_c = 0; i_c < 3; i_c = i_c + 1 )
{
/* loop over trial functions --> b */
for (b = 0; b < nln; b = b + 1 )
{
/* loop over trial components --> j_c */
for (j_c = 0; j_c < 3; j_c = j_c + 1 )
{
aloc[a][i_c][b][j_c] = 0.0;
}
}
}
}
for (q = 0; q < NumQuadPoints; q = q + 1 )
{
double mu_q = 2.0 * ( alpha + 2.0 * beta * ( pow23detF[q] * I_C[q] - 3.0 ) );
double vol_factor1 = 0.5*bulk * (2.0*pow2detF[q] -detF[q] + 1.0);
double vol_factor2 = 0.5*bulk * ( - pow2detF[q] + detF[q] - logdetF[q]);
double P_F[dim][dim];
for (d1 = 0; d1 < 3; d1 = d1 + 1 )
{
for (d2 = 0; d2 < 3; d2 = d2 + 1 )
{
P_F[d1][d2] = F[q][d1][d2] - 1.0 / 3.0 * I_C[q] * invFT[q][d1][d2] ;
}
}
/* loop over test functions --> a */
for (a = 0; a < nln; a = a + 1 )
{
/* loop over trial functions --> b */
for (b = 0; b < nln; b = b + 1 )
{
aloc[a][0][b][0] += ( gradphi[q][0][a]*(invFT[q][0][0]*(invFT[q][0][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][0][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][0][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][0][0]*((I_C[q]*invFT[q][0][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][0][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][0][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][0][0]*vol_factor1*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]) + mu_q*pow23detF[q]*gradphi[q][0][b] - (2*P_F[0][0]*mu_q*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][0][0]*mu_q*pow23detF[q]*(F[q][0][0]*gradphi[q][0][b] + F[q][0][1]*gradphi[q][1][b] + F[q][0][2]*gradphi[q][2][b]))/3.0 + 8*P_F[0][0]*beta*pow23detF[q]*pow23detF[q]*(P_F[0][0]*gradphi[q][0][b] + P_F[0][1]*gradphi[q][1][b] + P_F[0][2]*gradphi[q][2][b])) + gradphi[q][1][a]*(invFT[q][0][1]*(invFT[q][0][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][0][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][0][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][0][1]*((I_C[q]*invFT[q][0][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][0][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][0][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][0][1]*vol_factor1*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]) + mu_q*pow23detF[q]*gradphi[q][1][b] - (2*P_F[0][1]*mu_q*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][0][1]*mu_q*pow23detF[q]*(F[q][0][0]*gradphi[q][0][b] + F[q][0][1]*gradphi[q][1][b] + F[q][0][2]*gradphi[q][2][b]))/3.0 + 8*P_F[0][1]*beta*pow23detF[q]*pow23detF[q]*(P_F[0][0]*gradphi[q][0][b] + P_F[0][1]*gradphi[q][1][b] + P_F[0][2]*gradphi[q][2][b])) + gradphi[q][2][a]*(invFT[q][0][2]*(invFT[q][0][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][0][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][0][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][0][2]*((I_C[q]*invFT[q][0][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][0][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][0][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][0][2]*vol_factor1*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]) + mu_q*pow23detF[q]*gradphi[q][2][b] - (2*P_F[0][2]*mu_q*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][0][2]*mu_q*pow23detF[q]*(F[q][0][0]*gradphi[q][0][b] + F[q][0][1]*gradphi[q][1][b] + F[q][0][2]*gradphi[q][2][b]))/3.0 + 8*P_F[0][2]*beta*pow23detF[q]*pow23detF[q]*(P_F[0][0]*gradphi[q][0][b] + P_F[0][1]*gradphi[q][1][b] + P_F[0][2]*gradphi[q][2][b])) ) * w[q];
aloc[a][0][b][1] += ( gradphi[q][0][a]*(invFT[q][1][0]*(invFT[q][0][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][0][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][0][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][1][0]*((I_C[q]*invFT[q][0][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][0][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][0][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][0][0]*vol_factor1*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]) - (2*P_F[0][0]*mu_q*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][0][0]*mu_q*pow23detF[q]*(F[q][1][0]*gradphi[q][0][b] + F[q][1][1]*gradphi[q][1][b] + F[q][1][2]*gradphi[q][2][b]))/3.0 + 8*P_F[0][0]*beta*pow23detF[q]*pow23detF[q]*(P_F[1][0]*gradphi[q][0][b] + P_F[1][1]*gradphi[q][1][b] + P_F[1][2]*gradphi[q][2][b])) + gradphi[q][1][a]*(invFT[q][1][1]*(invFT[q][0][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][0][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][0][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][1][1]*((I_C[q]*invFT[q][0][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][0][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][0][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][0][1]*vol_factor1*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]) - (2*P_F[0][1]*mu_q*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][0][1]*mu_q*pow23detF[q]*(F[q][1][0]*gradphi[q][0][b] + F[q][1][1]*gradphi[q][1][b] + F[q][1][2]*gradphi[q][2][b]))/3.0 + 8*P_F[0][1]*beta*pow23detF[q]*pow23detF[q]*(P_F[1][0]*gradphi[q][0][b] + P_F[1][1]*gradphi[q][1][b] + P_F[1][2]*gradphi[q][2][b])) + gradphi[q][2][a]*(invFT[q][1][2]*(invFT[q][0][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][0][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][0][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][1][2]*((I_C[q]*invFT[q][0][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][0][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][0][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][0][2]*vol_factor1*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]) - (2*P_F[0][2]*mu_q*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][0][2]*mu_q*pow23detF[q]*(F[q][1][0]*gradphi[q][0][b] + F[q][1][1]*gradphi[q][1][b] + F[q][1][2]*gradphi[q][2][b]))/3.0 + 8*P_F[0][2]*beta*pow23detF[q]*pow23detF[q]*(P_F[1][0]*gradphi[q][0][b] + P_F[1][1]*gradphi[q][1][b] + P_F[1][2]*gradphi[q][2][b])) ) * w[q];
aloc[a][0][b][2] += ( gradphi[q][0][a]*(invFT[q][2][0]*(invFT[q][0][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][0][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][0][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][2][0]*((I_C[q]*invFT[q][0][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][0][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][0][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][0][0]*vol_factor1*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]) - (2*P_F[0][0]*mu_q*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][0][0]*mu_q*pow23detF[q]*(F[q][2][0]*gradphi[q][0][b] + F[q][2][1]*gradphi[q][1][b] + F[q][2][2]*gradphi[q][2][b]))/3.0 + 8*P_F[0][0]*beta*pow23detF[q]*pow23detF[q]*(P_F[2][0]*gradphi[q][0][b] + P_F[2][1]*gradphi[q][1][b] + P_F[2][2]*gradphi[q][2][b])) + gradphi[q][1][a]*(invFT[q][2][1]*(invFT[q][0][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][0][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][0][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][2][1]*((I_C[q]*invFT[q][0][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][0][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][0][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][0][1]*vol_factor1*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]) - (2*P_F[0][1]*mu_q*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][0][1]*mu_q*pow23detF[q]*(F[q][2][0]*gradphi[q][0][b] + F[q][2][1]*gradphi[q][1][b] + F[q][2][2]*gradphi[q][2][b]))/3.0 + 8*P_F[0][1]*beta*pow23detF[q]*pow23detF[q]*(P_F[2][0]*gradphi[q][0][b] + P_F[2][1]*gradphi[q][1][b] + P_F[2][2]*gradphi[q][2][b])) + gradphi[q][2][a]*(invFT[q][2][2]*(invFT[q][0][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][0][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][0][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][2][2]*((I_C[q]*invFT[q][0][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][0][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][0][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][0][2]*vol_factor1*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]) - (2*P_F[0][2]*mu_q*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][0][2]*mu_q*pow23detF[q]*(F[q][2][0]*gradphi[q][0][b] + F[q][2][1]*gradphi[q][1][b] + F[q][2][2]*gradphi[q][2][b]))/3.0 + 8*P_F[0][2]*beta*pow23detF[q]*pow23detF[q]*(P_F[2][0]*gradphi[q][0][b] + P_F[2][1]*gradphi[q][1][b] + P_F[2][2]*gradphi[q][2][b])) ) * w[q];
aloc[a][1][b][0] += ( gradphi[q][0][a]*(invFT[q][0][0]*(invFT[q][1][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][1][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][1][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][0][0]*((I_C[q]*invFT[q][1][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][1][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][1][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][1][0]*vol_factor1*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]) - (2*P_F[1][0]*mu_q*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][1][0]*mu_q*pow23detF[q]*(F[q][0][0]*gradphi[q][0][b] + F[q][0][1]*gradphi[q][1][b] + F[q][0][2]*gradphi[q][2][b]))/3.0 + 8*P_F[1][0]*beta*pow23detF[q]*pow23detF[q]*(P_F[0][0]*gradphi[q][0][b] + P_F[0][1]*gradphi[q][1][b] + P_F[0][2]*gradphi[q][2][b])) + gradphi[q][1][a]*(invFT[q][0][1]*(invFT[q][1][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][1][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][1][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][0][1]*((I_C[q]*invFT[q][1][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][1][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][1][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][1][1]*vol_factor1*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]) - (2*P_F[1][1]*mu_q*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][1][1]*mu_q*pow23detF[q]*(F[q][0][0]*gradphi[q][0][b] + F[q][0][1]*gradphi[q][1][b] + F[q][0][2]*gradphi[q][2][b]))/3.0 + 8*P_F[1][1]*beta*pow23detF[q]*pow23detF[q]*(P_F[0][0]*gradphi[q][0][b] + P_F[0][1]*gradphi[q][1][b] + P_F[0][2]*gradphi[q][2][b])) + gradphi[q][2][a]*(invFT[q][0][2]*(invFT[q][1][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][1][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][1][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][0][2]*((I_C[q]*invFT[q][1][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][1][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][1][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][1][2]*vol_factor1*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]) - (2*P_F[1][2]*mu_q*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][1][2]*mu_q*pow23detF[q]*(F[q][0][0]*gradphi[q][0][b] + F[q][0][1]*gradphi[q][1][b] + F[q][0][2]*gradphi[q][2][b]))/3.0 + 8*P_F[1][2]*beta*pow23detF[q]*pow23detF[q]*(P_F[0][0]*gradphi[q][0][b] + P_F[0][1]*gradphi[q][1][b] + P_F[0][2]*gradphi[q][2][b])) ) * w[q];
aloc[a][1][b][1] += ( gradphi[q][0][a]*(invFT[q][1][0]*(invFT[q][1][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][1][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][1][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][1][0]*((I_C[q]*invFT[q][1][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][1][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][1][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][1][0]*vol_factor1*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]) + mu_q*pow23detF[q]*gradphi[q][0][b] - (2*P_F[1][0]*mu_q*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][1][0]*mu_q*pow23detF[q]*(F[q][1][0]*gradphi[q][0][b] + F[q][1][1]*gradphi[q][1][b] + F[q][1][2]*gradphi[q][2][b]))/3.0 + 8*P_F[1][0]*beta*pow23detF[q]*pow23detF[q]*(P_F[1][0]*gradphi[q][0][b] + P_F[1][1]*gradphi[q][1][b] + P_F[1][2]*gradphi[q][2][b])) + gradphi[q][1][a]*(invFT[q][1][1]*(invFT[q][1][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][1][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][1][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][1][1]*((I_C[q]*invFT[q][1][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][1][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][1][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][1][1]*vol_factor1*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]) + mu_q*pow23detF[q]*gradphi[q][1][b] - (2*P_F[1][1]*mu_q*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][1][1]*mu_q*pow23detF[q]*(F[q][1][0]*gradphi[q][0][b] + F[q][1][1]*gradphi[q][1][b] + F[q][1][2]*gradphi[q][2][b]))/3.0 + 8*P_F[1][1]*beta*pow23detF[q]*pow23detF[q]*(P_F[1][0]*gradphi[q][0][b] + P_F[1][1]*gradphi[q][1][b] + P_F[1][2]*gradphi[q][2][b])) + gradphi[q][2][a]*(invFT[q][1][2]*(invFT[q][1][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][1][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][1][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][1][2]*((I_C[q]*invFT[q][1][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][1][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][1][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][1][2]*vol_factor1*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]) + mu_q*pow23detF[q]*gradphi[q][2][b] - (2*P_F[1][2]*mu_q*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][1][2]*mu_q*pow23detF[q]*(F[q][1][0]*gradphi[q][0][b] + F[q][1][1]*gradphi[q][1][b] + F[q][1][2]*gradphi[q][2][b]))/3.0 + 8*P_F[1][2]*beta*pow23detF[q]*pow23detF[q]*(P_F[1][0]*gradphi[q][0][b] + P_F[1][1]*gradphi[q][1][b] + P_F[1][2]*gradphi[q][2][b])) ) * w[q];
aloc[a][1][b][2] += ( gradphi[q][0][a]*(invFT[q][2][0]*(invFT[q][1][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][1][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][1][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][2][0]*((I_C[q]*invFT[q][1][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][1][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][1][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][1][0]*vol_factor1*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]) - (2*P_F[1][0]*mu_q*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][1][0]*mu_q*pow23detF[q]*(F[q][2][0]*gradphi[q][0][b] + F[q][2][1]*gradphi[q][1][b] + F[q][2][2]*gradphi[q][2][b]))/3.0 + 8*P_F[1][0]*beta*pow23detF[q]*pow23detF[q]*(P_F[2][0]*gradphi[q][0][b] + P_F[2][1]*gradphi[q][1][b] + P_F[2][2]*gradphi[q][2][b])) + gradphi[q][1][a]*(invFT[q][2][1]*(invFT[q][1][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][1][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][1][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][2][1]*((I_C[q]*invFT[q][1][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][1][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][1][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][1][1]*vol_factor1*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]) - (2*P_F[1][1]*mu_q*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][1][1]*mu_q*pow23detF[q]*(F[q][2][0]*gradphi[q][0][b] + F[q][2][1]*gradphi[q][1][b] + F[q][2][2]*gradphi[q][2][b]))/3.0 + 8*P_F[1][1]*beta*pow23detF[q]*pow23detF[q]*(P_F[2][0]*gradphi[q][0][b] + P_F[2][1]*gradphi[q][1][b] + P_F[2][2]*gradphi[q][2][b])) + gradphi[q][2][a]*(invFT[q][2][2]*(invFT[q][1][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][1][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][1][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][2][2]*((I_C[q]*invFT[q][1][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][1][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][1][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][1][2]*vol_factor1*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]) - (2*P_F[1][2]*mu_q*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][1][2]*mu_q*pow23detF[q]*(F[q][2][0]*gradphi[q][0][b] + F[q][2][1]*gradphi[q][1][b] + F[q][2][2]*gradphi[q][2][b]))/3.0 + 8*P_F[1][2]*beta*pow23detF[q]*pow23detF[q]*(P_F[2][0]*gradphi[q][0][b] + P_F[2][1]*gradphi[q][1][b] + P_F[2][2]*gradphi[q][2][b])) ) * w[q];
aloc[a][2][b][0] += ( gradphi[q][0][a]*(invFT[q][0][0]*(invFT[q][2][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][2][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][2][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][0][0]*((I_C[q]*invFT[q][2][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][2][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][2][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][2][0]*vol_factor1*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]) - (2*P_F[2][0]*mu_q*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][2][0]*mu_q*pow23detF[q]*(F[q][0][0]*gradphi[q][0][b] + F[q][0][1]*gradphi[q][1][b] + F[q][0][2]*gradphi[q][2][b]))/3.0 + 8*P_F[2][0]*beta*pow23detF[q]*pow23detF[q]*(P_F[0][0]*gradphi[q][0][b] + P_F[0][1]*gradphi[q][1][b] + P_F[0][2]*gradphi[q][2][b])) + gradphi[q][1][a]*(invFT[q][0][1]*(invFT[q][2][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][2][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][2][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][0][1]*((I_C[q]*invFT[q][2][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][2][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][2][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][2][1]*vol_factor1*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]) - (2*P_F[2][1]*mu_q*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][2][1]*mu_q*pow23detF[q]*(F[q][0][0]*gradphi[q][0][b] + F[q][0][1]*gradphi[q][1][b] + F[q][0][2]*gradphi[q][2][b]))/3.0 + 8*P_F[2][1]*beta*pow23detF[q]*pow23detF[q]*(P_F[0][0]*gradphi[q][0][b] + P_F[0][1]*gradphi[q][1][b] + P_F[0][2]*gradphi[q][2][b])) + gradphi[q][2][a]*(invFT[q][0][2]*(invFT[q][2][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][2][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][2][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][0][2]*((I_C[q]*invFT[q][2][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][2][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][2][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][2][2]*vol_factor1*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]) - (2*P_F[2][2]*mu_q*pow23detF[q]*(invFT[q][0][0]*gradphi[q][0][b] + invFT[q][0][1]*gradphi[q][1][b] + invFT[q][0][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][2][2]*mu_q*pow23detF[q]*(F[q][0][0]*gradphi[q][0][b] + F[q][0][1]*gradphi[q][1][b] + F[q][0][2]*gradphi[q][2][b]))/3.0 + 8*P_F[2][2]*beta*pow23detF[q]*pow23detF[q]*(P_F[0][0]*gradphi[q][0][b] + P_F[0][1]*gradphi[q][1][b] + P_F[0][2]*gradphi[q][2][b])) ) * w[q];
aloc[a][2][b][1] += ( gradphi[q][0][a]*(invFT[q][1][0]*(invFT[q][2][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][2][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][2][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][1][0]*((I_C[q]*invFT[q][2][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][2][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][2][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][2][0]*vol_factor1*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]) - (2*P_F[2][0]*mu_q*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][2][0]*mu_q*pow23detF[q]*(F[q][1][0]*gradphi[q][0][b] + F[q][1][1]*gradphi[q][1][b] + F[q][1][2]*gradphi[q][2][b]))/3.0 + 8*P_F[2][0]*beta*pow23detF[q]*pow23detF[q]*(P_F[1][0]*gradphi[q][0][b] + P_F[1][1]*gradphi[q][1][b] + P_F[1][2]*gradphi[q][2][b])) + gradphi[q][1][a]*(invFT[q][1][1]*(invFT[q][2][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][2][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][2][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][1][1]*((I_C[q]*invFT[q][2][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][2][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][2][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][2][1]*vol_factor1*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]) - (2*P_F[2][1]*mu_q*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][2][1]*mu_q*pow23detF[q]*(F[q][1][0]*gradphi[q][0][b] + F[q][1][1]*gradphi[q][1][b] + F[q][1][2]*gradphi[q][2][b]))/3.0 + 8*P_F[2][1]*beta*pow23detF[q]*pow23detF[q]*(P_F[1][0]*gradphi[q][0][b] + P_F[1][1]*gradphi[q][1][b] + P_F[1][2]*gradphi[q][2][b])) + gradphi[q][2][a]*(invFT[q][1][2]*(invFT[q][2][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][2][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][2][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][1][2]*((I_C[q]*invFT[q][2][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][2][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][2][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][2][2]*vol_factor1*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]) - (2*P_F[2][2]*mu_q*pow23detF[q]*(invFT[q][1][0]*gradphi[q][0][b] + invFT[q][1][1]*gradphi[q][1][b] + invFT[q][1][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][2][2]*mu_q*pow23detF[q]*(F[q][1][0]*gradphi[q][0][b] + F[q][1][1]*gradphi[q][1][b] + F[q][1][2]*gradphi[q][2][b]))/3.0 + 8*P_F[2][2]*beta*pow23detF[q]*pow23detF[q]*(P_F[1][0]*gradphi[q][0][b] + P_F[1][1]*gradphi[q][1][b] + P_F[1][2]*gradphi[q][2][b])) ) * w[q];
aloc[a][2][b][2] += ( gradphi[q][0][a]*(invFT[q][2][0]*(invFT[q][2][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][2][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][2][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][2][0]*((I_C[q]*invFT[q][2][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][2][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][2][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][2][0]*vol_factor1*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]) + mu_q*pow23detF[q]*gradphi[q][0][b] - (2*P_F[2][0]*mu_q*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][2][0]*mu_q*pow23detF[q]*(F[q][2][0]*gradphi[q][0][b] + F[q][2][1]*gradphi[q][1][b] + F[q][2][2]*gradphi[q][2][b]))/3.0 + 8*P_F[2][0]*beta*pow23detF[q]*pow23detF[q]*(P_F[2][0]*gradphi[q][0][b] + P_F[2][1]*gradphi[q][1][b] + P_F[2][2]*gradphi[q][2][b])) + gradphi[q][1][a]*(invFT[q][2][1]*(invFT[q][2][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][2][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][2][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][2][1]*((I_C[q]*invFT[q][2][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][2][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][2][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][2][1]*vol_factor1*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]) + mu_q*pow23detF[q]*gradphi[q][1][b] - (2*P_F[2][1]*mu_q*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][2][1]*mu_q*pow23detF[q]*(F[q][2][0]*gradphi[q][0][b] + F[q][2][1]*gradphi[q][1][b] + F[q][2][2]*gradphi[q][2][b]))/3.0 + 8*P_F[2][1]*beta*pow23detF[q]*pow23detF[q]*(P_F[2][0]*gradphi[q][0][b] + P_F[2][1]*gradphi[q][1][b] + P_F[2][2]*gradphi[q][2][b])) + gradphi[q][2][a]*(invFT[q][2][2]*(invFT[q][2][0]*gradphi[q][0][b]*vol_factor2 + invFT[q][2][1]*gradphi[q][1][b]*vol_factor2 + invFT[q][2][2]*gradphi[q][2][b]*vol_factor2) + invFT[q][2][2]*((I_C[q]*invFT[q][2][0]*mu_q*pow23detF[q]*gradphi[q][0][b])/3.0 + (I_C[q]*invFT[q][2][1]*mu_q*pow23detF[q]*gradphi[q][1][b])/3.0 + (I_C[q]*invFT[q][2][2]*mu_q*pow23detF[q]*gradphi[q][2][b])/3.0) + invFT[q][2][2]*vol_factor1*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]) + mu_q*pow23detF[q]*gradphi[q][2][b] - (2*P_F[2][2]*mu_q*pow23detF[q]*(invFT[q][2][0]*gradphi[q][0][b] + invFT[q][2][1]*gradphi[q][1][b] + invFT[q][2][2]*gradphi[q][2][b]))/3.0 - (2*invFT[q][2][2]*mu_q*pow23detF[q]*(F[q][2][0]*gradphi[q][0][b] + F[q][2][1]*gradphi[q][1][b] + F[q][2][2]*gradphi[q][2][b]))/3.0 + 8*P_F[2][2]*beta*pow23detF[q]*pow23detF[q]*(P_F[2][0]*gradphi[q][0][b] + P_F[2][1]*gradphi[q][1][b] + P_F[2][2]*gradphi[q][2][b])) ) * w[q];
}
}
}
for (a = 0; a < nln; a = a + 1 )
{
/* loop over test components --> i_c */
for (i_c = 0; i_c < 3; i_c = i_c + 1 )
{
/* loop over trial functions --> b */
for (b = 0; b < nln; b = b + 1 )
{
/* loop over trial components --> j_c */
for (j_c = 0; j_c < 3; j_c = j_c + 1 )
{
myArows[ie*nln2*9+iii] = elements[a+ie*numRowsElements] + i_c * NumNodes;
myAcols[ie*nln2*9+iii] = elements[b+ie*numRowsElements] + j_c * NumNodes;
myAcoef[ie*nln2*9+iii] = aloc[a][i_c][b][j_c]*detjac[ie];
iii = iii + 1;
}
}
}
}
}
}
/*************************************************************************/
void RaghavanVorpMaterial_stress(mxArray* plhs[], const mxArray* prhs[])
{
double* dim_ptr = mxGetPr(prhs[0]);
int dim = (int)(dim_ptr[0]);
int noe = mxGetN(prhs[4]);
double* nln_ptr = mxGetPr(prhs[5]);
int nln = (int)(nln_ptr[0]);
int numRowsElements = mxGetM(prhs[4]);
int nln2 = nln*nln;
plhs[0] = mxCreateDoubleMatrix(noe,dim*dim, mxREAL);
plhs[1] = mxCreateDoubleMatrix(noe,dim*dim, mxREAL);
double* P = mxGetPr(plhs[0]);
double* Sigma = mxGetPr(plhs[1]);
int k,l;
int q;
int NumQuadPoints = mxGetN(prhs[6]);
int NumNodes = (int)(mxGetM(prhs[3]) / dim);
double* U_h = mxGetPr(prhs[3]);
double* w = mxGetPr(prhs[6]);
double* invjac = mxGetPr(prhs[7]);
double* detjac = mxGetPr(prhs[8]);
double* phi = mxGetPr(prhs[9]);
double* gradrefphi = mxGetPr(prhs[10]);
double gradphi[dim][nln][NumQuadPoints];
double* elements = mxGetPr(prhs[4]);
double GradUh[dim][dim][NumQuadPoints];
double Id[dim][dim];
int d1,d2;
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
Id[d1][d2] = 0;
if (d1==d2)
{
Id[d1][d2] = 1;
}
}
}
double* material_param = mxGetPr(prhs[2]);
double alpha = material_param[0];
double beta = material_param[1];
double bulk = material_param[2];
/* Assembly: loop over the elements */
int ie;
#pragma omp parallel for shared(invjac,detjac,elements,Sigma,U_h) private(gradphi,GradUh,ie,k,l,q,d1,d2) firstprivate(phi,gradrefphi,w,numRowsElements,nln2,nln,NumNodes,Id,alpha,beta,bulk)
for (ie = 0; ie < noe; ie = ie + 1 )
{
double traceE[NumQuadPoints];
double F[NumQuadPoints][dim][dim];
double P_Uh[dim][dim];
double invFT[NumQuadPoints][dim][dim];
double detF[NumQuadPoints];
double logdetF[NumQuadPoints];
double pow2detF[NumQuadPoints];
double pow23detF[NumQuadPoints];
double C[NumQuadPoints][dim][dim];
double I_C[NumQuadPoints];
q = 0;
/* Compute Gradient of Basis functions*/
for (k = 0; k < nln; k = k + 1 )
{
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
gradphi[d1][k][q] = 0;
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
gradphi[d1][k][q] = gradphi[d1][k][q] + INVJAC(ie,d1,d2)*GRADREFPHI(k,q,d2);
}
}
}
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
GradUh[d1][d2][q] = 0;
for (k = 0; k < nln; k = k + 1 )
{
int e_k;
e_k = (int)(elements[ie*numRowsElements + k] + d1*NumNodes - 1);
GradUh[d1][d2][q] = GradUh[d1][d2][q] + U_h[e_k] * gradphi[d2][k][q];
}
F[q][d1][d2] = Id[d1][d2] + GradUh[d1][d2][q];
}
}
detF[q] = MatrixDeterminant(dim, F[q]);
MatrixInvT(dim, F[q], invFT[q] );
MatrixProductAlphaT1(dim, 1.0, F[q], F[q], C[q] );
logdetF[q] = log( detF[q] );
pow23detF[q] = pow(detF[q], -2.0 / 3.0);
pow2detF[q] = pow(detF[q], 2.0);
I_C[q] = Trace(dim, C[q]);
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
P_Uh[d1][d2] = 2.0 * ( alpha + 2.0 * beta * ( pow23detF[q] * I_C[q] - 3.0 ) )
* pow23detF[q] * ( F[q][d1][d2] - 1.0 / 3.0 * I_C[q] * invFT[q][d1][d2] )
+ 1.0 / 2.0 * bulk * ( pow2detF[q] - detF[q] + logdetF[q] ) * invFT[q][d1][d2];
}
}
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
P[ie+(d1+d2*dim)*noe] = P_Uh[d1][d2] ;
}
}
double Sigma_tmp[dim][dim];
/* Sigma = 1 / det(F) * P * F^T */
MatrixProductAlphaT2(dim, 1.0 / detF[q], P_Uh, F[q], Sigma_tmp );
for (d1 = 0; d1 < dim; d1 = d1 + 1 )
{
for (d2 = 0; d2 < dim; d2 = d2 + 1 )
{
Sigma[ie+(d1+d2*dim)*noe] = Sigma_tmp[d1][d2] ;
}
}
}
}
/*************************************************************************/ |
transform.h | /*!
* Copyright 2018 XGBoost contributors
*/
#ifndef XGBOOST_COMMON_TRANSFORM_H_
#define XGBOOST_COMMON_TRANSFORM_H_
#include <dmlc/omp.h>
#include <dmlc/common.h>
#include <xgboost/data.h>
#include <utility>
#include <vector>
#include <type_traits> // enable_if
#include "xgboost/host_device_vector.h"
#include "xgboost/span.h"
#include "common.h"
#if defined (__CUDACC__)
#include "device_helpers.cuh"
#endif // defined (__CUDACC__)
namespace xgboost {
namespace common {
constexpr size_t kBlockThreads = 256;
namespace detail {
#if defined(__CUDACC__)
template <typename Functor, typename... SpanType>
__global__ void LaunchCUDAKernel(Functor _func, Range _range,
SpanType... _spans) {
for (auto i : dh::GridStrideRange(*_range.begin(), *_range.end())) {
_func(i, _spans...);
}
}
#endif // defined(__CUDACC__)
} // namespace detail
/*! \brief Do Transformation on HostDeviceVectors.
*
 * \tparam CompiledWithCuda A bool parameter used to distinguish compilation
 *           trajectories; users do not need to set it explicitly.
 *
 * Note: using Transform is a very tricky thing to do. Transform uses its
 * template argument to duplicate itself into two different types, one for
 * the CPU and another for CUDA. The trick is not without its flaw:
 *
 * If you use it in a function that can be compiled by both nvcc and the host
 * compiler, the behaviour is undefined, because that function is NOT
 * duplicated by `CompiledWithCuda`. At link time, CUDA symbol resolution
 * will merge functions with the same signature.
 *
 * A short editorial usage sketch is given after the class definition below.
*/
template <bool CompiledWithCuda = WITH_CUDA()>
class Transform {
private:
template <typename Functor>
struct Evaluator {
public:
Evaluator(Functor func, Range range, int device, bool shard) :
func_(func), range_{std::move(range)},
shard_{shard},
device_{device} {}
/*!
* \brief Evaluate the functor with input pointers to HostDeviceVector.
*
* \tparam HDV... HostDeviceVectors type.
* \param vectors Pointers to HostDeviceVector.
*/
template <typename... HDV>
void Eval(HDV... vectors) const {
bool on_device = device_ >= 0;
if (on_device) {
LaunchCUDA(func_, vectors...);
} else {
LaunchCPU(func_, vectors...);
}
}
private:
// CUDA UnpackHDV
template <typename T>
Span<T> UnpackHDVOnDevice(HostDeviceVector<T>* _vec) const {
auto span = _vec->DeviceSpan();
return span;
}
template <typename T>
Span<T const> UnpackHDVOnDevice(const HostDeviceVector<T>* _vec) const {
auto span = _vec->ConstDeviceSpan();
return span;
}
// CPU UnpackHDV
template <typename T>
Span<T> UnpackHDV(HostDeviceVector<T>* _vec) const {
return Span<T> {_vec->HostPointer(),
static_cast<typename Span<T>::index_type>(_vec->Size())};
}
template <typename T>
Span<T const> UnpackHDV(const HostDeviceVector<T>* _vec) const {
return Span<T const> {_vec->ConstHostPointer(),
static_cast<typename Span<T>::index_type>(_vec->Size())};
}
// Recursive unpack for Shard.
template <typename T>
void UnpackShard(int device, const HostDeviceVector<T> *vector) const {
vector->SetDevice(device);
}
template <typename Head, typename... Rest>
void UnpackShard(int device,
const HostDeviceVector<Head> *_vector,
const HostDeviceVector<Rest> *... _vectors) const {
_vector->SetDevice(device);
UnpackShard(device, _vectors...);
}
#if defined(__CUDACC__)
template <typename std::enable_if<CompiledWithCuda>::type* = nullptr,
typename... HDV>
void LaunchCUDA(Functor _func, HDV*... _vectors) const {
if (shard_)
UnpackShard(device_, _vectors...);
size_t range_size = *range_.end() - *range_.begin();
      // Extract the index range size to deal with possibly old OpenMP.
      // This handles situations like the multi-class setting, where a
      // coarser granularity is used in the data vector.
size_t shard_size = range_size;
Range shard_range {0, static_cast<Range::DifferenceType>(shard_size)};
dh::safe_cuda(cudaSetDevice(device_));
const int kGrids =
static_cast<int>(DivRoundUp(*(range_.end()), kBlockThreads));
if (kGrids == 0) {
return;
}
detail::LaunchCUDAKernel<<<kGrids, kBlockThreads>>>( // NOLINT
_func, shard_range, UnpackHDVOnDevice(_vectors)...);
}
#else
    /*! \brief Dummy function defined when compiling for CPU. */
template <typename std::enable_if<!CompiledWithCuda>::type* = nullptr,
typename... HDV>
void LaunchCUDA(Functor _func, HDV*... _vectors) const {
LOG(FATAL) << "Not part of device code. WITH_CUDA: " << WITH_CUDA();
}
#endif // defined(__CUDACC__)
template <typename... HDV>
void LaunchCPU(Functor func, HDV*... vectors) const {
omp_ulong end = static_cast<omp_ulong>(*(range_.end()));
dmlc::OMPException omp_exc;
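      // OMPException captures any exception thrown inside the parallel region
      // so it can be rethrown on the calling thread once the loop finishes.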
#pragma omp parallel for schedule(static)
for (omp_ulong idx = 0; idx < end; ++idx) {
omp_exc.Run(func, idx, UnpackHDV(vectors)...);
}
omp_exc.Rethrow();
}
private:
/*! \brief Callable object. */
Functor func_;
/*! \brief Range object specifying parallel threads index range. */
Range range_;
/*! \brief Whether sharding for vectors is required. */
bool shard_;
int device_;
};
public:
/*!
* \brief Initialize a Transform object.
*
* \tparam Functor A callable object type.
 * \return An Evaluator having one method Eval.
*
* \param func A callable object, accepting a size_t thread index,
* followed by a set of Span classes.
* \param range Range object specifying parallel threads index range.
 * \param device  Specify the GPU to use; a negative value selects the CPU path.
 * \param shard   Whether sharding of the HostDeviceVectors is needed.
*/
template <typename Functor>
static Evaluator<Functor> Init(Functor func, Range const range,
int device,
bool const shard = true) {
return Evaluator<Functor> {func, std::move(range), device, shard};
}
};
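// A minimal usage sketch (editorial addition; the functor name and the vector
// size below are hypothetical, and actual call sites in XGBoost may differ):
//
//   struct SetOne {
//     XGBOOST_DEVICE void operator()(std::size_t i, Span<float> out) const {
//       out[i] = 1.0f;
//     }
//   };
//
//   HostDeviceVector<float> data(16);
//   // device == -1 selects the CPU path, i.e. LaunchCPU above.
//   Transform<>::Init(SetOne{}, Range{0, 16}, -1).Eval(&data);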
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_TRANSFORM_H_
|
bit_vector_functions.h | #ifndef BIT_VECTOR_FUNCTIONS_H
#define BIT_VECTOR_FUNCTIONS_H
#include <vector>
#include <bitset>
#include "helper/confusion.h"
#include "config.h"
#include "io_and_allocation.h"
#include "updates_and_measures.h"
using std::vector;
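// Layout note (inferred from the index arithmetic used throughout this file):
// the factor matrices Ab/Bb store one machine word per row/column, and the
// data matrix Cb is bit-packed in stripes of 32 rows, so that bit (i % 32) of
// word Cb[i / 32 * width + j] holds C(i, j).  A minimal accessor sketch under
// that assumption:
//
//   template<typename bit_vector_t, typename index_t>
//   int read_C_bit(const vector<bit_vector_t>& Cb,
//                  index_t i, index_t j, index_t width) {
//       return (Cb[i / 32 * width + j] >> (i % 32)) & 1;
//   }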
template<typename bit_vector_t, typename index_t>
size_t computeHammingDistanceCPU(
const vector<bit_vector_t> &Ab,
const vector<bit_vector_t> &Bb,
const vector<bit_vector_t> &Cb,
const index_t height,
const index_t width)
{
size_t error = 0;
#pragma omp parallel for reduction(+:error)
for(index_t j=0; j < width; ++j) {
uint32_t B_j = Bb[j];
for(index_t i=0; i < height; ++i) {
const int product = (Ab[i] & B_j) ? 1 : 0;
const index_t vecId = i / 32 * width + j;
const index_t vecLane = i % 32;
const int C_ij = (Cb[vecId] >> vecLane) & 1;
error += product ^ C_ij;
}
}
return error;
}
template<typename bit_vector_t>
int nonzeroDimension(vector<bit_vector_t>& Ab)
{
bit_vector_t columns = 0;
for(auto& a : Ab) columns |= a;
std::bitset<std::numeric_limits<bit_vector_t>::digits> bits(columns);
return bits.count();
}
template<typename bit_vector_t, typename index_t>
confusion_matrix computeErrorsCPU(
const vector<bit_vector_t> &Ab,
const vector<bit_vector_t> &Bb,
const vector<bit_vector_t> &Cb,
const index_t height,
const index_t width)
{
size_t true_positives = 0;
size_t true_negatives = 0;
size_t false_positives = 0;
size_t false_negatives = 0;
#pragma omp parallel for reduction(+:true_positives) \
reduction(+:true_negatives) \
reduction(+:false_positives) \
reduction(+:false_negatives)
for(index_t j=0; j < width; ++j) {
uint32_t B_j = Bb[j];
for(index_t i=0; i < height; ++i) {
const int product = (Ab[i] & B_j) ? 1 : 0;
const index_t vecId = i / 32 * width + j;
const index_t vecLane = i % 32;
const int C_ij = (Cb[vecId] >> vecLane) & 1;
true_positives += C_ij & product;
true_negatives += !(C_ij | product);
false_positives += (!C_ij) & product;
false_negatives += C_ij & !product;
}
}
return confusion_matrix(true_positives, true_negatives, false_positives, false_negatives);
}
template<typename bit_vector_t, typename index_t>
size_t computeTruePositiveCPU(
const vector<bit_vector_t> &Ab,
const vector<bit_vector_t> &Bb,
const vector<bit_vector_t> &Cb,
const index_t height,
const index_t width)
{
size_t true_positives = 0;
#pragma omp parallel for reduction(+:true_positives)
for(index_t j=0; j < width; ++j) {
uint32_t B_j = Bb[j];
for(index_t i=0; i < height; ++i) {
const int product = (Ab[i] & B_j) ? 1 : 0;
const index_t vecId = i / 32 * width + j;
const index_t vecLane = i % 32;
const int C_ij = (Cb[vecId] >> vecLane) & 1;
if(product & C_ij) true_positives++;
}
}
return true_positives;
}
template<typename bit_vector_t, typename index_t>
float computeJaccardCPU(
const vector<bit_vector_t> &Ab,
const vector<bit_vector_t> &Bb,
const vector<bit_vector_t> &Cb,
const index_t height,
const index_t width)
{
float jaccard = 0;
#pragma omp parallel for reduction(+:jaccard)
for(index_t j=0; j < width; ++j) {
uint32_t B_j = Bb[j];
size_t true_positives = 0;
size_t false_positives = 0;
size_t false_negatives = 0;
for(index_t i=0; i < height; ++i) {
const int product = (Ab[i] & B_j) ? 1 : 0;
const index_t vecId = i / 32 * width + j;
const index_t vecLane = i % 32;
const int C_ij = (Cb[vecId] >> vecLane) & 1;
if(product) {
if(C_ij)
true_positives++;
else
false_positives++;
} else {
if(C_ij)
false_negatives++;
}
}
jaccard += (float) true_positives / (true_positives + false_positives + false_negatives);
}
return jaccard;
}
template<typename bit_factor_t, typename bit_matrix_t, typename index_t, typename error_t>
error_t computeDistanceCPU(
const vector<bit_factor_t> &Ab,
const vector<bit_factor_t> &Bb,
const vector<bit_matrix_t> &Cb,
const index_t height,
const index_t width,
const error_t weight)
{
error_t error = 0;
#pragma omp parallel for reduction(+:error)
for(index_t i=0; i < height; ++i) {
uint32_t A_i = Ab[i];
for(index_t j=0; j < width; ++j) {
const int product = (A_i & Bb[j]) ? 1 : 0;
const index_t vecId = i / 32 * width + j;
const index_t vecLane = i % 32;
const int C_ij = (Cb[vecId] >> vecLane) & 1;
error += error_measure(product, C_ij, weight);
}
}
return error;
}
template<typename bit_vector_t, typename index_t, typename error_t = float>
vector<error_t> computeDensitiesRows(
const vector<bit_vector_t> &Cb,
const index_t height,
const index_t width)
{
vector<error_t> density_rows(height);
#pragma omp parallel for
for(index_t i=0; i<height; ++i) {
size_t nonZeroCount = 0;
for(index_t j=0; j<width; ++j) {
const index_t vecId = i / 32 * width + j;
const index_t vecLane = i % 32;
const int C_ij = (Cb[vecId] >> vecLane) & 1;
nonZeroCount += C_ij;
}
density_rows[i] = (error_t) nonZeroCount / width;
}
return density_rows;
}
template<typename bit_vector_t, typename index_t, typename error_t = float>
vector<error_t> computeDensitiesCols(
const vector<bit_vector_t> &Cb,
const index_t height,
const index_t width)
{
vector<error_t> density_cols(width);
#pragma omp parallel for
for(index_t j=0; j<width; ++j) {
size_t nonZeroCount = 0;
for(index_t i=0; i<height; ++i) {
const index_t vecId = i / 32 * width + j;
const index_t vecLane = i % 32;
const int C_ij = (Cb[vecId] >> vecLane) & 1;
nonZeroCount += C_ij;
}
density_cols[j] = (error_t) nonZeroCount / height;
}
return density_cols;
}
template<typename bit_vector_t, typename index_t, typename error_t = float>
vector<error_t> computeInverseDensitiesRows(
const vector<bit_vector_t> &Cb,
const index_t height,
const index_t width)
{
vector<error_t> inverse_density_rows(height);
#pragma omp parallel for
for(index_t i=0; i<height; ++i) {
size_t nonZeroCount = 0;
for(index_t j=0; j<width; ++j) {
const index_t vecId = i / 32 * width + j;
const index_t vecLane = i % 32;
const int C_ij = (Cb[vecId] >> vecLane) & 1;
nonZeroCount += C_ij;
}
if(nonZeroCount == 0) nonZeroCount++;
inverse_density_rows[i] = (error_t) width / nonZeroCount;
}
return inverse_density_rows;
}
template<typename bit_vector_t, typename index_t, typename error_t = float>
vector<error_t> computeInverseDensitiesCols(
const vector<bit_vector_t> &Cb,
const index_t height,
const index_t width)
{
vector<error_t> inverse_density_cols(width);
#pragma omp parallel for
for(index_t j=0; j<width; ++j) {
size_t nonZeroCount = 0;
for(index_t i=0; i<height; ++i) {
const index_t vecId = i / 32 * width + j;
const index_t vecLane = i % 32;
const int C_ij = (Cb[vecId] >> vecLane) & 1;
nonZeroCount += C_ij;
}
if(nonZeroCount == 0) nonZeroCount++;
inverse_density_cols[j] = (error_t) height / nonZeroCount;
}
return inverse_density_cols;
}
// Forward declaration (updateColumnPart is defined below but called here).
template<typename bit_vector_t, typename index_t>
void updateColumnPart(vector<bit_vector_t> &Ab, const index_t size_A,
                      const uint8_t factorDim, const uint8_t column,
                      const float density, const index_t startline,
                      const index_t numlines, const uint32_t seed);
template<typename bit_vector_t, typename index_t>
void updateWholeColumn(
vector<bit_vector_t> &Ab,
const index_t size_A,
const uint8_t factorDim,
const uint8_t column,
const float density,
const uint32_t seed)
{
updateColumnPart(Ab, size_A, factorDim, column, density, 0, size_A, seed);
}
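// Note: the update/optimize routines below use "#pragma omp for" (not
// "parallel for"), so they are presumably meant to be called from inside an
// enclosing "#pragma omp parallel" region opened by the caller.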
template<typename bit_vector_t, typename index_t>
void updateColumnPart(
vector<bit_vector_t> &Ab,
const index_t size_A,
const uint8_t factorDim,
const uint8_t column,
const float density,
const index_t startline,
const index_t numlines,
const uint32_t seed)
{
const double threshold = getInitChance(density, factorDim);
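    // Each line draws a uniform 32-bit value and compares it against
    // threshold * UINT32_MAX, i.e. bit `column` is set with probability
    // approximately `threshold`.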
#pragma omp for
for (index_t id = 0; id < numlines; ++id) {
const index_t i = (startline + id) % size_A;
fast_kiss_state32_t state;
state = get_initial_fast_kiss_state32(seed + i);
const bool set_one = fast_kiss32(state) < threshold * UINT32_MAX;
if (set_one)
Ab[i] |= 1 << column;
else //set 0
Ab[i] &= ~(1 << column);
}
}
template<bool transpose, typename bit_vector_t, typename index_t>
confusion_matrix optimizeWholeColumn(
vector<bit_vector_t> &Ab,
const index_t size_A,
const vector<bit_vector_t> &Bb,
const index_t size_B,
const vector<bit_vector_t> &Cb,
const uint8_t factorDim,
const uint8_t k)
{
confusion_matrix confusion_new;
#pragma omp for
for (index_t i = 0; i < size_A; ++i) {
const bit_vector_t A_i_0 = Ab[i] & ~(1 << k);
const bit_vector_t A_i_1 = Ab[i] | (1 << k);
confusion_matrix confusion_0;
confusion_matrix confusion_1;
for(index_t j=0; j < size_B; ++j) {
const index_t vecId = transpose ? j / 32 * size_A + i : i / 32 * size_B + j;
const index_t vecLane = transpose ? j % 32 : i % 32;
const int C_ij = (Cb[vecId] >> vecLane) & 1;
const int product_0 = (A_i_0 & Bb[j]) ? 1 : 0;
const int product_1 = (A_i_1 & Bb[j]) ? 1 : 0;
confusion_0.TP += C_ij & product_0;
confusion_1.TP += C_ij & product_1;
confusion_0.FN += C_ij & !product_0;
confusion_1.FN += C_ij & !product_1;
confusion_0.FP += (!C_ij) & product_0;
confusion_1.FP += (!C_ij) & product_1;
}
if(confusion_0.total_error() <= confusion_1.total_error()) {
Ab[i] = A_i_0;
confusion_new.TP += confusion_0.TP;
confusion_new.FN += confusion_0.FN;
confusion_new.FP += confusion_0.FP;
}
else {
Ab[i] = A_i_1;
confusion_new.TP += confusion_1.TP;
confusion_new.FN += confusion_1.FN;
confusion_new.FP += confusion_1.FP;
}
}
return confusion_new;
}
template<bool transpose, typename bit_vector_t, typename index_t>
confusion_matrix updateLinesJaccardCPU(vector<bit_vector_t> &Ab,
const index_t size_A,
const vector<bit_vector_t> &Bb,
const index_t size_B,
const vector<bit_vector_t> &Cb,
const uint8_t factorDim,
const index_t startline,
const index_t numlines,
const uint32_t seed,
const float temperature,
const float flipManyChance,
const uint32_t flipManyDepth,
const confusion_matrix confusion)
{
confusion_matrix confusion_update;
#pragma omp for
for(index_t id=0; id < numlines; ++id) {
const index_t i = (startline + id) % size_A;
fast_kiss_state32_t state;
state = get_initial_fast_kiss_state32(seed + id);
const bit_vector_t A_i = Ab[i];
const bit_vector_t A_i_draw = get_flip_mask_many(factorDim, state, flipManyDepth);
const bit_vector_t A_i_flip = A_i ^ A_i_draw;
confusion_matrix confusion_old;
confusion_matrix confusion_draw;
confusion_matrix confusion_flip;
for(index_t j=0; j < size_B; ++j) {
const index_t vecId = transpose ? j / 32 * size_A + i : i / 32 * size_B + j;
const index_t vecLane = transpose ? j % 32 : i % 32;
const int C_ij = (Cb[vecId] >> vecLane) & 1;
const int product_old = (A_i & Bb[j]) ? 1 : 0;
const int product_draw = (A_i_draw & Bb[j]) ? 1 : 0;
const int product_flip = (A_i_flip & Bb[j]) ? 1 : 0;
confusion_old.TP += C_ij & product_old;
confusion_draw.TP += C_ij & product_draw;
confusion_flip.TP += C_ij & product_flip;
confusion_old.FN += C_ij & !product_old;
confusion_draw.FN += C_ij & !product_draw;
confusion_flip.FN += C_ij & !product_flip;
confusion_old.FP += (!C_ij) & product_old;
confusion_draw.FP += (!C_ij) & product_draw;
confusion_flip.FP += (!C_ij) & product_flip;
}
const size_t all_tp_draw = confusion.TP - confusion_old.TP + confusion_draw.TP;
const size_t all_tp_flip = confusion.TP - confusion_old.TP + confusion_flip.TP;
const float jaccard_old = 1.0f * confusion.TP / (confusion.TP + 3*confusion_old.FN + confusion_old.FP);
const float jaccard_draw = 1.0f * all_tp_draw / (all_tp_draw + 3*confusion_draw.FN + confusion_draw.FP);
const float jaccard_flip = 1.0f * all_tp_flip / (all_tp_flip + 3*confusion_flip.FN + confusion_flip.FP);
bit_vector_t A_i_new = A_i_draw;
float jaccard_new = jaccard_draw;
confusion_matrix& confusion_new = confusion_draw;
if(jaccard_draw > jaccard_old) {
if(jaccard_flip > jaccard_draw) {
A_i_new = A_i_flip;
jaccard_new = jaccard_flip;
confusion_new = confusion_flip;
}
} else {
if(jaccard_flip > jaccard_old) {
A_i_new = A_i_flip;
jaccard_new = jaccard_flip;
confusion_new = confusion_flip;
} else {
const uint32_t coin = fast_kiss32(state) % 2;
if(coin) {
A_i_new = A_i_flip;
jaccard_new = jaccard_flip;
confusion_new = confusion_flip;
}
}
}
if (metro(state, jaccard_old - jaccard_new, temperature)) {
Ab[i] = A_i_new;
confusion_update.TP += confusion_new.TP - confusion_old.TP;
confusion_update.FP += confusion_new.FP - confusion_old.FP;
confusion_update.FN += confusion_new.FN - confusion_old.FN;
}
}
return confusion_update;
}
template<bool transpose, typename bit_vector_t, typename index_t, typename error_t>
int vectorMatrixMultCompareLineCPU(vector<bit_vector_t> &Ab,
const index_t size_A,
const vector<bit_vector_t> &Bb,
const index_t size_B,
const vector<bit_vector_t> &Cb,
const uint8_t factorDim,
const index_t startline,
const index_t numlines,
const uint32_t seed,
const float temperature,
const float flipManyChance,
const uint32_t flipManyDepth,
const error_t weight)
{
error_t error_update = 0;
#pragma omp for
// #pragma omp parallel for reduction(+:error_update)
for(index_t id=0; id < numlines; ++id) {
const index_t i = (startline + id) % size_A;
fast_kiss_state32_t state;
state = get_initial_fast_kiss_state32(seed + id);
const bit_vector_t A_i = Ab[i];
bit_vector_t A_i_changed = Ab[i] ^ get_flip_mask(factorDim, state, flipManyChance, flipManyDepth);
error_t error = 0;
for(index_t j=0; j < size_B; ++j) {
const index_t vecId = transpose ? j / 32 * size_A + i : i / 32 * size_B + j;
const index_t vecLane = transpose ? j % 32 : i % 32;
const int C_ij = (Cb[vecId] >> vecLane) & 1;
const int product_old = (A_i & Bb[j]) ? 1 : 0;
const int product_new = (A_i_changed & Bb[j]) ? 1 : 0;
error += error_measure(product_new, C_ij, weight)
- error_measure(product_old, C_ij, weight);
}
if (metro(state, error, temperature, size_B)) {
Ab[i] = A_i_changed;
error_update += error;
}
}
return error_update;
}
template <typename index_t>
struct coo {
coo(index_t x, index_t y) : x_{x}, y_{y} {}
index_t x_;
index_t y_;
};
template <typename bit_vector_t, typename index_t>
vector<coo<index_t>> computeProductCOO(
const vector<bit_vector_t> &Ab,
const vector<bit_vector_t> &Bb,
const index_t height,
const index_t width)
{
vector<coo<index_t>> C;
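    // The ordered clause with schedule(static,1) makes threads append their
    // per-row results to C in row order, so the COO output is deterministic.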
#pragma omp parallel for ordered schedule(static,1)
for(index_t i=0; i < height; ++i) {
bit_vector_t row = Ab[i];
vector<coo<index_t>> Ci;
for(index_t j=0; j < width; ++j) {
if(row & Bb[j])
Ci.emplace_back(i,j);
}
#pragma omp ordered
C.insert(C.end(), Ci.begin(), Ci.end());
}
return C;
}
#endif
|
divsufsort.c | /*
* divsufsort.c for libdivsufsort-lite
* Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*- Compiler specifics -*/
#ifdef __clang__
#pragma clang diagnostic ignored "-Wshorten-64-to-32"
#endif
#if defined(_MSC_VER)
# pragma warning(disable : 4244)
# pragma warning(disable : 4127) /* C4127 : Condition expression is constant */
#endif
/*- Dependencies -*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include "divsufsort.h"
/*- Constants -*/
#if defined(INLINE)
# undef INLINE
#endif
#if !defined(INLINE)
# define INLINE __inline
#endif
#if defined(ALPHABET_SIZE) && (ALPHABET_SIZE < 1)
# undef ALPHABET_SIZE
#endif
#if !defined(ALPHABET_SIZE)
# define ALPHABET_SIZE (256)
#endif
#define BUCKET_A_SIZE (ALPHABET_SIZE)
#define BUCKET_B_SIZE (ALPHABET_SIZE * ALPHABET_SIZE)
#if defined(SS_INSERTIONSORT_THRESHOLD)
# if SS_INSERTIONSORT_THRESHOLD < 1
# undef SS_INSERTIONSORT_THRESHOLD
# define SS_INSERTIONSORT_THRESHOLD (1)
# endif
#else
# define SS_INSERTIONSORT_THRESHOLD (8)
#endif
#if defined(SS_BLOCKSIZE)
# if SS_BLOCKSIZE < 0
# undef SS_BLOCKSIZE
# define SS_BLOCKSIZE (0)
# elif 32768 <= SS_BLOCKSIZE
# undef SS_BLOCKSIZE
# define SS_BLOCKSIZE (32767)
# endif
#else
# define SS_BLOCKSIZE (1024)
#endif
/* minstacksize = log(SS_BLOCKSIZE) / log(3) * 2 */
#if SS_BLOCKSIZE == 0
# define SS_MISORT_STACKSIZE (96)
#elif SS_BLOCKSIZE <= 4096
# define SS_MISORT_STACKSIZE (16)
#else
# define SS_MISORT_STACKSIZE (24)
#endif
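/* Worked example of the formula above: for the default SS_BLOCKSIZE of 1024,
   log(1024) / log(3) * 2 is roughly 12.6, which the 16 entries reserved above
   comfortably cover (illustrative arithmetic only). */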
#define SS_SMERGE_STACKSIZE (32)
#define TR_INSERTIONSORT_THRESHOLD (8)
#define TR_STACKSIZE (64)
/*- Macros -*/
#ifndef SWAP
# define SWAP(_a, _b) do { t = (_a); (_a) = (_b); (_b) = t; } while(0)
#endif /* SWAP */
#ifndef MIN
# define MIN(_a, _b) (((_a) < (_b)) ? (_a) : (_b))
#endif /* MIN */
#ifndef MAX
# define MAX(_a, _b) (((_a) > (_b)) ? (_a) : (_b))
#endif /* MAX */
#define STACK_PUSH(_a, _b, _c, _d)\
do {\
assert(ssize < STACK_SIZE);\
stack[ssize].a = (_a), stack[ssize].b = (_b),\
stack[ssize].c = (_c), stack[ssize++].d = (_d);\
} while(0)
#define STACK_PUSH5(_a, _b, _c, _d, _e)\
do {\
assert(ssize < STACK_SIZE);\
stack[ssize].a = (_a), stack[ssize].b = (_b),\
stack[ssize].c = (_c), stack[ssize].d = (_d), stack[ssize++].e = (_e);\
} while(0)
#define STACK_POP(_a, _b, _c, _d)\
do {\
assert(0 <= ssize);\
if(ssize == 0) { return; }\
(_a) = stack[--ssize].a, (_b) = stack[ssize].b,\
(_c) = stack[ssize].c, (_d) = stack[ssize].d;\
} while(0)
#define STACK_POP5(_a, _b, _c, _d, _e)\
do {\
assert(0 <= ssize);\
if(ssize == 0) { return; }\
(_a) = stack[--ssize].a, (_b) = stack[ssize].b,\
(_c) = stack[ssize].c, (_d) = stack[ssize].d, (_e) = stack[ssize].e;\
} while(0)
#define BUCKET_A(_c0) bucket_A[(_c0)]
#if ALPHABET_SIZE == 256
#define BUCKET_B(_c0, _c1) (bucket_B[((_c1) << 8) | (_c0)])
#define BUCKET_BSTAR(_c0, _c1) (bucket_B[((_c0) << 8) | (_c1)])
#else
#define BUCKET_B(_c0, _c1) (bucket_B[(_c1) * ALPHABET_SIZE + (_c0)])
#define BUCKET_BSTAR(_c0, _c1) (bucket_B[(_c0) * ALPHABET_SIZE + (_c1)])
#endif
/*- Private Functions -*/
static const int lg_table[256]= {
-1,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
};
#if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE)
static INLINE
int
ss_ilg(int n) {
#if SS_BLOCKSIZE == 0
return (n & 0xffff0000) ?
((n & 0xff000000) ?
24 + lg_table[(n >> 24) & 0xff] :
16 + lg_table[(n >> 16) & 0xff]) :
((n & 0x0000ff00) ?
8 + lg_table[(n >> 8) & 0xff] :
0 + lg_table[(n >> 0) & 0xff]);
#elif SS_BLOCKSIZE < 256
return lg_table[n];
#else
return (n & 0xff00) ?
8 + lg_table[(n >> 8) & 0xff] :
0 + lg_table[(n >> 0) & 0xff];
#endif
}
#endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */
#if SS_BLOCKSIZE != 0
static const int sqq_table[256] = {
0, 16, 22, 27, 32, 35, 39, 42, 45, 48, 50, 53, 55, 57, 59, 61,
64, 65, 67, 69, 71, 73, 75, 76, 78, 80, 81, 83, 84, 86, 87, 89,
90, 91, 93, 94, 96, 97, 98, 99, 101, 102, 103, 104, 106, 107, 108, 109,
110, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126,
128, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142,
143, 144, 144, 145, 146, 147, 148, 149, 150, 150, 151, 152, 153, 154, 155, 155,
156, 157, 158, 159, 160, 160, 161, 162, 163, 163, 164, 165, 166, 167, 167, 168,
169, 170, 170, 171, 172, 173, 173, 174, 175, 176, 176, 177, 178, 178, 179, 180,
181, 181, 182, 183, 183, 184, 185, 185, 186, 187, 187, 188, 189, 189, 190, 191,
192, 192, 193, 193, 194, 195, 195, 196, 197, 197, 198, 199, 199, 200, 201, 201,
202, 203, 203, 204, 204, 205, 206, 206, 207, 208, 208, 209, 209, 210, 211, 211,
212, 212, 213, 214, 214, 215, 215, 216, 217, 217, 218, 218, 219, 219, 220, 221,
221, 222, 222, 223, 224, 224, 225, 225, 226, 226, 227, 227, 228, 229, 229, 230,
230, 231, 231, 232, 232, 233, 234, 234, 235, 235, 236, 236, 237, 237, 238, 238,
239, 240, 240, 241, 241, 242, 242, 243, 243, 244, 244, 245, 245, 246, 246, 247,
247, 248, 248, 249, 249, 250, 250, 251, 251, 252, 252, 253, 253, 254, 254, 255
};
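/* Fast integer square root: take a first guess from the 8-bit table above,
   then refine it with one or two Newton-style steps y = (y + 1 + x/y) >> 1,
   clamping the result to SS_BLOCKSIZE. */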
static INLINE
int
ss_isqrt(int x) {
int y, e;
if(x >= (SS_BLOCKSIZE * SS_BLOCKSIZE)) { return SS_BLOCKSIZE; }
e = (x & 0xffff0000) ?
((x & 0xff000000) ?
24 + lg_table[(x >> 24) & 0xff] :
16 + lg_table[(x >> 16) & 0xff]) :
((x & 0x0000ff00) ?
8 + lg_table[(x >> 8) & 0xff] :
0 + lg_table[(x >> 0) & 0xff]);
if(e >= 16) {
y = sqq_table[x >> ((e - 6) - (e & 1))] << ((e >> 1) - 7);
if(e >= 24) { y = (y + 1 + x / y) >> 1; }
y = (y + 1 + x / y) >> 1;
} else if(e >= 8) {
y = (sqq_table[x >> ((e - 6) - (e & 1))] >> (7 - (e >> 1))) + 1;
} else {
return sqq_table[x] >> 4;
}
return (x < (y * y)) ? y - 1 : y;
}
#endif /* SS_BLOCKSIZE != 0 */
/*---------------------------------------------------------------------------*/
/* Compares two suffixes. */
static INLINE
int
ss_compare(const unsigned char *T,
const int *p1, const int *p2,
int depth) {
const unsigned char *U1, *U2, *U1n, *U2n;
for(U1 = T + depth + *p1,
U2 = T + depth + *p2,
U1n = T + *(p1 + 1) + 2,
U2n = T + *(p2 + 1) + 2;
(U1 < U1n) && (U2 < U2n) && (*U1 == *U2);
++U1, ++U2) {
}
return U1 < U1n ?
(U2 < U2n ? *U1 - *U2 : 1) :
(U2 < U2n ? -1 : 0);
}
/*---------------------------------------------------------------------------*/
#if (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1)
/* Insertionsort for small size groups */
static
void
ss_insertionsort(const unsigned char *T, const int *PA,
int *first, int *last, int depth) {
int *i, *j;
int t;
int r;
for(i = last - 2; first <= i; --i) {
for(t = *i, j = i + 1; 0 < (r = ss_compare(T, PA + t, PA + *j, depth));) {
do { *(j - 1) = *j; } while((++j < last) && (*j < 0));
if(last <= j) { break; }
}
if(r == 0) { *j = ~*j; }
*(j - 1) = t;
}
}
#endif /* (SS_BLOCKSIZE != 1) && (SS_INSERTIONSORT_THRESHOLD != 1) */
/*---------------------------------------------------------------------------*/
#if (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE)
static INLINE
void
ss_fixdown(const unsigned char *Td, const int *PA,
int *SA, int i, int size) {
int j, k;
int v;
int c, d, e;
for(v = SA[i], c = Td[PA[v]]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) {
d = Td[PA[SA[k = j++]]];
if(d < (e = Td[PA[SA[j]]])) { k = j; d = e; }
if(d <= c) { break; }
}
SA[i] = v;
}
/* Simple top-down heapsort. */
static
void
ss_heapsort(const unsigned char *Td, const int *PA, int *SA, int size) {
int i, m;
int t;
m = size;
if((size % 2) == 0) {
m--;
if(Td[PA[SA[m / 2]]] < Td[PA[SA[m]]]) { SWAP(SA[m], SA[m / 2]); }
}
for(i = m / 2 - 1; 0 <= i; --i) { ss_fixdown(Td, PA, SA, i, m); }
if((size % 2) == 0) { SWAP(SA[0], SA[m]); ss_fixdown(Td, PA, SA, 0, m); }
for(i = m - 1; 0 < i; --i) {
t = SA[0], SA[0] = SA[i];
ss_fixdown(Td, PA, SA, 0, i);
SA[i] = t;
}
}
/*---------------------------------------------------------------------------*/
/* Returns the median of three elements. */
static INLINE
int *
ss_median3(const unsigned char *Td, const int *PA,
int *v1, int *v2, int *v3) {
int *t;
if(Td[PA[*v1]] > Td[PA[*v2]]) { SWAP(v1, v2); }
if(Td[PA[*v2]] > Td[PA[*v3]]) {
if(Td[PA[*v1]] > Td[PA[*v3]]) { return v1; }
else { return v3; }
}
return v2;
}
/* Returns the median of five elements. */
static INLINE
int *
ss_median5(const unsigned char *Td, const int *PA,
int *v1, int *v2, int *v3, int *v4, int *v5) {
int *t;
if(Td[PA[*v2]] > Td[PA[*v3]]) { SWAP(v2, v3); }
if(Td[PA[*v4]] > Td[PA[*v5]]) { SWAP(v4, v5); }
if(Td[PA[*v2]] > Td[PA[*v4]]) { SWAP(v2, v4); SWAP(v3, v5); }
if(Td[PA[*v1]] > Td[PA[*v3]]) { SWAP(v1, v3); }
if(Td[PA[*v1]] > Td[PA[*v4]]) { SWAP(v1, v4); SWAP(v3, v5); }
if(Td[PA[*v3]] > Td[PA[*v4]]) { return v4; }
return v3;
}
/* Returns the pivot element. */
static INLINE
int *
ss_pivot(const unsigned char *Td, const int *PA, int *first, int *last) {
int *middle;
int t;
t = last - first;
middle = first + t / 2;
if(t <= 512) {
if(t <= 32) {
return ss_median3(Td, PA, first, middle, last - 1);
} else {
t >>= 2;
return ss_median5(Td, PA, first, first + t, middle, last - 1 - t, last - 1);
}
}
t >>= 3;
first = ss_median3(Td, PA, first, first + t, first + (t << 1));
middle = ss_median3(Td, PA, middle - t, middle, middle + t);
last = ss_median3(Td, PA, last - 1 - (t << 1), last - 1 - t, last - 1);
return ss_median3(Td, PA, first, middle, last);
}
/*---------------------------------------------------------------------------*/
/* Binary partition for substrings. */
static INLINE
int *
ss_partition(const int *PA,
int *first, int *last, int depth) {
int *a, *b;
int t;
for(a = first - 1, b = last;;) {
for(; (++a < b) && ((PA[*a] + depth) >= (PA[*a + 1] + 1));) { *a = ~*a; }
for(; (a < --b) && ((PA[*b] + depth) < (PA[*b + 1] + 1));) { }
if(b <= a) { break; }
t = ~*b;
*b = *a;
*a = t;
}
if(first < a) { *first = ~*first; }
return a;
}
/* Multikey introsort for medium size groups. */
static
void
ss_mintrosort(const unsigned char *T, const int *PA,
int *first, int *last,
int depth) {
#define STACK_SIZE SS_MISORT_STACKSIZE
struct { int *a, *b, c; int d; } stack[STACK_SIZE];
const unsigned char *Td;
int *a, *b, *c, *d, *e, *f;
int s, t;
int ssize;
int limit;
int v, x = 0;
for(ssize = 0, limit = ss_ilg(last - first);;) {
if((last - first) <= SS_INSERTIONSORT_THRESHOLD) {
#if 1 < SS_INSERTIONSORT_THRESHOLD
if(1 < (last - first)) { ss_insertionsort(T, PA, first, last, depth); }
#endif
STACK_POP(first, last, depth, limit);
continue;
}
Td = T + depth;
if(limit-- == 0) { ss_heapsort(Td, PA, first, last - first); }
if(limit < 0) {
for(a = first + 1, v = Td[PA[*first]]; a < last; ++a) {
if((x = Td[PA[*a]]) != v) {
if(1 < (a - first)) { break; }
v = x;
first = a;
}
}
if(Td[PA[*first] - 1] < v) {
first = ss_partition(PA, first, a, depth);
}
if((a - first) <= (last - a)) {
if(1 < (a - first)) {
STACK_PUSH(a, last, depth, -1);
last = a, depth += 1, limit = ss_ilg(a - first);
} else {
first = a, limit = -1;
}
} else {
if(1 < (last - a)) {
STACK_PUSH(first, a, depth + 1, ss_ilg(a - first));
first = a, limit = -1;
} else {
last = a, depth += 1, limit = ss_ilg(a - first);
}
}
continue;
}
/* choose pivot */
a = ss_pivot(Td, PA, first, last);
v = Td[PA[*a]];
SWAP(*first, *a);
/* partition */
for(b = first; (++b < last) && ((x = Td[PA[*b]]) == v);) { }
if(((a = b) < last) && (x < v)) {
for(; (++b < last) && ((x = Td[PA[*b]]) <= v);) {
if(x == v) { SWAP(*b, *a); ++a; }
}
}
for(c = last; (b < --c) && ((x = Td[PA[*c]]) == v);) { }
if((b < (d = c)) && (x > v)) {
for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) {
if(x == v) { SWAP(*c, *d); --d; }
}
}
for(; b < c;) {
SWAP(*b, *c);
for(; (++b < c) && ((x = Td[PA[*b]]) <= v);) {
if(x == v) { SWAP(*b, *a); ++a; }
}
for(; (b < --c) && ((x = Td[PA[*c]]) >= v);) {
if(x == v) { SWAP(*c, *d); --d; }
}
}
if(a <= d) {
c = b - 1;
if((s = a - first) > (t = b - a)) { s = t; }
for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
if((s = d - c) > (t = last - d - 1)) { s = t; }
for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
a = first + (b - a), c = last - (d - c);
b = (v <= Td[PA[*a] - 1]) ? a : ss_partition(PA, a, c, depth);
if((a - first) <= (last - c)) {
if((last - c) <= (c - b)) {
STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
STACK_PUSH(c, last, depth, limit);
last = a;
} else if((a - first) <= (c - b)) {
STACK_PUSH(c, last, depth, limit);
STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
last = a;
} else {
STACK_PUSH(c, last, depth, limit);
STACK_PUSH(first, a, depth, limit);
first = b, last = c, depth += 1, limit = ss_ilg(c - b);
}
} else {
if((a - first) <= (c - b)) {
STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
STACK_PUSH(first, a, depth, limit);
first = c;
} else if((last - c) <= (c - b)) {
STACK_PUSH(first, a, depth, limit);
STACK_PUSH(b, c, depth + 1, ss_ilg(c - b));
first = c;
} else {
STACK_PUSH(first, a, depth, limit);
STACK_PUSH(c, last, depth, limit);
first = b, last = c, depth += 1, limit = ss_ilg(c - b);
}
}
} else {
limit += 1;
if(Td[PA[*first] - 1] < v) {
first = ss_partition(PA, first, last, depth);
limit = ss_ilg(last - first);
}
depth += 1;
}
}
#undef STACK_SIZE
}
#endif /* (SS_BLOCKSIZE == 0) || (SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE) */
/*---------------------------------------------------------------------------*/
#if SS_BLOCKSIZE != 0
static INLINE
void
ss_blockswap(int *a, int *b, int n) {
int t;
for(; 0 < n; --n, ++a, ++b) {
t = *a, *a = *b, *b = t;
}
}
static INLINE
void
ss_rotate(int *first, int *middle, int *last) {
int *a, *b, t;
int l, r;
l = middle - first, r = last - middle;
for(; (0 < l) && (0 < r);) {
if(l == r) { ss_blockswap(first, middle, l); break; }
if(l < r) {
a = last - 1, b = middle - 1;
t = *a;
do {
*a-- = *b, *b-- = *a;
if(b < first) {
*a = t;
last = a;
if((r -= l + 1) <= l) { break; }
a -= 1, b = middle - 1;
t = *a;
}
} while(1);
} else {
a = first, b = middle;
t = *a;
do {
*a++ = *b, *b++ = *a;
if(last <= b) {
*a = t;
first = a + 1;
if((l -= r + 1) <= r) { break; }
a += 1, b = middle;
t = *a;
}
} while(1);
}
}
}
/*---------------------------------------------------------------------------*/
static
void
ss_inplacemerge(const unsigned char *T, const int *PA,
int *first, int *middle, int *last,
int depth) {
const int *p;
int *a, *b;
int len, half;
int q, r;
int x;
for(;;) {
if(*(last - 1) < 0) { x = 1; p = PA + ~*(last - 1); }
else { x = 0; p = PA + *(last - 1); }
for(a = first, len = middle - first, half = len >> 1, r = -1;
0 < len;
len = half, half >>= 1) {
b = a + half;
q = ss_compare(T, PA + ((0 <= *b) ? *b : ~*b), p, depth);
if(q < 0) {
a = b + 1;
half -= (len & 1) ^ 1;
} else {
r = q;
}
}
if(a < middle) {
if(r == 0) { *a = ~*a; }
ss_rotate(a, middle, last);
last -= middle - a;
middle = a;
if(first == middle) { break; }
}
--last;
if(x != 0) { while(*--last < 0) { } }
if(middle == last) { break; }
}
}
/*---------------------------------------------------------------------------*/
/* Merge-forward with internal buffer. */
static
void
ss_mergeforward(const unsigned char *T, const int *PA,
int *first, int *middle, int *last,
int *buf, int depth) {
int *a, *b, *c, *bufend;
int t;
int r;
bufend = buf + (middle - first) - 1;
ss_blockswap(buf, first, middle - first);
for(t = *(a = first), b = buf, c = middle;;) {
r = ss_compare(T, PA + *b, PA + *c, depth);
if(r < 0) {
do {
*a++ = *b;
if(bufend <= b) { *bufend = t; return; }
*b++ = *a;
} while(*b < 0);
} else if(r > 0) {
do {
*a++ = *c, *c++ = *a;
if(last <= c) {
while(b < bufend) { *a++ = *b, *b++ = *a; }
*a = *b, *b = t;
return;
}
} while(*c < 0);
} else {
*c = ~*c;
do {
*a++ = *b;
if(bufend <= b) { *bufend = t; return; }
*b++ = *a;
} while(*b < 0);
do {
*a++ = *c, *c++ = *a;
if(last <= c) {
while(b < bufend) { *a++ = *b, *b++ = *a; }
*a = *b, *b = t;
return;
}
} while(*c < 0);
}
}
}
/* Merge-backward with internal buffer. */
static
void
ss_mergebackward(const unsigned char *T, const int *PA,
int *first, int *middle, int *last,
int *buf, int depth) {
const int *p1, *p2;
int *a, *b, *c, *bufend;
int t;
int r;
int x;
bufend = buf + (last - middle) - 1;
ss_blockswap(buf, middle, last - middle);
x = 0;
if(*bufend < 0) { p1 = PA + ~*bufend; x |= 1; }
else { p1 = PA + *bufend; }
if(*(middle - 1) < 0) { p2 = PA + ~*(middle - 1); x |= 2; }
else { p2 = PA + *(middle - 1); }
for(t = *(a = last - 1), b = bufend, c = middle - 1;;) {
r = ss_compare(T, p1, p2, depth);
if(0 < r) {
if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; }
*a-- = *b;
if(b <= buf) { *buf = t; break; }
*b-- = *a;
if(*b < 0) { p1 = PA + ~*b; x |= 1; }
else { p1 = PA + *b; }
} else if(r < 0) {
if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; }
*a-- = *c, *c-- = *a;
if(c < first) {
while(buf < b) { *a-- = *b, *b-- = *a; }
*a = *b, *b = t;
break;
}
if(*c < 0) { p2 = PA + ~*c; x |= 2; }
else { p2 = PA + *c; }
} else {
if(x & 1) { do { *a-- = *b, *b-- = *a; } while(*b < 0); x ^= 1; }
*a-- = ~*b;
if(b <= buf) { *buf = t; break; }
*b-- = *a;
if(x & 2) { do { *a-- = *c, *c-- = *a; } while(*c < 0); x ^= 2; }
*a-- = *c, *c-- = *a;
if(c < first) {
while(buf < b) { *a-- = *b, *b-- = *a; }
*a = *b, *b = t;
break;
}
if(*b < 0) { p1 = PA + ~*b; x |= 1; }
else { p1 = PA + *b; }
if(*c < 0) { p2 = PA + ~*c; x |= 2; }
else { p2 = PA + *c; }
}
}
}
/* D&C based merge. */
static
void
ss_swapmerge(const unsigned char *T, const int *PA,
int *first, int *middle, int *last,
int *buf, int bufsize, int depth) {
#define STACK_SIZE SS_SMERGE_STACKSIZE
#define GETIDX(a) ((0 <= (a)) ? (a) : (~(a)))
#define MERGE_CHECK(a, b, c)\
do {\
if(((c) & 1) ||\
(((c) & 2) && (ss_compare(T, PA + GETIDX(*((a) - 1)), PA + *(a), depth) == 0))) {\
*(a) = ~*(a);\
}\
if(((c) & 4) && ((ss_compare(T, PA + GETIDX(*((b) - 1)), PA + *(b), depth) == 0))) {\
*(b) = ~*(b);\
}\
} while(0)
struct { int *a, *b, *c; int d; } stack[STACK_SIZE];
int *l, *r, *lm, *rm;
int m, len, half;
int ssize;
int check, next;
for(check = 0, ssize = 0;;) {
if((last - middle) <= bufsize) {
if((first < middle) && (middle < last)) {
ss_mergebackward(T, PA, first, middle, last, buf, depth);
}
MERGE_CHECK(first, last, check);
STACK_POP(first, middle, last, check);
continue;
}
if((middle - first) <= bufsize) {
if(first < middle) {
ss_mergeforward(T, PA, first, middle, last, buf, depth);
}
MERGE_CHECK(first, last, check);
STACK_POP(first, middle, last, check);
continue;
}
for(m = 0, len = MIN(middle - first, last - middle), half = len >> 1;
0 < len;
len = half, half >>= 1) {
if(ss_compare(T, PA + GETIDX(*(middle + m + half)),
PA + GETIDX(*(middle - m - half - 1)), depth) < 0) {
m += half + 1;
half -= (len & 1) ^ 1;
}
}
if(0 < m) {
lm = middle - m, rm = middle + m;
ss_blockswap(lm, middle, m);
l = r = middle, next = 0;
if(rm < last) {
if(*rm < 0) {
*rm = ~*rm;
if(first < lm) { for(; *--l < 0;) { } next |= 4; }
next |= 1;
} else if(first < lm) {
for(; *r < 0; ++r) { }
next |= 2;
}
}
if((l - first) <= (last - r)) {
STACK_PUSH(r, rm, last, (next & 3) | (check & 4));
middle = lm, last = l, check = (check & 3) | (next & 4);
} else {
if((next & 2) && (r == middle)) { next ^= 6; }
STACK_PUSH(first, lm, l, (check & 3) | (next & 4));
first = r, middle = rm, check = (next & 3) | (check & 4);
}
} else {
if(ss_compare(T, PA + GETIDX(*(middle - 1)), PA + *middle, depth) == 0) {
*middle = ~*middle;
}
MERGE_CHECK(first, last, check);
STACK_POP(first, middle, last, check);
}
}
#undef STACK_SIZE
}
#endif /* SS_BLOCKSIZE != 0 */
/*---------------------------------------------------------------------------*/
/* Substring sort */
static
void
sssort(const unsigned char *T, const int *PA,
int *first, int *last,
int *buf, int bufsize,
int depth, int n, int lastsuffix) {
int *a;
#if SS_BLOCKSIZE != 0
int *b, *middle, *curbuf;
int j, k, curbufsize, limit;
#endif
int i;
if(lastsuffix != 0) { ++first; }
#if SS_BLOCKSIZE == 0
ss_mintrosort(T, PA, first, last, depth);
#else
if((bufsize < SS_BLOCKSIZE) &&
(bufsize < (last - first)) &&
(bufsize < (limit = ss_isqrt(last - first)))) {
if(SS_BLOCKSIZE < limit) { limit = SS_BLOCKSIZE; }
buf = middle = last - limit, bufsize = limit;
} else {
middle = last, limit = 0;
}
for(a = first, i = 0; SS_BLOCKSIZE < (middle - a); a += SS_BLOCKSIZE, ++i) {
#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
ss_mintrosort(T, PA, a, a + SS_BLOCKSIZE, depth);
#elif 1 < SS_BLOCKSIZE
ss_insertionsort(T, PA, a, a + SS_BLOCKSIZE, depth);
#endif
curbufsize = last - (a + SS_BLOCKSIZE);
curbuf = a + SS_BLOCKSIZE;
if(curbufsize <= bufsize) { curbufsize = bufsize, curbuf = buf; }
for(b = a, k = SS_BLOCKSIZE, j = i; j & 1; b -= k, k <<= 1, j >>= 1) {
ss_swapmerge(T, PA, b - k, b, b + k, curbuf, curbufsize, depth);
}
}
#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
ss_mintrosort(T, PA, a, middle, depth);
#elif 1 < SS_BLOCKSIZE
ss_insertionsort(T, PA, a, middle, depth);
#endif
for(k = SS_BLOCKSIZE; i != 0; k <<= 1, i >>= 1) {
if(i & 1) {
ss_swapmerge(T, PA, a - k, a, middle, buf, bufsize, depth);
a -= k;
}
}
if(limit != 0) {
#if SS_INSERTIONSORT_THRESHOLD < SS_BLOCKSIZE
ss_mintrosort(T, PA, middle, last, depth);
#elif 1 < SS_BLOCKSIZE
ss_insertionsort(T, PA, middle, last, depth);
#endif
ss_inplacemerge(T, PA, first, middle, last, depth);
}
#endif
if(lastsuffix != 0) {
/* Insert last type B* suffix. */
int PAi[2]; PAi[0] = PA[*(first - 1)], PAi[1] = n - 2;
for(a = first, i = *(first - 1);
(a < last) && ((*a < 0) || (0 < ss_compare(T, &(PAi[0]), PA + *a, depth)));
++a) {
*(a - 1) = *a;
}
*(a - 1) = i;
}
}
/*---------------------------------------------------------------------------*/
static INLINE
int
tr_ilg(int n) {
return (n & 0xffff0000) ?
((n & 0xff000000) ?
24 + lg_table[(n >> 24) & 0xff] :
16 + lg_table[(n >> 16) & 0xff]) :
((n & 0x0000ff00) ?
8 + lg_table[(n >> 8) & 0xff] :
0 + lg_table[(n >> 0) & 0xff]);
}
/*---------------------------------------------------------------------------*/
/* Simple insertionsort for small size groups. */
static
void
tr_insertionsort(const int *ISAd, int *first, int *last) {
int *a, *b;
int t, r;
for(a = first + 1; a < last; ++a) {
for(t = *a, b = a - 1; 0 > (r = ISAd[t] - ISAd[*b]);) {
do { *(b + 1) = *b; } while((first <= --b) && (*b < 0));
if(b < first) { break; }
}
if(r == 0) { *b = ~*b; }
*(b + 1) = t;
}
}
/*---------------------------------------------------------------------------*/
static INLINE
void
tr_fixdown(const int *ISAd, int *SA, int i, int size) {
int j, k;
int v;
int c, d, e;
for(v = SA[i], c = ISAd[v]; (j = 2 * i + 1) < size; SA[i] = SA[k], i = k) {
d = ISAd[SA[k = j++]];
if(d < (e = ISAd[SA[j]])) { k = j; d = e; }
if(d <= c) { break; }
}
SA[i] = v;
}
/* Simple top-down heapsort. */
static
void
tr_heapsort(const int *ISAd, int *SA, int size) {
int i, m;
int t;
m = size;
if((size % 2) == 0) {
m--;
if(ISAd[SA[m / 2]] < ISAd[SA[m]]) { SWAP(SA[m], SA[m / 2]); }
}
for(i = m / 2 - 1; 0 <= i; --i) { tr_fixdown(ISAd, SA, i, m); }
if((size % 2) == 0) { SWAP(SA[0], SA[m]); tr_fixdown(ISAd, SA, 0, m); }
for(i = m - 1; 0 < i; --i) {
t = SA[0], SA[0] = SA[i];
tr_fixdown(ISAd, SA, 0, i);
SA[i] = t;
}
}
/*---------------------------------------------------------------------------*/
/* Returns the median of three elements. */
static INLINE
int *
tr_median3(const int *ISAd, int *v1, int *v2, int *v3) {
int *t;
if(ISAd[*v1] > ISAd[*v2]) { SWAP(v1, v2); }
if(ISAd[*v2] > ISAd[*v3]) {
if(ISAd[*v1] > ISAd[*v3]) { return v1; }
else { return v3; }
}
return v2;
}
/* Returns the median of five elements. */
static INLINE
int *
tr_median5(const int *ISAd,
int *v1, int *v2, int *v3, int *v4, int *v5) {
int *t;
if(ISAd[*v2] > ISAd[*v3]) { SWAP(v2, v3); }
if(ISAd[*v4] > ISAd[*v5]) { SWAP(v4, v5); }
if(ISAd[*v2] > ISAd[*v4]) { SWAP(v2, v4); SWAP(v3, v5); }
if(ISAd[*v1] > ISAd[*v3]) { SWAP(v1, v3); }
if(ISAd[*v1] > ISAd[*v4]) { SWAP(v1, v4); SWAP(v3, v5); }
if(ISAd[*v3] > ISAd[*v4]) { return v4; }
return v3;
}
/* Returns the pivot element. */
static INLINE
int *
tr_pivot(const int *ISAd, int *first, int *last) {
int *middle;
int t;
t = last - first;
middle = first + t / 2;
if(t <= 512) {
if(t <= 32) {
return tr_median3(ISAd, first, middle, last - 1);
} else {
t >>= 2;
return tr_median5(ISAd, first, first + t, middle, last - 1 - t, last - 1);
}
}
t >>= 3;
first = tr_median3(ISAd, first, first + t, first + (t << 1));
middle = tr_median3(ISAd, middle - t, middle, middle + t);
last = tr_median3(ISAd, last - 1 - (t << 1), last - 1 - t, last - 1);
return tr_median3(ISAd, first, middle, last);
}
/*---------------------------------------------------------------------------*/
typedef struct _trbudget_t trbudget_t;
struct _trbudget_t {
int chance;
int remain;
int incval;
int count;
};
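/* trbudget bounds the work done by tr_introsort: each trbudget_check consumes
   `size` from `remain`; when `remain` runs out, one of `chance` refills of
   `incval` is spent; once the chances are exhausted the check fails and
   `count` accumulates the amount of work that was skipped. */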
static INLINE
void
trbudget_init(trbudget_t *budget, int chance, int incval) {
budget->chance = chance;
budget->remain = budget->incval = incval;
}
static INLINE
int
trbudget_check(trbudget_t *budget, int size) {
if(size <= budget->remain) { budget->remain -= size; return 1; }
if(budget->chance == 0) { budget->count += size; return 0; }
budget->remain += budget->incval - size;
budget->chance -= 1;
return 1;
}
/*---------------------------------------------------------------------------*/
static INLINE
void
tr_partition(const int *ISAd,
int *first, int *middle, int *last,
int **pa, int **pb, int v) {
int *a, *b, *c, *d, *e, *f;
int t, s;
int x = 0;
for(b = middle - 1; (++b < last) && ((x = ISAd[*b]) == v);) { }
if(((a = b) < last) && (x < v)) {
for(; (++b < last) && ((x = ISAd[*b]) <= v);) {
if(x == v) { SWAP(*b, *a); ++a; }
}
}
for(c = last; (b < --c) && ((x = ISAd[*c]) == v);) { }
if((b < (d = c)) && (x > v)) {
for(; (b < --c) && ((x = ISAd[*c]) >= v);) {
if(x == v) { SWAP(*c, *d); --d; }
}
}
for(; b < c;) {
SWAP(*b, *c);
for(; (++b < c) && ((x = ISAd[*b]) <= v);) {
if(x == v) { SWAP(*b, *a); ++a; }
}
for(; (b < --c) && ((x = ISAd[*c]) >= v);) {
if(x == v) { SWAP(*c, *d); --d; }
}
}
if(a <= d) {
c = b - 1;
if((s = a - first) > (t = b - a)) { s = t; }
for(e = first, f = b - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
if((s = d - c) > (t = last - d - 1)) { s = t; }
for(e = b, f = last - s; 0 < s; --s, ++e, ++f) { SWAP(*e, *f); }
first += (b - a), last -= (d - c);
}
*pa = first, *pb = last;
}
static
void
tr_copy(int *ISA, const int *SA,
int *first, int *a, int *b, int *last,
int depth) {
/* sort suffixes of middle partition
by using sorted order of suffixes of left and right partition. */
int *c, *d, *e;
int s, v;
v = b - SA - 1;
for(c = first, d = a - 1; c <= d; ++c) {
if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
*++d = s;
ISA[s] = d - SA;
}
}
for(c = last - 1, e = d + 1, d = b; e < d; --c) {
if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
*--d = s;
ISA[s] = d - SA;
}
}
}
static
void
tr_partialcopy(int *ISA, const int *SA,
int *first, int *a, int *b, int *last,
int depth) {
int *c, *d, *e;
int s, v;
int rank, lastrank, newrank = -1;
v = b - SA - 1;
lastrank = -1;
for(c = first, d = a - 1; c <= d; ++c) {
if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
*++d = s;
rank = ISA[s + depth];
if(lastrank != rank) { lastrank = rank; newrank = d - SA; }
ISA[s] = newrank;
}
}
lastrank = -1;
for(e = d; first <= e; --e) {
rank = ISA[*e];
if(lastrank != rank) { lastrank = rank; newrank = e - SA; }
if(newrank != rank) { ISA[*e] = newrank; }
}
lastrank = -1;
for(c = last - 1, e = d + 1, d = b; e < d; --c) {
if((0 <= (s = *c - depth)) && (ISA[s] == v)) {
*--d = s;
rank = ISA[s + depth];
if(lastrank != rank) { lastrank = rank; newrank = d - SA; }
ISA[s] = newrank;
}
}
}
static
void
tr_introsort(int *ISA, const int *ISAd,
int *SA, int *first, int *last,
trbudget_t *budget) {
#define STACK_SIZE TR_STACKSIZE
struct { const int *a; int *b, *c; int d, e; }stack[STACK_SIZE];
int *a, *b, *c;
int t;
int v, x = 0;
int incr = ISAd - ISA;
int limit, next;
int ssize, trlink = -1;
for(ssize = 0, limit = tr_ilg(last - first);;) {
if(limit < 0) {
if(limit == -1) {
/* tandem repeat partition */
tr_partition(ISAd - incr, first, first, last, &a, &b, last - SA - 1);
/* update ranks */
if(a < last) {
for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; }
}
if(b < last) {
for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; }
}
/* push */
if(1 < (b - a)) {
STACK_PUSH5(NULL, a, b, 0, 0);
STACK_PUSH5(ISAd - incr, first, last, -2, trlink);
trlink = ssize - 2;
}
if((a - first) <= (last - b)) {
if(1 < (a - first)) {
STACK_PUSH5(ISAd, b, last, tr_ilg(last - b), trlink);
last = a, limit = tr_ilg(a - first);
} else if(1 < (last - b)) {
first = b, limit = tr_ilg(last - b);
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
} else {
if(1 < (last - b)) {
STACK_PUSH5(ISAd, first, a, tr_ilg(a - first), trlink);
first = b, limit = tr_ilg(last - b);
} else if(1 < (a - first)) {
last = a, limit = tr_ilg(a - first);
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
}
} else if(limit == -2) {
/* tandem repeat copy */
a = stack[--ssize].b, b = stack[ssize].c;
if(stack[ssize].d == 0) {
tr_copy(ISA, SA, first, a, b, last, ISAd - ISA);
} else {
if(0 <= trlink) { stack[trlink].d = -1; }
tr_partialcopy(ISA, SA, first, a, b, last, ISAd - ISA);
}
STACK_POP5(ISAd, first, last, limit, trlink);
} else {
/* sorted partition */
if(0 <= *first) {
a = first;
do { ISA[*a] = a - SA; } while((++a < last) && (0 <= *a));
first = a;
}
if(first < last) {
a = first; do { *a = ~*a; } while(*++a < 0);
next = (ISA[*a] != ISAd[*a]) ? tr_ilg(a - first + 1) : -1;
if(++a < last) { for(b = first, v = a - SA - 1; b < a; ++b) { ISA[*b] = v; } }
/* push */
if(trbudget_check(budget, a - first)) {
if((a - first) <= (last - a)) {
STACK_PUSH5(ISAd, a, last, -3, trlink);
ISAd += incr, last = a, limit = next;
} else {
if(1 < (last - a)) {
STACK_PUSH5(ISAd + incr, first, a, next, trlink);
first = a, limit = -3;
} else {
ISAd += incr, last = a, limit = next;
}
}
} else {
if(0 <= trlink) { stack[trlink].d = -1; }
if(1 < (last - a)) {
first = a, limit = -3;
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
}
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
}
continue;
}
if((last - first) <= TR_INSERTIONSORT_THRESHOLD) {
tr_insertionsort(ISAd, first, last);
limit = -3;
continue;
}
if(limit-- == 0) {
tr_heapsort(ISAd, first, last - first);
for(a = last - 1; first < a; a = b) {
for(x = ISAd[*a], b = a - 1; (first <= b) && (ISAd[*b] == x); --b) { *b = ~*b; }
}
limit = -3;
continue;
}
/* choose pivot */
a = tr_pivot(ISAd, first, last);
SWAP(*first, *a);
v = ISAd[*first];
/* partition */
tr_partition(ISAd, first, first + 1, last, &a, &b, v);
if((last - first) != (b - a)) {
next = (ISA[*a] != v) ? tr_ilg(b - a) : -1;
/* update ranks */
for(c = first, v = a - SA - 1; c < a; ++c) { ISA[*c] = v; }
if(b < last) { for(c = a, v = b - SA - 1; c < b; ++c) { ISA[*c] = v; } }
/* push */
if((1 < (b - a)) && (trbudget_check(budget, b - a))) {
if((a - first) <= (last - b)) {
if((last - b) <= (b - a)) {
if(1 < (a - first)) {
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
STACK_PUSH5(ISAd, b, last, limit, trlink);
last = a;
} else if(1 < (last - b)) {
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
first = b;
} else {
ISAd += incr, first = a, last = b, limit = next;
}
} else if((a - first) <= (b - a)) {
if(1 < (a - first)) {
STACK_PUSH5(ISAd, b, last, limit, trlink);
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
last = a;
} else {
STACK_PUSH5(ISAd, b, last, limit, trlink);
ISAd += incr, first = a, last = b, limit = next;
}
} else {
STACK_PUSH5(ISAd, b, last, limit, trlink);
STACK_PUSH5(ISAd, first, a, limit, trlink);
ISAd += incr, first = a, last = b, limit = next;
}
} else {
if((a - first) <= (b - a)) {
if(1 < (last - b)) {
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
STACK_PUSH5(ISAd, first, a, limit, trlink);
first = b;
} else if(1 < (a - first)) {
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
last = a;
} else {
ISAd += incr, first = a, last = b, limit = next;
}
} else if((last - b) <= (b - a)) {
if(1 < (last - b)) {
STACK_PUSH5(ISAd, first, a, limit, trlink);
STACK_PUSH5(ISAd + incr, a, b, next, trlink);
first = b;
} else {
STACK_PUSH5(ISAd, first, a, limit, trlink);
ISAd += incr, first = a, last = b, limit = next;
}
} else {
STACK_PUSH5(ISAd, first, a, limit, trlink);
STACK_PUSH5(ISAd, b, last, limit, trlink);
ISAd += incr, first = a, last = b, limit = next;
}
}
} else {
if((1 < (b - a)) && (0 <= trlink)) { stack[trlink].d = -1; }
if((a - first) <= (last - b)) {
if(1 < (a - first)) {
STACK_PUSH5(ISAd, b, last, limit, trlink);
last = a;
} else if(1 < (last - b)) {
first = b;
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
} else {
if(1 < (last - b)) {
STACK_PUSH5(ISAd, first, a, limit, trlink);
first = b;
} else if(1 < (a - first)) {
last = a;
} else {
STACK_POP5(ISAd, first, last, limit, trlink);
}
}
}
} else {
if(trbudget_check(budget, last - first)) {
limit = tr_ilg(last - first), ISAd += incr;
} else {
if(0 <= trlink) { stack[trlink].d = -1; }
STACK_POP5(ISAd, first, last, limit, trlink);
}
}
}
#undef STACK_SIZE
}
/*---------------------------------------------------------------------------*/
/* Tandem repeat sort */
static
void
trsort(int *ISA, int *SA, int n, int depth) {
int *ISAd;
int *first, *last;
trbudget_t budget;
int t, skip, unsorted;
trbudget_init(&budget, tr_ilg(n) * 2 / 3, n);
/* trbudget_init(&budget, tr_ilg(n) * 3 / 4, n); */
for(ISAd = ISA + depth; -n < *SA; ISAd += ISAd - ISA) {
first = SA;
skip = 0;
unsorted = 0;
do {
if((t = *first) < 0) { first -= t; skip += t; }
else {
if(skip != 0) { *(first + skip) = skip; skip = 0; }
last = SA + ISA[t] + 1;
if(1 < (last - first)) {
budget.count = 0;
tr_introsort(ISA, ISAd, SA, first, last, &budget);
if(budget.count != 0) { unsorted += budget.count; }
else { skip = first - last; }
} else if((last - first) == 1) {
skip = -1;
}
first = last;
}
} while(first < (SA + n));
if(skip != 0) { *(first + skip) = skip; }
if(unsorted == 0) { break; }
}
}
/*---------------------------------------------------------------------------*/
/* Sorts suffixes of type B*. */
static
int
sort_typeBstar(const unsigned char *T, int *SA,
int *bucket_A, int *bucket_B,
int n, int openMP) {
int *PAb, *ISAb, *buf;
#ifdef LIBBSC_OPENMP
int *curbuf;
int l;
#endif
int i, j, k, t, m, bufsize;
int c0, c1;
#ifdef LIBBSC_OPENMP
int d0, d1;
#endif
(void)openMP;
/* Initialize bucket arrays. */
for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; }
for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; }
/* Count the number of occurrences of the first one or two characters of each
type A, B and B* suffix. Moreover, store the beginning position of all
type B* suffixes into the array SA. */
for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) {
/* type A suffix. */
do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1));
if(0 <= i) {
/* type B* suffix. */
++BUCKET_BSTAR(c0, c1);
SA[--m] = i;
/* type B suffix. */
for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) {
++BUCKET_B(c0, c1);
}
}
}
m = n - m;
/*
note:
A type B* suffix is lexicographically smaller than a type B suffix that
begins with the same first two characters.
*/
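  /* Illustrative example: with c0 = 'a', c1 = 'b', a type B* suffix looks like
     "ab" followed by a character smaller than 'b' (e.g. "aba..."), while a
     plain type B suffix is followed by a character >= 'b' (e.g. "abb..."),
     so the B* suffix compares smaller. */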
/* Calculate the index of start/end point of each bucket. */
for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) {
t = i + BUCKET_A(c0);
BUCKET_A(c0) = i + j; /* start point */
i = t + BUCKET_B(c0, c0);
for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) {
j += BUCKET_BSTAR(c0, c1);
BUCKET_BSTAR(c0, c1) = j; /* end point */
i += BUCKET_B(c0, c1);
}
}
if(0 < m) {
/* Sort the type B* suffixes by their first two characters. */
PAb = SA + n - m; ISAb = SA + m;
for(i = m - 2; 0 <= i; --i) {
t = PAb[i], c0 = T[t], c1 = T[t + 1];
SA[--BUCKET_BSTAR(c0, c1)] = i;
}
t = PAb[m - 1], c0 = T[t], c1 = T[t + 1];
SA[--BUCKET_BSTAR(c0, c1)] = m - 1;
/* Sort the type B* substrings using sssort. */
#ifdef LIBBSC_OPENMP
if (openMP)
{
buf = SA + m;
c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m;
#pragma omp parallel default(shared) private(bufsize, curbuf, k, l, d0, d1)
{
bufsize = (n - (2 * m)) / omp_get_num_threads();
curbuf = buf + omp_get_thread_num() * bufsize;
k = 0;
for(;;) {
#pragma omp critical(sssort_lock)
{
if(0 < (l = j)) {
d0 = c0, d1 = c1;
do {
k = BUCKET_BSTAR(d0, d1);
if(--d1 <= d0) {
d1 = ALPHABET_SIZE - 1;
if(--d0 < 0) { break; }
}
} while(((l - k) <= 1) && (0 < (l = k)));
c0 = d0, c1 = d1, j = k;
}
}
if(l == 0) { break; }
sssort(T, PAb, SA + k, SA + l,
curbuf, bufsize, 2, n, *(SA + k) == (m - 1));
}
}
}
else
{
buf = SA + m, bufsize = n - (2 * m);
for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
i = BUCKET_BSTAR(c0, c1);
if(1 < (j - i)) {
sssort(T, PAb, SA + i, SA + j,
buf, bufsize, 2, n, *(SA + i) == (m - 1));
}
}
}
}
#else
buf = SA + m, bufsize = n - (2 * m);
for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
i = BUCKET_BSTAR(c0, c1);
if(1 < (j - i)) {
sssort(T, PAb, SA + i, SA + j,
buf, bufsize, 2, n, *(SA + i) == (m - 1));
}
}
}
#endif
/* Compute ranks of type B* substrings. */
for(i = m - 1; 0 <= i; --i) {
if(0 <= SA[i]) {
j = i;
do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i]));
SA[i + 1] = i - j;
if(i <= 0) { break; }
}
j = i;
do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0);
ISAb[SA[i]] = j;
}
/* Construct the inverse suffix array of type B* suffixes using trsort. */
trsort(ISAb, SA, m, 1);
    /* Set the sorted order of type B* suffixes. */
for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) {
for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { }
if(0 <= i) {
t = i;
for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { }
SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t;
}
}
/* Calculate the index of start/end point of each bucket. */
BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */
for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) {
i = BUCKET_A(c0 + 1) - 1;
for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) {
t = i - BUCKET_B(c0, c1);
BUCKET_B(c0, c1) = i; /* end point */
/* Move all type B* suffixes to the correct position. */
for(i = t, j = BUCKET_BSTAR(c0, c1);
j <= k;
--i, --k) { SA[i] = SA[k]; }
}
BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */
BUCKET_B(c0, c0) = i; /* end point */
}
}
return m;
}
/* Constructs the suffix array by using the sorted order of type B* suffixes. */
static
void
construct_SA(const unsigned char *T, int *SA,
int *bucket_A, int *bucket_B,
int n, int m) {
int *i, *j, *k;
int s;
int c0, c1, c2;
if(0 < m) {
/* Construct the sorted order of type B suffixes by using
the sorted order of type B* suffixes. */
for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
/* Scan the suffix array from right to left. */
for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
i <= j;
--j) {
if(0 < (s = *j)) {
assert(T[s] == c1);
assert(((s + 1) < n) && (T[s] <= T[s + 1]));
assert(T[s - 1] <= T[s]);
*j = ~s;
c0 = T[--s];
if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
if(c0 != c2) {
if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
k = SA + BUCKET_B(c2 = c0, c1);
}
assert(k < j); assert(k != NULL);
*k-- = s;
} else {
assert(((s == 0) && (T[s] == c1)) || (s < 0));
*j = ~s;
}
}
}
}
/* Construct the suffix array by using
the sorted order of type B suffixes. */
k = SA + BUCKET_A(c2 = T[n - 1]);
*k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1);
/* Scan the suffix array from left to right. */
for(i = SA, j = SA + n; i < j; ++i) {
if(0 < (s = *i)) {
assert(T[s - 1] >= T[s]);
c0 = T[--s];
if((s == 0) || (T[s - 1] < c0)) { s = ~s; }
if(c0 != c2) {
BUCKET_A(c2) = k - SA;
k = SA + BUCKET_A(c2 = c0);
}
assert(i < k);
*k++ = s;
} else {
assert(s < 0);
*i = ~s;
}
}
}
/* Constructs the Burrows-Wheeler transformed string directly
   by using the sorted order of type B* suffixes. */
static
int
construct_BWT(const unsigned char *T, int *SA,
int *bucket_A, int *bucket_B,
int n, int m) {
int *i, *j, *k, *orig;
int s;
int c0, c1, c2;
if(0 < m) {
/* Construct the sorted order of type B suffixes by using
the sorted order of type B* suffixes. */
for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
/* Scan the suffix array from right to left. */
for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
i <= j;
--j) {
if(0 < (s = *j)) {
assert(T[s] == c1);
assert(((s + 1) < n) && (T[s] <= T[s + 1]));
assert(T[s - 1] <= T[s]);
c0 = T[--s];
*j = ~((int)c0);
if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
if(c0 != c2) {
if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
k = SA + BUCKET_B(c2 = c0, c1);
}
assert(k < j); assert(k != NULL);
*k-- = s;
} else if(s != 0) {
*j = ~s;
#ifndef NDEBUG
} else {
assert(T[s] == c1);
#endif
}
}
}
}
/* Construct the BWTed string by using
the sorted order of type B suffixes. */
k = SA + BUCKET_A(c2 = T[n - 1]);
*k++ = (T[n - 2] < c2) ? ~((int)T[n - 2]) : (n - 1);
/* Scan the suffix array from left to right. */
for(i = SA, j = SA + n, orig = SA; i < j; ++i) {
if(0 < (s = *i)) {
assert(T[s - 1] >= T[s]);
c0 = T[--s];
*i = c0;
if((0 < s) && (T[s - 1] < c0)) { s = ~((int)T[s - 1]); }
if(c0 != c2) {
BUCKET_A(c2) = k - SA;
k = SA + BUCKET_A(c2 = c0);
}
assert(i < k);
*k++ = s;
} else if(s != 0) {
*i = ~s;
} else {
orig = i;
}
}
return orig - SA;
}
/* Constructs the Burrows-Wheeler transformed string directly
   by using the sorted order of type B* suffixes. */
static
int
construct_BWT_indexes(const unsigned char *T, int *SA,
int *bucket_A, int *bucket_B,
int n, int m,
unsigned char * num_indexes, int * indexes) {
int *i, *j, *k, *orig;
int s;
int c0, c1, c2;
int mod = n / 8;
{
mod |= mod >> 1; mod |= mod >> 2;
mod |= mod >> 4; mod |= mod >> 8;
mod |= mod >> 16; mod >>= 1;
*num_indexes = (unsigned char)((n - 1) / (mod + 1));
}
if(0 < m) {
/* Construct the sorted order of type B suffixes by using
the sorted order of type B* suffixes. */
for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
/* Scan the suffix array from right to left. */
for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
i <= j;
--j) {
if(0 < (s = *j)) {
assert(T[s] == c1);
assert(((s + 1) < n) && (T[s] <= T[s + 1]));
assert(T[s - 1] <= T[s]);
if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = j - SA;
c0 = T[--s];
*j = ~((int)c0);
if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
if(c0 != c2) {
if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
k = SA + BUCKET_B(c2 = c0, c1);
}
assert(k < j); assert(k != NULL);
*k-- = s;
} else if(s != 0) {
*j = ~s;
#ifndef NDEBUG
} else {
assert(T[s] == c1);
#endif
}
}
}
}
/* Construct the BWTed string by using
the sorted order of type B suffixes. */
k = SA + BUCKET_A(c2 = T[n - 1]);
if (T[n - 2] < c2) {
if (((n - 1) & mod) == 0) indexes[(n - 1) / (mod + 1) - 1] = k - SA;
*k++ = ~((int)T[n - 2]);
}
else {
*k++ = n - 1;
}
/* Scan the suffix array from left to right. */
for(i = SA, j = SA + n, orig = SA; i < j; ++i) {
if(0 < (s = *i)) {
assert(T[s - 1] >= T[s]);
if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = i - SA;
c0 = T[--s];
*i = c0;
if(c0 != c2) {
BUCKET_A(c2) = k - SA;
k = SA + BUCKET_A(c2 = c0);
}
assert(i < k);
if((0 < s) && (T[s - 1] < c0)) {
if ((s & mod) == 0) indexes[s / (mod + 1) - 1] = k - SA;
*k++ = ~((int)T[s - 1]);
} else
*k++ = s;
} else if(s != 0) {
*i = ~s;
} else {
orig = i;
}
}
return orig - SA;
}
/*---------------------------------------------------------------------------*/
/*- Function -*/
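/* divsufsort() fills SA with the suffix array of T and returns 0 on success,
   -1 for invalid arguments, or -2 if the bucket allocations fail. */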
int
divsufsort(const unsigned char *T, int *SA, int n, int openMP) {
int *bucket_A, *bucket_B;
int m;
int err = 0;
/* Check arguments. */
if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; }
else if(n == 0) { return 0; }
else if(n == 1) { SA[0] = 0; return 0; }
else if(n == 2) { m = (T[0] < T[1]); SA[m ^ 1] = 0, SA[m] = 1; return 0; }
bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int));
bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int));
/* Suffixsort. */
if((bucket_A != NULL) && (bucket_B != NULL)) {
m = sort_typeBstar(T, SA, bucket_A, bucket_B, n, openMP);
construct_SA(T, SA, bucket_A, bucket_B, n, m);
} else {
err = -2;
}
free(bucket_B);
free(bucket_A);
return err;
}
int
divbwt(const unsigned char *T, unsigned char *U, int *A, int n, unsigned char * num_indexes, int * indexes, int openMP) {
int *B;
int *bucket_A, *bucket_B;
int m, pidx, i;
/* Check arguments. */
if((T == NULL) || (U == NULL) || (n < 0)) { return -1; }
else if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; }
if((B = A) == NULL) { B = (int *)malloc((size_t)(n + 1) * sizeof(int)); }
bucket_A = (int *)malloc(BUCKET_A_SIZE * sizeof(int));
bucket_B = (int *)malloc(BUCKET_B_SIZE * sizeof(int));
/* Burrows-Wheeler Transform. */
if((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) {
m = sort_typeBstar(T, B, bucket_A, bucket_B, n, openMP);
if (num_indexes == NULL || indexes == NULL) {
pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m);
} else {
pidx = construct_BWT_indexes(T, B, bucket_A, bucket_B, n, m, num_indexes, indexes);
}
/* Copy to output string. */
U[0] = T[n - 1];
for(i = 0; i < pidx; ++i) { U[i + 1] = (unsigned char)B[i]; }
for(i += 1; i < n; ++i) { U[i] = (unsigned char)B[i]; }
pidx += 1;
} else {
pidx = -2;
}
free(bucket_B);
free(bucket_A);
if(A == NULL) { free(B); }
return pidx;
}
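/* Hedged usage sketch (illustrative only, not part of the original library):
   it shows the typical calling convention of divbwt() above. The guard macro,
   helper name and buffers are hypothetical; divbwt() writes the transformed
   string into dst and returns the primary index (>= 1) on success or a
   negative value on error. */
#ifdef DIVBWT_USAGE_EXAMPLE
#include <stdio.h>
#include <stdlib.h>
static int divbwt_example(const unsigned char *src, int len) {
  unsigned char *dst;
  int pidx;
  dst = (unsigned char *)malloc((size_t)len);
  if(dst == NULL) { return -1; }
  /* A == NULL lets divbwt allocate its own temporary suffix array;
     num_indexes/indexes == NULL selects plain construct_BWT(). */
  pidx = divbwt(src, dst, NULL, len, NULL, NULL, 0 /* openMP off */);
  if(pidx < 0) { fprintf(stderr, "divbwt failed (%d)\n", pidx); }
  free(dst);
  return pidx;
}
#endif /* DIVBWT_USAGE_EXAMPLE */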
|
global.c | #include <stdio.h>
#include <unistd.h> /* sleep, usleep */
#include "../gptl.h"
#ifdef HAVE_MPI
#include <mpi.h>
#endif
#ifdef THREADED_OMP
#include <omp.h>
#endif
int main (int argc, char **argv)
{
int iam = 0;
int nranks = 1; /* number of MPI tasks (default 1) */
int nthreads = 1; /* number of threads (default 1) */
int iter;
int tnum = 0;
#ifdef HAVE_PAPI
int code;
#endif
int ret;
unsigned int nsec; /* number of seconds to sleep */
#ifdef HAVE_PAPI
int sub (int, int);
#endif
ret = GPTLsetoption (GPTLabort_on_error, 1);
#ifdef HAVE_PAPI
ret = GPTLevent_name_to_code ("PAPI_FP_OPS", &code);
if (ret == 0) {
printf ("Enabling option PAPI_FP_OPS\n");
ret = GPTLsetoption (code, 1);
} else {
printf ("Unable to get option for PAPI_FP_OPS\n");
}
#endif
#ifdef HAVE_MPI
if (MPI_Init (&argc, &argv) != MPI_SUCCESS) {
printf ("Failure from MPI_Init\n");
return 1;
}
ret = MPI_Comm_rank (MPI_COMM_WORLD, &iam);
ret = MPI_Comm_size (MPI_COMM_WORLD, &nranks);
#endif
ret = GPTLinitialize ();
ret = GPTLstart ("total");
#ifdef THREADED_OMP
nthreads = omp_get_max_threads ();
#pragma omp parallel for private (ret, tnum, nsec)
#endif
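/* ret, tnum and nsec are private, so with OpenMP enabled each thread times its
   own sleep of (nranks - iam + tnum) seconds under "nranks-iam+mythread". */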
for (iter = 0; iter < nthreads; ++iter) {
#ifdef THREADED_OMP
tnum = omp_get_thread_num ();
#endif
/* Test 1: threaded sleep */
ret = GPTLstart ("nranks-iam+mythread");
nsec = (unsigned int) (nranks - iam + tnum);
ret = sleep (nsec);
ret = GPTLstop ("nranks-iam+mythread");
}
/* Test 2: ranks 1-5 sleep(iam) seconds */
if (iam > 0 && iam < 6) {
ret = GPTLstart ("1-5_iam");
nsec = iam;
ret = sleep (nsec);
ret = GPTLstop ("1-5_iam");
}
#ifdef HAVE_PAPI
/* Test 3: PAPI */
ret = GPTLstart ("1e3*iam*mythread_FP_OPS");
ret = sub (iam, tnum);
ret = GPTLstop ("1e3*iam*mythread_FP_OPS");
#endif
ret = GPTLstop ("total");
ret = GPTLpr (iam);
if (iam == 0)
printf ("global: testing GPTLpr_summary...\n");
#ifdef HAVE_MPI
if (GPTLpr_summary (MPI_COMM_WORLD) != 0)
return 1;
ret = MPI_Finalize ();
#else
if (GPTLpr_summary () != 0)
return 1;
#endif
if (GPTLfinalize () != 0)
return 1;
return 0;
}
#ifdef HAVE_PAPI
int sub (int iam, int tnum)
{
float sum;
int i;
sum = 1.7;
for (i = 0; i < iam*tnum; ++i)
sum *= 0.999;
printf ("sum=%f\n", sum);
return 0;
}
#endif
|
hermv_c_bsr_n_hi_trans.c | #include<string.h>
#ifdef _OPENMP
#include<omp.h>
#endif
#include"alphasparse/opt.h"
#include "alphasparse/kernel.h"
#include "alphasparse/util.h"
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
const ALPHA_SPMAT_BSR *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
const ALPHA_INT m = A->rows * A->block_size;
const ALPHA_INT n = A->cols * A->block_size;
const ALPHA_INT bs = A->block_size;
const ALPHA_INT bs2 = bs * bs;
// assert(m==n);
ALPHA_INT b_rows = A->rows;
ALPHA_INT b_cols = A->cols;
if (b_rows != b_cols)
return ALPHA_SPARSE_STATUS_INVALID_VALUE;
ALPHA_INT thread_num = alpha_get_thread_num();
ALPHA_INT partition[thread_num + 1];
balanced_partition_row_by_nnz(A->rows_end, b_rows, thread_num, partition);
ALPHA_Number **tmp = (ALPHA_Number **)malloc(sizeof(ALPHA_Number *) * thread_num);
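    /* Each thread accumulates into its own length (b_rows * bs) scratch vector:
       every stored upper-triangle block also updates the mirrored block column,
       which may belong to rows assigned to other threads, so the per-thread
       buffers are reduced into y only after the parallel region. */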
#ifdef _OPENMP
#pragma omp parallel num_threads(thread_num)
#endif
{
const ALPHA_INT tid = alpha_get_thread_id();
const ALPHA_INT local_m_s = partition[tid];
const ALPHA_INT local_m_e = partition[tid + 1];
tmp[tid] = (ALPHA_Number *)malloc(sizeof(ALPHA_Number) * b_rows * bs);
memset(tmp[tid], 0, sizeof(ALPHA_Number) * b_rows * bs);
if (A->block_layout == ALPHA_SPARSE_LAYOUT_ROW_MAJOR)
{
for (ALPHA_INT br = local_m_s; br < local_m_e; br++)
{
ALPHA_INT row = br * bs;
ALPHA_INT block_start = A->rows_start[br], block_end = A->rows_end[br];
ALPHA_INT upper_start = alpha_lower_bound(&A->col_indx[block_start], &A->col_indx[block_end], br) - A->col_indx;
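            /* upper_start skips stored blocks strictly below the diagonal, so
               only blocks with bc >= br are visited; each off-diagonal block
               contributes to its own block row and, mirrored, to its block
               column. */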
for (ALPHA_INT ai = upper_start; ai < block_end; ai++)
{
ALPHA_INT bc = A->col_indx[ai];
ALPHA_INT col = bc * bs;
ALPHA_INT a0_idx = ai * bs2;
// diagonal block containing diagonal entry
if (bc == br)
{
for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
{
// diagonal entry A(row+b_row, col+b_col)
alpha_madde(tmp[tid][b_row + row], A->values[a0_idx + b_row * (bs + 1)], x[col + b_row]);
for (ALPHA_INT b_col = b_row + 1; b_col < bs; b_col++)
{
alpha_madde_2c(tmp[tid][b_row + row], A->values[a0_idx + b_row * bs + b_col], x[col + b_col]);
alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_row * bs + b_col], x[row + b_row]);
}
}
}
else
{
for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
{
for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
{
alpha_madde_2c(tmp[tid][b_row + row], A->values[a0_idx + b_row * bs + b_col], x[col + b_col]);
alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_row * bs + b_col], x[row + b_row]);
}
}
}
}
}
}
else if (A->block_layout == ALPHA_SPARSE_LAYOUT_COLUMN_MAJOR)
{
for (ALPHA_INT br = local_m_s; br < local_m_e; br++)
{
ALPHA_INT row = br * bs;
ALPHA_INT block_start = A->rows_start[br], block_end = A->rows_end[br];
ALPHA_INT upper_start = alpha_lower_bound(&A->col_indx[block_start], &A->col_indx[block_end], br) - A->col_indx;
for (ALPHA_INT ai = upper_start; ai < block_end; ai++)
{
ALPHA_INT bc = A->col_indx[ai];
ALPHA_INT col = bc * bs;
ALPHA_INT a0_idx = ai * bs2;
// diagonal block containing diagonal entry
if (bc == br)
{
for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
{
// diagonal entry A(row+b_col, col+b_col)
alpha_madde(tmp[tid][b_col + row], A->values[a0_idx + b_col * (bs + 1)], x[b_col + col]);
for (ALPHA_INT b_row = 0; b_row < b_col; b_row++)
{
alpha_madde_2c(tmp[tid][b_row + row], A->values[a0_idx + b_col * bs + b_row], x[col + b_col]);
alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_col * bs + b_row], x[row + b_row]);
}
}
}
else
{
for (ALPHA_INT b_col = 0; b_col < bs; b_col++)
{
for (ALPHA_INT b_row = 0; b_row < bs; b_row++)
{
alpha_madde_2c(tmp[tid][b_row + row], A->values[a0_idx + b_col * bs + b_row], x[col + b_col]);
alpha_madde(tmp[tid][b_col + col], A->values[a0_idx + b_col * bs + b_row], x[row + b_row]);
}
}
}
}
}
}
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < b_cols * bs; ++i)
{
ALPHA_Number tmp_y;
alpha_setzero(tmp_y);
for (ALPHA_INT j = 0; j < thread_num; ++j)
{
alpha_add(tmp_y, tmp_y, tmp[j][i]);
}
alpha_mul(y[i], y[i], beta);
alpha_madde(y[i], tmp_y, alpha);
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < thread_num; ++i)
{
free(tmp[i]);
}
free(tmp);
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
schelude-clause-modificado.c | #include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif
int main(int argc, char **argv) {
int i, n = 16, chunk, a[n], suma = 0;
if (argc < 2) {
fprintf(stderr, "\nMissing chunk argument\n");
exit(-1);
}
chunk = atoi(argv[1]);
for (i=0; i<n; i++) a[i] = i;
#pragma omp parallel
{
#pragma omp single
{
printf(" DENTRO del for NUM threads %d NUM procesadores %d in parallel %d \n",
omp_get_num_threads(),omp_get_num_procs(),omp_in_parallel());
}
#pragma omp for firstprivate(suma) \
lastprivate(suma) schedule(static,chunk)
for (i=0; i<n; i++)
{
suma = suma + a[i];
printf(" thread %d suma a[%d] suma=%d \n",
omp_get_thread_num(),i,suma);
}
}
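/* Because suma is firstprivate/lastprivate, each thread starts its private copy
   at 0 and, after the worksharing loop, suma holds only the partial sum computed
   by the thread that executed the last chunk (the one containing i = n-1), not
   the total sum of a[]. */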
printf("Fuera de 'parallel for' suma=%d\n",suma);
printf(" Fuera de 'parallel for' NUM threads %d NUM procesadores %d in parallel %d \n",
omp_get_num_threads(),omp_get_num_procs(),omp_in_parallel());
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token, all functions that update or consume the type take a
/// start location of the token they are looking at as a parameter. This allows
/// to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function, as it stores a
/// function_ref, clients should make sure all calls to get() with the same
/// location happen while function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispAttr::Mode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> PreallocatedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and locations of delete-expressions
/// that could not be proven as to whether they mismatch with new-expression
/// used in initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before being declared. Rare. May alias another
/// identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library resides.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// will hold 'respondsToSelector:'
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields. This is really a
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
std::unique_ptr<MangleNumberingContext> MangleNumbering;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
ExprContext(ExprContext) {}
/// Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
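  // The PointerIntPair packs the CXXSpecialMember kind into the low 3 bits of
  // the CXXRecordDecl pointer, so a (record, kind) pair fits in one
  // pointer-sized key for the set below.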
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
void addImplicitTypedef(StringRef Name, QualType T);
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
///Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
void
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
const BlockExpr *blkExpr = nullptr);
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if we should find the top-most
/// lambda scope info, ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
llvm::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, llvm::index_sequence_for<Ts...>());
DB << T;
}
};
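// Illustrative sketch only: when a caller needs custom logic instead of
// streaming fixed arguments, it can derive from TypeDiagnoser directly and
// pass the diagnoser to RequireCompleteType below. The diagnostic ID is a
// placeholder:
//
//   struct HypotheticalDiagnoser : Sema::TypeDiagnoser {
//     void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
//       S.Diag(Loc, diag::err_placeholder_incomplete) << T; // hypothetical ID
//     }
//   };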
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking whether
/// their addresses are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
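// Illustrative sketch only (names are placeholders): a common pattern is to
// ask for a visible definition and, if none is visible, suggest importing the
// module that owns the hidden one via diagnoseMissingImport (declared below):
//
//   NamedDecl *Suggested = nullptr;
//   if (!S.hasVisibleDefinition(Def, &Suggested) && Suggested)
//     S.diagnoseMissingImport(UseLoc, Suggested,
//                             Sema::MissingImportKind::Definition);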
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T) {
return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
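// Illustrative sketch only: the variadic overloads above bundle the extra
// arguments into a BoundTypeDiagnoser, so context can be attached to an
// incomplete-type diagnostic in one call. The diagnostic ID is a placeholder:
//
//   if (S.RequireCompleteType(Loc, FieldTy,
//                             diag::err_placeholder_field_incomplete, // hypothetical ID
//                             FieldName))
//     return true; // the type was incomplete and has already been diagnosed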
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown,
NC_Error,
NC_Keyword,
NC_Type,
NC_Expression,
NC_NestedNameSpecifier,
NC_TypeTemplate,
NC_VarTemplate,
NC_FunctionTemplate,
NC_UndeclaredTemplate,
};
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;
TemplateName Template;
ParsedType Type;
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr);
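// Illustrative sketch only (assumes the usual Parser-side names such as
// Actions, getCurScope() and Next): the parser classifies an identifier it
// cannot otherwise disambiguate and dispatches on the result:
//
//   Sema::NameClassification C =
//       Actions.ClassifyName(getCurScope(), SS, Name, NameLoc, Next,
//                            /*IsAddressOfOperand=*/false);
//   switch (C.getKind()) {
//   case Sema::NC_Type:       /* annotate the token with C.getType() */ break;
//   case Sema::NC_Expression: return C.getExpression();
//   default: break;
//   }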
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note that the values of these enumerators
/// correspond to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform an ODR-like check for C/ObjC when merging tag types from modules.
/// Unlike C++, actually parse the body and reject/error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
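// Illustrative reading of the weights above (not normative): an attribute
// written directly on the declaration contributes AP_Explicit (0); one
// injected via '#pragma clang attribute' contributes AP_PragmaClangAttribute
// (1); if that injected attribute is then inferred for another platform, the
// weights add up to 1 + 2 = 3, so it yields to any attribute with a lower
// (more explicit) final priority for that platform.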
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(
NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted,
bool IsUnavailable, StringRef Message, bool IsStrict,
StringRef Replacement, AvailabilityMergeKind AMK, int Priority,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex, StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
unsigned AttrSpellingListIndex,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
IdentifierInfo *Format, int FormatIdx,
int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
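// Illustrative sketch only: callers typically define a small diagnoser
// derived from ICEConvertDiagnoser and hand it to
// PerformContextualImplicitConversion. Diagnostic IDs are placeholders and
// the remaining pure-virtual overrides are elided:
//
//   struct HypotheticalICEDiagnoser : Sema::ICEConvertDiagnoser {
//     HypotheticalICEDiagnoser()
//         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
//                               /*Suppress=*/false,
//                               /*SuppressConversion=*/false) {}
//     Sema::SemaDiagnosticBuilder
//     diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) override {
//       return S.Diag(Loc, diag::err_placeholder_not_integer) << T;
//     }
//     // ... diagnoseIncomplete, diagnoseExplicitConv, noteExplicitConv,
//     // diagnoseAmbiguous, noteAmbiguous, diagnoseConversion elided ...
//   } Diagnoser;
//   ExprResult Converted =
//       S.PerformContextualImplicitConversion(Loc, CondExpr, Diagnoser);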
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL);
bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions,
bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr,
QualType ObjectType = QualType(),
Expr::Classification
ObjectClassification = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
QualType DestType = QualType(),
bool TakingAddress = false);
// Emit as a series of 'note's all templates and non-templates identified by
// the expression Expr.
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria are specified via the LookupNameKind enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
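// Usage sketch (illustrative; S is a Sema reference and Name/Loc/CurScope come
// from the caller): resolve an ordinary name that is expected to denote a
// single declaration.
//
//   NamedDecl *D = S.LookupSingleName(CurScope, Name, Loc,
//                                     Sema::LookupOrdinaryName);
//   if (!D)
//     /* absent, ambiguous, or overloaded; fall back to full LookupName */;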
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
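// Usage sketch (illustrative; isUsableHere is a hypothetical predicate): keep
// trying typo-correction combinations until the rebuilt expression is
// acceptable in this context.
//
//   ExprResult Checked = S.CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr,
//       [&](Expr *Rebuilt) -> ExprResult {
//         return isUsableHere(Rebuilt) ? ExprResult(Rebuilt) : ExprError();
//       });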
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method's
/// implementation declaration exactly matches its interface declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the ivar
/// which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns that ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks whether methods implemented in a
/// category match those implemented in its primary class, and warns each time
/// an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and the
/// parameter CheckTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, the function returns false; otherwise,
/// it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
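// Usage sketch (illustrative; Inc is a hypothetical Expr* for a for-loop
// increment): wrap it as a discarded-value full-expression before passing it
// to ActOnForStmt, declared further below.
//
//   Sema::FullExprArg Third = S.MakeFullDiscardedValueExpr(Inc);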
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// An RAII object to enter the scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
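// Usage sketch (illustrative; L, R, and Elts are hypothetical): the RAII
// object brackets ActOnStartOfCompoundStmt / ActOnFinishOfCompoundStmt around
// building the compound statement.
//
//   {
//     Sema::CompoundScopeRAII BodyScope(S);
//     StmtResult Body = S.ActOnCompoundStmt(L, R, Elts, /*isStmtExpr=*/false);
//   } // ActOnFinishOfCompoundStmt runs when BodyScope is destroyed.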
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void UpdateMarkingForLValueToRValue(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
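// Usage sketch (illustrative): a query-only capture check that neither adds
// captures nor emits diagnostics.
//
//   QualType CaptureType, DeclRefType;
//   bool CannotCapture = S.tryCaptureVariable(
//       Var, Loc, Sema::TryCapture_Implicit, /*EllipsisLoc=*/SourceLocation(),
//       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
//       /*FunctionScopeIndexToStopAt=*/nullptr);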
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
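// Usage sketch (illustrative; the diagnostic ID is only an example, and PDiag
// is assumed to be the usual Sema helper for building a PartialDiagnostic):
// delay a runtime-behavior warning until the reachability of TheCall is known.
//
//   S.DiagRuntimeBehavior(Loc, TheCall,
//                         S.PDiag(diag::warn_division_by_zero));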
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
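// Editor's illustrative note (not from the Clang sources): an OpenCL vector
// literal of the kind handled above, written as a cast of a parenthesized list:
//
//   float4 v = (float4)(1.0f, 2.0f, 3.0f, 4.0f);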
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
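// Editor's illustrative note (not from the Clang sources): with the GNU
// extension the middle operand may be omitted, so "x ?: y" behaves like
// "x ? x : y" with 'x' evaluated only once; in that case 'LHSExpr' is null.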
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
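// Editor's illustrative note (not from the Clang sources): a GNU statement
// expression whose last statement yields the value, e.g.
//
//   int doubled = ({ int t = next_value(); t * 2; });  // hypothetical next_value()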
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
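// Editor's illustrative note (not from the Clang sources): for
// __builtin_offsetof(struct S, a.b[2].c) the designator decomposes into four
// OffsetOfComponent entries: ".a", ".b" and ".c" with isBrackets == false
// (U.IdentInfo set), and "[2]" with isBrackets == true (U.E set).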
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
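// Editor's illustrative note (not from the Clang sources): the condition must
// be a constant expression and only the chosen operand contributes its type
// and value, e.g. (with hypothetical helpers)
//
//   __builtin_choose_expr(sizeof(long) == 8, use_64bit_path(), use_32bit_path())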
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
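// Editor's illustrative note (not from the Clang sources): these hooks back the
// Microsoft extension in which a braced region is compiled only if a name
// exists (or, for __if_not_exists, does not exist), e.g.
//
//   __if_exists(SomeClass::member) { /* compiled only if the name resolves */ }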
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
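// Editor's illustrative sketch (not from the Clang sources): typical use of the
// collector above when computing the exception specification of an implicit
// special member; 'SemaRef', 'Loc', 'BaseDtor' and 'FieldCtor' are hypothetical
// placeholders for values discovered while walking the class.
//
//   ImplicitExceptionSpecification Spec(SemaRef);
//   Spec.CalledDecl(Loc, BaseDtor);    // fold in a base subobject's member
//   Spec.CalledDecl(Loc, FieldCtor);   // fold in a field subobject's member
//   FunctionProtoType::ExceptionSpecInfo ESI = Spec.getExceptionSpec();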
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed-in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
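// Editor's illustrative note (not from the Clang sources): a C++17 binary right
// fold handled by the entry points above; "(ts + ... + 0)" expands to
// t1 + (t2 + (... + (tN + 0))).
//
//   template <class... Ts> auto sum(Ts... ts) { return (ts + ... + 0); }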
//// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class), along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
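// Editor's illustrative sketch (not from the Clang sources): the RAII helper
// above is instantiated around code that parses or instantiates a member where
// 'this' must be usable; 'SemaRef', 'RD' and 'Quals' are hypothetical
// placeholders.
//
//   {
//     CXXThisScopeRAII ThisScope(SemaRef, RD, Quals);
//     // within this scope, 'this' has the class type of RD with Quals applied
//   }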
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \return 'true' if the capture failed, 'false' on success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
//// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
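// Editor's illustrative note (not from the Clang sources): while parsing the
// qualified name "outer::inner::x", the first component is reported with a
// NestedNameSpecInfo whose Identifier is "outer", whose IdentifierLoc is the
// location of "outer", and whose CCLoc is the location of the following '::'.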
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case, do not emit an error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
bool IsConstexprSpecified);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, IdentifierInfo *Id,
LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef,
IdentifierInfo *Id,
bool DirectInit, Expr *&Init);
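// Editor's illustrative note (not from the Clang sources): these hooks handle
// C++14 init-captures such as
//
//   auto fn = [n = compute() * 2]() { return n; };   // hypothetical compute()
//
// where the initializer may require an lvalue-to-rvalue conversion before the
// capture field is created.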
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Build the implicit field for an init-capture.
FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbigiousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
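// Illustrative sketch (not part of the original header): a typical caller
// builds a cast path while checking the conversion; DerivedTy, BaseTy, Loc
// and Range are assumed to exist at the call site.
//
//   CXXCastPath BasePath;
//   if (CheckDerivedToBaseConversion(DerivedTy, BaseTy, Loc, Range, &BasePath))
//     return ExprError();  // inaccessible or ambiguous base, already diagnosed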
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if the 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member
/// function overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
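// Illustrative sketch (not part of the original header): the variadic overload
// above packages the extra arguments into a BoundTypeDiagnoser, so a call site
// usually looks like the following; the diagnostic ID and the enumerator are
// placeholders chosen for illustration.
//
//   if (RequireNonAbstractType(VD->getLocation(), VD->getType(),
//                              diag::err_abstract_type_in_decl,
//                              AbstractVariableType))
//     VD->setInvalidDecl();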
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation(),
AssumedTemplateKind *ATK = nullptr);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true);
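// Illustrative sketch (not part of the original header): checking and
// converting an explicit template argument list before forming a
// specialization; the surrounding variables are assumed to exist.
//
//   SmallVector<TemplateArgument, 4> Converted;
//   if (CheckTemplateArgumentList(ClassTemplate, TemplateNameLoc, TemplateArgs,
//                                 /*PartialTemplateArgs=*/false, Converted))
//     return true;   // an error was already diagnosed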
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
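// Illustrative sketch (not part of the original header): the Unexpanded set is
// usually produced by one of the collectUnexpandedParameterPacks overloads
// declared later in this class, e.g.:
//
//   SmallVector<UnexpandedParameterPack, 2> Unexpanded;
//   collectUnexpandedParameterPacks(T, Unexpanded);
//   if (DiagnoseUnexpandedParameterPacks(Loc, UPPC_DeclarationType, Unexpanded))
//     return true;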
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
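// Illustrative sketch (not part of the original header): a transformer decides
// whether to expand a pack expansion element-by-element; SemaRef, EllipsisLoc,
// PatternRange, Unexpanded and TemplateArgs are assumed to exist, and
// ArgumentPackSubstitutionIndexRAII is declared later in this class.
//
//   bool ShouldExpand = false, RetainExpansion = false;
//   Optional<unsigned> NumExpansions;
//   if (SemaRef.CheckParameterPacksForExpansion(EllipsisLoc, PatternRange,
//                                               Unexpanded, TemplateArgs,
//                                               ShouldExpand, RetainExpansion,
//                                               NumExpansions))
//     return true;   // inconsistent pack sizes, already diagnosed
//   if (ShouldExpand)
//     for (unsigned I = 0; I != *NumExpansions; ++I) {
//       Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
//       // ... transform the pattern for element I ...
//     }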
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
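// Illustrative sketch (not part of the original header): deducing template
// arguments for a call to a function template; the arguments shown are
// assumed to come from the enclosing overload-resolution code.
//
//   sema::TemplateDeductionInfo Info(CallLoc);
//   FunctionDecl *Specialization = nullptr;
//   TemplateDeductionResult Result = DeduceTemplateArguments(
//       FunctionTemplate, ExplicitTemplateArgs, Args, Specialization, Info,
//       /*PartialOverloading=*/false,
//       [](ArrayRef<QualType>) { return false; });
//   if (Result != TDK_Success)
//     return Result;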
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
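// Illustrative sketch (not part of the original header): deducing the type of
// a variable declared with 'auto' from its initializer; VDecl, TSI and Init
// are assumed to exist at the call site.
//
//   QualType DeducedType;
//   if (DeduceAutoType(TSI, Init, DeducedType) == DAR_Failed)
//     DiagnoseAutoDeductionFailure(VDecl, Init);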
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template argument determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that caused
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
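// Illustrative sketch (not part of the original header): substituting a single
// element of a parameter pack under a specific index, with the previous index
// (typically -1, meaning "packs expand to themselves") restored on scope exit;
// SemaRef and I are assumed to exist at the call site.
//
//   {
//     ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
//     // ... substitute the I-th element of the pack ...
//   }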
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
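// Hedged usage sketch, assuming a Sema member function with
// PointOfInstantiation and Entity in scope: construction pushes a context and
// isInvalid() reports whether the instantiation-depth limit was exceeded.
//
//   InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
//   if (Inst.isInvalid())
//     return true; // too deep; an error has already been diagnosed
//   // ... perform the instantiation; ~InstantiatingTemplate pops the context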
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
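// Hedged example of the Optional protocol described above: an engaged value
// that holds a null pointer means "SFINAE context, but no deduction-info
// recorder"; the captured-diagnostic handling is only an assumed placeholder.
//
//   if (Optional<sema::TemplateDeductionInfo *> Info = isSFINAEContext()) {
//     if (*Info)
//       ; // record the suppressed diagnostic in (*Info)
//   } else {
//     ; // not in a SFINAE context: emit the diagnostic normally
//   }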
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
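// Hedged sketch: a SFINAETrap brackets a substitution so that failures are
// counted instead of surfacing as hard errors; what "substitute" and the
// failure handling look like here is assumed for illustration.
//
//   SFINAETrap Trap(*this);
//   // ... substitute template arguments into the candidate ...
//   if (Trap.hasErrorOccurred())
//     ; // treat the candidate as a substitution failure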
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
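// Illustrative sketch only: provisional analysis of a construct with typo
// correction (and immediate-context diagnostics, via the embedded trap)
// suppressed for the lifetime of the scope.
//
//   {
//     TentativeAnalysisScope Tentative(*this);
//     // ... check whether the construct would be valid ...
//   } // previous typo-correction state restored here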
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
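// Hedged sketch of the save/perform/restore pattern; the Recursive flag is an
// assumed local used only to show how Enabled is typically derived.
//
//   GlobalEagerInstantiationScope GlobalInstantiations(*this, /*Enabled=*/Recursive);
//   // ... instantiate a definition, possibly queueing further work ...
//   GlobalInstantiations.perform(); // define used vtables, drain the queue
//   // the destructor swaps the saved queues back in when Enabled is true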
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
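// Companion sketch (assumptions as above): members of local classes queued
// during instantiation are flushed before the scope is torn down.
//
//   LocalEagerInstantiationScope LocalInstantiations(*this);
//   // ... instantiate the enclosing function's body ...
//   LocalInstantiations.perform(); // PerformPendingInstantiations(/*LocalOnly=*/true)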
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
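// Hedged sketch: the builder only hands back a non-null array when at least
// one parameter had a non-default ExtParameterInfo; EPI, Info and NumParams
// are assumed locals.
//
//   ExtParameterInfoBuilder ParamInfos;
//   ParamInfos.set(0, Info); // record info for parameter 0
//   EPI.ExtParameterInfos = ParamInfos.getPointerOrNull(NumParams);
//   // nullptr means "nothing interesting to attach to the prototype"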
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyPtrTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
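// Hedged dispatch sketch over the three message kinds; all arguments are
// assumed to be in scope in some caller.
//
//   switch (getObjCMessageKind(S, Name, NameLoc, IsSuper, HasTrailingDot,
//                              ReceiverType)) {
//   case ObjCSuperMessage:    /* build a message to 'super' */   break;
//   case ObjCInstanceMessage: /* build an instance message */    break;
//   case ObjCClassMessage:    /* ReceiverType names the class */ break;
//   }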
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFEnvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
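// Hedged illustration: an invalid location means no "#pragma clang optimize
// off" region is active, so a hypothetical caller can skip adding 'optnone'.
//
//   if (getOptimizeOffPragmaLocation().isInvalid())
//     return; // pragma state is "on"; nothing to mark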
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
unsigned SpellingListIndex, bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
unsigned SpellingListIndex);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
unsigned SpellingListIndex);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
Expr *MinBlocks, unsigned SpellingListIndex);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
unsigned SpellingListIndex, bool InInstantiation = false);
void AddParameterABIAttr(SourceRange AttrRange, Decl *D,
ParameterABI ABI, unsigned SpellingListIndex);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D, Expr *Min,
Expr *Max, unsigned SpellingListIndex);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min,
Expr *Max, unsigned SpellingListIndex);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext;
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
/// Check if the expression is allowed to be used in expressions for the
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
public:
/// Return true if the provided declaration \a VD should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on Requires directive
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
NamedDeclSetType &SameDirectiveDecls);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true inside OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return true if (un)supported features for the current target should be
/// diagnosed if OpenMP (offloading) is enabled.
bool shouldDiagnoseTargetSupportFromOpenMP() const {
return !getLangOpts().OpenMPIsDevice || isInOpenMPDeclareTargetContext() ||
isInOpenMPTargetExecutionDirective();
}
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
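// A purely illustrative example of the source-level form this entry point
// handles (the identifiers are hypothetical, not part of the interface):
//   #pragma omp declare simd simdlen(8) uniform(a) linear(i : 1) aligned(a : 32)
//   float scale(const float *a, int i);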
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
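// For reference, NameModifier corresponds to the directive-name-modifier in
// the source form of the clause, e.g. (illustrative only):
//   #pragma omp target parallel if(target : use_device) if(parallel : n > 1)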
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
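// The optional NumForLoops argument corresponds to the parameterized form of
// the clause, e.g. '#pragma omp for ordered(2)' (illustrative only); the plain
// 'ordered' clause omits it.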
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
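// Illustrative source forms covered by the modifier/kind/chunk parameters
// above (not part of this interface):
//   #pragma omp for schedule(static, 64)
//   #pragma omp for schedule(monotonic : dynamic, 4)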
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
  /// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
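// Illustrative source form (OpenMP 5.0 'requires' directive):
//   #pragma omp requires atomic_default_mem_order(seq_cst)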
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
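// Illustrative source forms: a built-in reduction operator, and a user-defined
// reduction named via ReductionIdScopeSpec/ReductionId (identifiers are
// hypothetical):
//   #pragma omp parallel for reduction(+ : sum)
//   #pragma omp parallel for reduction(my_ns::my_add : acc)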
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
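// Illustrative source forms showing the linear-modifier (LinKind) and step:
//   #pragma omp simd linear(i : 1)
//   #pragma omp declare simd linear(val(j) : 2)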
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
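// Illustrative source form mapping onto DepKind and VarList:
//   #pragma omp task depend(in : a, b) depend(out : c[0])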
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
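// Illustrative source form showing map-type modifiers, a map type, and an
// array section in the variable list:
//   #pragma omp target map(always, to : a[0:n]) map(from : result)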
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
  /// The value kind of the result is given by VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of an unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
  /// GatherArgumentsForCall - Collects argument expressions for the various
  /// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
  // UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
    /// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
    /// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
    /// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
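// A rough, illustrative mapping from C assignments to these values (the
// authoritative classification is produced by the Check*Constraints routines;
// 'p' is assumed to be an 'int *'):
//   int *p = 42;              // IntToPointer (extension)
//   int n = p;                // PointerToInt (extension)
//   unsigned *u = p;          // IncompatiblePointerSign
//   void *vp = p;             // Compatible
//   void (*fp)(void) = vp;    // FunctionVoidPointer (extension)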
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType &paramType);
// CheckVectorCast - check type constraints for vectors.
  // Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
  // Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
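// A minimal usage sketch (assuming a Scope *S, a Sema &SemaRef, a
// SourceLocation Loc, and an Expr *E are already in hand): build the condition
// of an 'if constexpr' and query its statically known value, if any.
//   Sema::ConditionResult Cond =
//       SemaRef.ActOnCondition(S, Loc, E, Sema::ConditionKind::ConstexprIf);
//   if (!Cond.isInvalid())
//     if (llvm::Optional<bool> Known = Cond.getKnownValue())
//       (void)*Known; // the branch to take is known at compile time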
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
  /// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
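// Illustrative triggers for the two diagnostics above:
//   if (x = y) {}    // assignment used as a condition
//   if ((x == y)) {} // extra parens around '==' may indicate an intended '='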
/// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns false on success.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
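// A minimal usage sketch (assuming an Expr *E already in hand): verify that E
// is an ICE and recover its value when it is.
//   llvm::APSInt Value;
//   ExprResult Converted = VerifyIntegerConstantExpression(E, &Value);
//   if (!Converted.isInvalid()) {
//     // Value now holds the evaluated constant.
//   }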
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns false on success.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
  /// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
DeviceCallGraph;
/// Diagnostic builder for CUDA/OpenMP devices errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
  /// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
SourceLocation OrigLoc,
const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
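// Illustrative caller -> callee combinations and their preferences, assuming
// host-side compilation (the authoritative answer comes from
// IdentifyCUDAPreference below):
//   __host__            -> __host__             : CFP_Native
//   __host__            -> __host__ __device__  : CFP_HostDevice
//   __host__            -> __device__           : CFP_Never
//   __host__ __device__ -> __device__           : CFP_WrongSide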
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
  /// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure the kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
bool EnteringContext, QualType BaseType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// To be used for checking whether the number of arguments being passed
/// to a function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
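// For illustration: completing "f(a, b, " against a two-parameter f gives
// NumArgs == 2 with PartialOverloading set, so the in-progress argument is
// counted as a third one and the call is reported as having too many.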
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
private:
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedDllExportClasses.empty() &&
"there shouldn't be any pending delayed DLL export classes");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
decltype(DelayedDllExportClasses) SavedDllExportClasses;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
SavedDllExportClasses.swap(S.DelayedDllExportClasses);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the set of potentially
/// misaligned members and is being converted to some pointer type T with lower
/// or equal alignment requirements. If so, it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
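// Example use of EnterExpressionEvaluationContext (an illustrative sketch;
// `SemaRef` stands for whatever Sema instance the caller already holds):
//
//   EnterExpressionEvaluationContext Unevaluated(
//       SemaRef, Sema::ExpressionEvaluationContext::Unevaluated);
//   // ... build or check the unevaluated operand here ...
//
// The pushed context is popped automatically when `Unevaluated` is destroyed.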
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
GB_unop__abs_uint16_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__abs_uint16_uint16
// op(A') function: GB_unop_tran__abs_uint16_uint16
// C type: uint16_t
// A type: uint16_t
// cast: uint16_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint16_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint16_t z = aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__abs_uint16_uint16
(
uint16_t *Cx, // Cx and Ax may be aliased
const uint16_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (uint16_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
uint16_t z = aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint16_t aij = Ax [p] ;
uint16_t z = aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
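//------------------------------------------------------------------------------
// usage sketch (illustrative comment only, not generated code): at the user
// level this kernel is normally reached through GrB_Matrix_apply with the
// corresponding built-in unary operator, e.g. (assuming the standard
// SuiteSparse:GraphBLAS operator name GrB_ABS_UINT16):
//
//      GrB_Matrix_apply (C, NULL, NULL, GrB_ABS_UINT16, A, NULL) ;
//------------------------------------------------------------------------------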
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__abs_uint16_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cnn_pool.h | #ifndef __CNN_POOL_H__
#define __CNN_POOL_H__
#include <string.h>
#include "cnn_types.h"
#ifdef CNN_WITH_CUDA
#include <cuda_runtime.h>
void cnn_pool_2d_max_gpu(float* dst, int* indexMat, int dstWidth, int dstHeight,
int poolSize, float* src, int srcWidth, int srcHeight,
int channel);
void cnn_pool_2d_max_grad_gpu(float* grad, int* indexMat, float* gradIn,
int size);
#endif
static inline void cnn_pool_2d_avg( //
float* dst, int dstHeight, int dstWidth, //
float* src, int srcHeight, int srcWidth, //
int batch, int channel, int poolSize)
{
#pragma omp parallel for
for (int i = 0; i < batch * channel * dstHeight * dstWidth; i++)
{
int w = i % dstWidth;
int h = (i / dstWidth) % dstHeight;
int c = (i / (dstHeight * dstWidth)) % channel;
int n = i / (channel * dstHeight * dstWidth);
float sum = 0;
for (int poolH = 0; poolH < poolSize; poolH++)
{
for (int poolW = 0; poolW < poolSize; poolW++)
{
sum += src[n * (channel * srcHeight * srcWidth) + //
c * (srcHeight * srcWidth) + //
(h * poolSize + poolH) * srcWidth + //
(w * poolSize + poolW)];
}
}
dst[n * (channel * dstHeight * dstWidth) + //
c * (dstHeight * dstWidth) + //
h * dstWidth + //
w] = sum / (poolSize * poolSize);
}
}
static inline void cnn_pool_2d_avg_grad( //
float* gradOut, int gradOutHeight, int gradOutWidth, //
float* gradIn, int gradInHeight, int gradInWidth, //
int batch, int channel, int poolSize)
{
#pragma omp parallel for
for (int i = 0; i < batch * channel * gradInHeight * gradInWidth; i++)
{
int w = i % gradInWidth;
int h = (i / gradInWidth) % gradInHeight;
int c = (i / (gradInHeight * gradInWidth)) % channel;
int n = i / (channel * gradInHeight * gradInWidth);
float gradTmp = gradIn[n * (channel * gradInHeight * gradInWidth) + //
c * (gradInHeight * gradInWidth) + //
h * gradInWidth + //
w] /
(poolSize * poolSize);
for (int poolH = 0; poolH < poolSize; poolH++)
{
for (int poolW = 0; poolW < poolSize; poolW++)
{
gradOut[n * (channel * gradOutHeight * gradOutWidth) + //
c * (gradOutHeight * gradOutWidth) + //
(h * poolSize + poolH) * gradOutWidth + //
(w * poolSize + poolW)] = gradTmp;
}
}
}
}
static inline void cnn_pool_2d_max(float* dst, int* indexMat, int dstHeight,
int dstWidth, float* src, int srcWidth,
int srcHeight, int poolSize, int channel)
{
int __dstImSize = dstHeight * dstWidth;
int __srcImSize = srcHeight * srcWidth;
for (int __ch = 0; __ch < channel; __ch++)
{
int __dstChShift = __ch * __dstImSize;
int __srcChShift = __ch * __srcImSize;
for (int __h = 0; __h < dstHeight; __h++)
{
for (int __w = 0; __w < dstWidth; __w++)
{
float __tmp, __max;
int __maxIndex, __index;
__index = (__h * poolSize) * srcWidth + (__w * poolSize) +
__srcChShift;
__max = src[__index];
__maxIndex = __index;
for (int __poolH = 0; __poolH < poolSize; __poolH++)
{
for (int __poolW = 0; __poolW < poolSize; __poolW++)
{
__index = ((__h * poolSize) + __poolH) * srcWidth +
((__w * poolSize) + __poolW) + __srcChShift;
__tmp = src[__index];
if (__tmp > __max)
{
__max = __tmp;
__maxIndex = __index;
}
}
}
__index = __h * dstWidth + __w + __dstChShift;
dst[__index] = __max;
indexMat[__index] = __maxIndex;
}
}
}
}
static inline void cnn_pool_2d_max_grad(float* grad, int* indexMat,
float* iGrad, int iGradRows,
int iGradCols, int iCh)
{
int size = iGradRows * iGradCols * iCh;
for (int __i = 0; __i < size; __i++)
{
grad[indexMat[__i]] += iGrad[__i];
}
}
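/*
 * Minimal usage sketch (illustrative, not part of the library): a single
 * batch, single channel, 4x4 input pooled with poolSize = 2 into a 2x2
 * output. Guarded out so it never affects builds; enable by hand if wanted.
 */
#if 0
static void cnn_pool_2d_example(void)
{
    float src[16] = {1,  2,  3,  4,
                     5,  6,  7,  8,
                     9,  10, 11, 12,
                     13, 14, 15, 16};
    float avg[4], maxOut[4];
    int indexMat[4];

    /* Average pooling: each output is the mean of its 2x2 window,
     * e.g. avg[0] = (1 + 2 + 5 + 6) / 4 = 3.5. */
    cnn_pool_2d_avg(avg, 2, 2, src, 4, 4, 1, 1, 2);

    /* Max pooling: maxOut[0] = 6 and indexMat[0] = 5 (its flat source index). */
    cnn_pool_2d_max(maxOut, indexMat, 2, 2, src, 4, 4, 2, 1);
}
#endif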
static inline void cnn_forward_pool(union CNN_LAYER* layerRef,
struct CNN_CONFIG* cfgRef, int layerIndex)
{
#ifdef CNN_WITH_CUDA
float alpha = 1.0;
float beta = 0.0;
struct CNN_LAYER_POOL* layerPtr = &layerRef[layerIndex].pool;
struct CNN_MAT* outData = &layerPtr->outMat.data;
struct CNN_MAT* preOutData = &layerRef[layerIndex - 1].outMat.data;
cnn_assert_cudnn(cudnnPoolingForward( //
cnnInit.cudnnHandle, layerPtr->poolDesc, //
&alpha, //
layerPtr->srcTen, preOutData->mat, //
&beta, //
layerPtr->dstTen, outData->mat));
#else
if (cfgRef->layerCfg[layerIndex].pool.poolType == CNN_POOL_MAX)
{
// Clear outputs
memset(layerRef[layerIndex].outMat.data.mat, 0,
sizeof(float) * layerRef[layerIndex].outMat.data.rows *
layerRef[layerIndex].outMat.data.cols);
for (int j = 0; j < cfgRef->batch; j++)
{
int srcShift = j * layerRef[layerIndex - 1].outMat.data.cols;
int dstShift = j * layerRef[layerIndex].outMat.data.cols;
float* srcPtr = layerRef[layerIndex - 1].outMat.data.mat + srcShift;
float* dstPtr = layerRef[layerIndex].outMat.data.mat + dstShift;
cnn_pool_2d_max(dstPtr,
&layerRef[layerIndex].pool.indexMat[dstShift],
layerRef[layerIndex].outMat.height,
layerRef[layerIndex].outMat.width, srcPtr,
layerRef[layerIndex - 1].outMat.height,
layerRef[layerIndex - 1].outMat.width,
cfgRef->layerCfg[layerIndex].pool.size,
layerRef[layerIndex].outMat.channel);
}
}
else
{
cnn_pool_2d_avg(layerRef[layerIndex].outMat.data.mat,
layerRef[layerIndex].outMat.height,
layerRef[layerIndex].outMat.width,
layerRef[layerIndex - 1].outMat.data.mat,
layerRef[layerIndex - 1].outMat.height,
layerRef[layerIndex - 1].outMat.width, cfgRef->batch,
layerRef[layerIndex].outMat.channel,
cfgRef->layerCfg[layerIndex].pool.size);
}
#endif
}
static inline void cnn_backward_pool(union CNN_LAYER* layerRef,
struct CNN_CONFIG* cfgRef, int layerIndex)
{
#ifdef CNN_WITH_CUDA
float alpha = 1.0;
float beta = 0.0;
struct CNN_LAYER_POOL* layerPtr = &layerRef[layerIndex].pool;
struct CNN_MAT* outData = &layerPtr->outMat.data;
struct CNN_MAT* preOutData = &layerRef[layerIndex - 1].outMat.data;
#else
int srcShift, dstShift;
float* srcPtr;
#endif
if (layerIndex > 1)
{
#ifdef CNN_WITH_CUDA
cnn_assert_cudnn(cudnnPoolingBackward( //
cnnInit.cudnnHandle, layerPtr->poolDesc, //
&alpha, //
layerPtr->dstTen, outData->mat, //
layerPtr->dstTen, outData->grad, //
layerPtr->srcTen, preOutData->mat, //
&beta, //
layerPtr->srcTen, preOutData->grad));
#else
if (cfgRef->layerCfg[layerIndex].pool.poolType == CNN_POOL_MAX)
{
// Zero layer gradient
memset(layerRef[layerIndex - 1].outMat.data.grad, 0,
sizeof(float) * layerRef[layerIndex - 1].outMat.data.rows *
layerRef[layerIndex - 1].outMat.data.cols);
for (int j = 0; j < cfgRef->batch; j++)
{
srcShift = j * layerRef[layerIndex].outMat.data.cols;
dstShift = j * layerRef[layerIndex - 1].outMat.data.cols;
srcPtr = layerRef[layerIndex].outMat.data.grad + srcShift;
// Find layer gradient
cnn_pool_2d_max_grad(
&layerRef[layerIndex - 1].outMat.data.grad[dstShift],
&layerRef[layerIndex].pool.indexMat[srcShift], srcPtr,
layerRef[layerIndex].outMat.height,
layerRef[layerIndex].outMat.width,
layerRef[layerIndex].outMat.channel);
}
}
else
{
cnn_pool_2d_avg_grad(layerRef[layerIndex - 1].outMat.data.grad,
layerRef[layerIndex - 1].outMat.height,
layerRef[layerIndex - 1].outMat.width,
layerRef[layerIndex].outMat.data.grad,
layerRef[layerIndex].outMat.height,
layerRef[layerIndex].outMat.width,
cfgRef->batch,
layerRef[layerIndex].outMat.channel,
cfgRef->layerCfg[layerIndex].pool.size);
}
#endif
}
}
#endif
|
for_simple.c | /* PMSIS includes */
#include "pmsis.h"
#include "omp.h"
#define NB_CORES (8)
static int32_t core_iterations[NB_CORES] = { 0 };
static uint32_t errors = 0;
/* Cluster main entry, executed by core 0. */
void cluster_delegate(void *arg)
{
printf("Cluster master core entry\n");
#pragma omp parallel num_threads(NB_CORES)
{
printf("[%d %d] Fork entry\n", pi_cluster_id(), omp_get_thread_num() );
#pragma omp for
for (int i=0; i<64; i++)
{
int32_t core_id = omp_get_thread_num();
if (core_id >= NB_CORES)
{
errors++;
}
else
{
core_iterations[core_id]++;
}
printf("[%d %d] for entry index %d\n", pi_cluster_id(), omp_get_thread_num(), i );
}
}
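/* Sanity check: the "omp for" above distributes 64 iterations over the
 * NB_CORES (8) worker threads, so every core should have run at least one
 * iteration (exactly 64 / 8 = 8 each under a plain static schedule). */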
for (int i = 0; i < NB_CORES; i++)
{
if (core_iterations[i] == 0)
{
errors++;
printf("Core #%d has no iteration\n", i);
}
}
printf("Cluster master core exit\n");
}
void helloworld(void)
{
printf("Entering main controller\n");
/* The error count is the file-scope 'errors' updated by cluster_delegate. */
uint32_t core_id = pi_core_id(), cluster_id = pi_cluster_id();
printf("[%d %d] Hello World!\n", cluster_id, core_id);
struct pi_device cluster_dev = {0};
struct pi_cluster_conf cl_conf = {0};
/* Init cluster configuration structure. */
pi_cluster_conf_init(&cl_conf);
cl_conf.id = 0; /* Set cluster ID. */
/* Configure & open cluster. */
pi_open_from_conf(&cluster_dev, &cl_conf);
if (pi_cluster_open(&cluster_dev))
{
printf("Cluster open failed !\n");
pmsis_exit(-1);
}
/* Prepare cluster task and send it to cluster. */
struct pi_cluster_task cl_task = {0};
cl_task.entry = cluster_delegate;
cl_task.arg = NULL;
pi_cluster_send_task_to_cl(&cluster_dev, &cl_task);
pi_cluster_close(&cluster_dev);
if (errors)
{
printf("Test failed!\n");
}
else
{
printf("Test success!\n");
}
pmsis_exit(errors);
}
/* Program Entry. */
int main(void)
{
printf("\n\n\t *** PMSIS HelloWorld ***\n\n");
return pmsis_kickoff((void *) helloworld);
}
|
krb5_tgs_fmt_plug.c | /*
* Based on the work by Tim Medin
 * Port from his Python script to John by Michael Kramer (SySS GmbH)
*
* This software is
* Copyright (c) 2015 Michael Kramer <michael.kramer@uni-konstanz.de>,
* Copyright (c) 2015 magnum
* Copyright (c) 2016 Fist0urs <eddy.maaalou@gmail.com>
*
 * Modified by Fist0urs to improve performance by mounting a known-plaintext
 * attack based on the defined ASN.1 structures (thereby dropping the RC4
 * rounds + HMAC-MD5)
*
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_krb5tgs;
#elif FMT_REGISTERS_H
john_register_one(&fmt_krb5tgs);
#else
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "misc.h"
#include "formats.h"
#include "common.h"
#include "dyna_salt.h"
#include "md4.h"
#include "hmacmd5.h"
#include "rc4.h"
#include "unicode.h"
#include "memdbg.h"
#ifndef OMP_SCALE
#define OMP_SCALE 256
#endif
#define FORMAT_LABEL "krb5tgs"
#define FORMAT_NAME "Kerberos 5 TGS etype 23"
#define FORMAT_TAG "$krb5tgs$23$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define ALGORITHM_NAME "MD4 HMAC-MD5 RC4"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1000
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 0
#define BINARY_ALIGN MEM_ALIGN_NONE
#define SALT_SIZE sizeof(struct custom_salt *)
#define SALT_ALIGN sizeof(struct custom_salt *)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/*
assuming checksum == edata1
formats are:
$krb5tgs$23$checksum$edata2
$krb5tgs$23$*user*realm*spn*$checksum$edata2
*/
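/*
 Worked example (sketch): for a hash without the optional account info,
   $krb5tgs$23$<checksum>$<edata2>
 valid() below expects <checksum> to be exactly 32 hex digits (the 16-byte
 edata1) and <edata2> to be hex of at least 64 + 16 digits.
*/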
static struct fmt_tests tests[] = {
{"$krb5tgs$23$74809c4c83c3c8279c6058d2f206ec2f$78b4bbd4d229487d5afc9a6050d4144ce10e9245cdfc0df542879814ce740cebb970ee820677041596d7e55836a18cc95c04169e7c74a4a22ae94e66f3d37150e26cc9cb99e189ef54feb7a40a8db2cb2c41db80d8927c74da7b33b52c58742d2109036b8ab27184609e7adff27b8f17b2f2a7b7d85e4ad532d8a70d48685a4390a9fc7a0ab47fd17334534d795abf83462f0db3de931c6a2d5988ab5bf3253facfff1381afb192ce385511c9052f2915ffdb7ea28a1bbad0573d9071e79dc15068527d50100de8813793a15c292f145fa3797ba86f373a4f0a05e5f2ec7dbfd8c8b5139cc7fbb098ea1dd91a7440134ffe2aff7174d0df13dcad82c81c680a70127a3ec8792bdecd74a878f97ff2b21277dc8c9a2f7bbcd9f72560dd933d85585259067d45a46a6f505d03f188b62c37edf03f117503a26743ebd674d5b07324c15fc8418881613b365402e0176da97d43cf85e8239b69aee07791233a959bcaf83a7f492fa718dd0a1747eaf5ce626eb11bda89e8235a056e2721f45c3b61442d893ef32a8c192ea0dadb853f3c6f3c75e92f23c744605c6f55578f696b0f33a9586b8aae3e12e38a097692cd9a31d780d973eaaf62ef23b2fc9ae59a38bfd8ea14d3289b46910f61a90aa733e66382bc27f40ba634e55ef1bec0ca7f71546b79566d85664b92f9fae495fcef5cde4c4399a6798569a7e81b9cc4bdde7104f3fe181401f82bba944e3b0a406c7093c00ff9d5984a82517b1a64a8aa561bc1f0cbafbdbbc5654d375c91d4e485e17bb06838109fbc1504147481c91652f545086a84daa423a6286ea6bb13460c5ff3d865a7b37b9ce4e7b07fbe2f6897c12c1e4df2e875c1ec9cfbf84097a7f48b270baf3481263b21849ab93c231490d06a23461a5e00c23df76bca8e5a19256d859304e1f5752bf055ac7f4843e1ad174f1cbbf5c142958f9310025ce439d5979982fb0b8c2ea95e1a22ee8dc63423d9d364cb0b95bcdf89ec4ed485b9005326d728757d77aa3e020a4a61d7deb782bc5264dca350173609772cd6d003ee8104dd24d310c9a18a44f78e27d65095f5bb54f6118c8f0d79ad5a850cec8d40a19bd0134144e904c9eb7fdcff3293696071fc1118f6b2f934281a25bcd5ca7d567714b1e43bd6d09bfcc8744c0ca273a75938394ac2fb31957287093346078577c94a71dfa6ad4a63211f54f00ef7a9064d070aaff84116ee891728915c938a8a32e87aaa00ec18e2b4e9ae88f7e53f08d855052a995f92351be32d8df934eab487103b0f089828e5fb5f73af3a8a05b9fffd25c43a392994743de3de1a2a9b8bba27e02ae2341f09d63aafab291759c41b9635521ca02f08e21e7e5c3ce75c8c3515eaa99aeb9bf8e204663e8b6b8507ecf87230a131687b4770e250ba0ef29fa3ca3b47b34971e17c6a3ef785acdd7c90da9d2", "test123"},
{"$krb5tgs$23$ee09e292a05e3af870417758b1cdfd04$a1a480b8505d2f2f0ff06ddce40c2f6e76bd06fa64dcd5b0646a68effcd686b2e41562ebda90da7e7b36d95cd16ca8a33b8d99184d6b7fa7a2efec3a05dcb63b3e815ffd38849dc69174d1efb3a871544b73a6da55d2331bd4b60743d1654873e3c1748ce155c35a1711695296ab944d158e374b67f43dd07eab2bcacec1be480e5c1338e3834f7133909f5c7970ece39e73bd96d40f696cb5a8575e5e1feab937b616d6180cc3258e22b9fc495017593e15fc10e674af8184c282a0d80902ea9dabda5fb0a56d7980bfd4b62b330155cd8e318dc5be55500cb8ddd691b629af371463c411f1c11d21811e1546477b85f0a85e296f5df737930aff5015111d2f01a236ab7c77e9dab001f52400cccbcdb31bb180db027bd0fa2f6000dce7c1e072c0effbdee23a401720b1fe54a09a75430497f42f6e047d62d1123866d6ed37e58f8e2c1e462acb1a97a44a5ccef49897af190a46b3ab057d18c1e47d717c7a63658357d58d9cd5b7672f0a946f95f6e2ec3aee549e20e3b11237ea59f87723f24e03a6fac9e51086bc84142631ed36ee6855920f3d3d1e85d0faaa0a8b04a2b050b17f94d44af7f48302fa70dcf43279415983924e5d874c59722b6fb87ad1006fcb51e4341bb2cc4caf8c4b7993269af219cf4efa12b1009961c22f123c35f982e4ca75a97cd37f7f16be111ad301637ffb1664ccb021d3cf6bf771e07dc42202dac079c6bd7559f8e7a939bc14e9ddb45fe1b88c5f83b1ff966342bb9211afd15772cf5f871d39d0b30776d51d84b046df30d250c1877d146047e784c4bc2e6745f357dd0b1c6aaa11e26a0e3c2772781695f6a3bc536ba19e2327ec8c0866bd78d3b5b067abcf6991eafc8b7a11ad4049711263f3c68b358f246da1308d5a0daac1d7efedbc237be3d6a4bafe5ce66e941f7227d2b869bda637dfd223a4546340c59e7d0e2b58f60a67590468a53a5d28cc35cec36a9c5610c70c0633767539640b42cff787f4782057ff70d0e64658413429347f5449c1360da4d0827c4197bbb0361c6d0e04bcaf6bba1233912f806772146c0e778ac06749bbd3d8819007d070ae912580ff11a41f71b0907b88fb585585ebe42b4cc4ecde8ff7b49a856dd70f316425e53feff3ee6ca1e44d9ba5e607a41cf26edf44bffe2796f94ea2d767fbf81f665a7fedf0291e76c6fa409dc99c56954f21edc77f6173c5a3a909c8756f3cc5cc6c2d2e405f333ee0b50284aacfb81f9dfc6058b78b282db4511580eb623dc393919befc250d224490936e5fb16c483f4bd00c8915288d0ddf3812eaa3d46ad5a24c56390076730d23b2de6558ddadddba725f9b4a73d13de3e1276fc285194e3a2f613d9b020d0485d7e26b36b7b917f4911024127320627066fabbd465b4cd5d5fdebae804d15db0f5b276659364bec32a13a8d9e11349f54bd", "bluvshop2"},
{"$krb5tgs$23$*Fist0urs$MYDOMAIN$cifs/panda.com*$423cb47a258e5859c13381ae64de7464$8dd47d94e288a1b32af726d2eac33710fb1610e4c6f674907d7a74d26515a314173b2b531baa790b70467ebe538fc9e941bf4d7f7218a4ec17c1dc963b717d5837fcd5ae678189101a1b4831a53a1322ca6e8f5d644e4aa72e99bedb4a0e967c3e05ccdcc96137265612969a1214a71038dea845250cac45551963fe85f193d88aa39ed57b95b934295e17de04ebf0ad275df67f65fb1fc2ee3095c6af02c4c1b8efa570e1c2ac562601c5ac89bd6f59ca8b957660aa00787d4a0f9d9f29b15eb3b85823f7c9814eab9106210c37d863cf8413391c5941a994fdd52a44e4f8e8e4c9b8b520e62015fb5ed40e91e7a453b3ddcefb888fd896c187993a899b6a30d27a5b2b7847a410c0cce8b0fcf90367bfd8e6dfa7eb37676ecdf500c9a51ffb59792c13e222371e024f857134b7039931daa66a6902da37e71c41adf83846a9df1e75575696d7a6f1744d48e8215849773903c9475c29a1ec0fcc11257f9479467c2b65679a3da298e6806d619794dfc06b10b5e0a46e395c3ade3d750292f244cabb7172d83dbd42c6e3bd5a93a8c2d5fe84b23a3c60508733f5a087763f2fa514d18f891461b8ea22f7eaa847906182bd0415c28d197c06df8449cc2c6c2016c38672a67613a14ccac9025c4da85fc0825dcd9a1269e6064f80c0de445fbdd237d35ab0eb6ae468413c5b17c9955a8c8c34952c8a188bad7e5b18651a75b1c46cf116422378a94a19c31dfa634c8ab15f4f13e7e427741ab9e8f247b4a8fe2562986ee21f602b4fad45bd535718020b764da6f346e3b028db8a1af88419f3ea9141fcf0c622ed40d894814e5d60a9dcdfc8344f802c7b2f0089131e57ac0cc071af13c3b2b7302e9df4665c48b91f4ef0bb2a60a272e5841e0ee8da01a91773d41f295514b65ccb2190195f720d9838b3e7c701b51e813ef0262fbdbbe06391ba3fe4232e74523dfa933e6d3df2494ddd9f254afdf97623ceb5d32483a870cf72a57617bdbf97f0420c041edb5a884ff401dc21da0472d7a75d89dc9937fd65c3a422063ea44e3954435d38b8f34cec2c0360c8bef392f77fbab76a7b801e05b467d4980d20f0a7dbc1c39f50ce4429df1ec167c6be67d2fbd507a3f7b5d98cf214ae0510fac51e1075a06250d65a3a1179486bda5d982b7904682835079e3042f39a582492cd14dbafb5826e242c81998752043e2dd91b648f115900595f5191a01f187c4b6dea4917e4773a5fb28cb1d20508142a3905068c931a8c9a8fa291b92f8ece9884affd8787a5aa11858274879160e930587f3c32e2cabbd124c708641df09f82d05ab4db157ad24931dc36c616dbb778762ead6a8491ce8a48037106d382283ac69422c04af3ae2cbe22eff6dff21bc34b154a5fab666870c59aba65bd4e0ea0be3f394bb4901fd64a0e19293b8026188615c84601b7fecdb62b", "jlcirr."},
// 1-40a00000-john@HTTP-adc1.toor.com-TOOR.COM.kirbi file
{"$krb5tgs$23$e43ae991b15bbd11c19395c6c785f4d4$07ea84f4cf5ab2ad5a1a15c5776e7bc024d26451771e653c9cb0b87d8a5d73317f992913621a61039d143818585aee976b5273f53023d28a1da22c8a2f79e47956da4221bd10809fb777b4684cbbc102bda46dc816eb5a5315196f1b2cd47fee6ddc1adae753c96eefe77bf8e8e54e33489595f0c3cb47db9bef77438f666c15de4ee9893839c5280daebd81d476a00944f8282eed61af43578fc6f68dbb47ad9106ea1f58125355506016ccf997d35d8ccad169ba7eebe27e76d19188a227158172b405c7e053da1e3bafae4cd39594e7a03e7a96bdbc63a793fba6c26135d6d1789395f0155341e04f80097540ffb1f299f61960a34db3ea14b95b4633b7eea3a552140e7e42708009fdda3d1b42b3297142bfc036abd3d28f07ba1c8362e1c5b346f55af7214314a92fa412733825f55fe4a56b56859af00eb4f69cc7ad339b7bc8032ff1057be3e73c5533f4f546e599ecbf60305569c9b87b22971ef012ff92f582688b001ad23901dae743c46cae6603f7b6b88db78fcfd59997e8a1078f8a27e28a6628bc59d78674d9d16a6413da369ab58cb702dba01c710fbfed87f4665dfb3cc4a8f83ebf960435ae96973e699cd419324ddf115825c99890b2bb8e35ce0005a2adf95ce691b135358c63aa87088ed615c5a9667927e691bf7135677893abc41c038d25ff9091c14e3d1da85c7f0edaed32c9b3b56d2c473b2363b93aae5cc9b02db47e7a22a639a951e2edce7580f691c2ee0f8ebdfb02cdc6de8d1028e34085d1a69cdebb00a430b5ddce223bd1cc9c66b746a46584c098f051b97180ee8db4268a3a838504884df45227cac6fe9e73704877f04558c9640ac2ed33b3216b2e17805863a955285f4633407097f439d7063faeacdcee0d6d4e6c2adbe85df0e51eb3c08da1cedb4fa70ff74b2711a7294b499597c1f30c1dd3cc12751692311a16e22b3fa6af75eb0ace4170df497ba860445b1fc964771eafc034515918bb080a6d05ab1170708e6ce80bf9b00f808a2b814e89d0ac9b5d1a23686b48e99fdc50c71b5fef8a9bfc851e40bed59f69821109be0119151768e4d91b8b00c46b39af207ad4a2566ce7751ac124c3c5851cd1026052d34988272bf2851bd1a4536816a7635d83c1378b442eb04c15d5028763e0b189c8f45703c54d62aaea570c9e56b0e721d170cda74f91a4101c495fb565bb03f2ad635335c88db112dfb073bb4d1547de3214de5e371bfe9b440de3882f7b83593ca0fc60f4e6e2e3885b2a365a56b529904c74bc58ab38432f0dfbbd3f4d543f9d8685b0aa69aa807701e09e1253b6ed4948c7ceaaafdd0baed2663881d52a163101a5bb697a65b2bfcc54d0dd", "1qaz@WSX"},
// http://www.openwall.com/lists/john-users/2017/02/24/1 (Kerberoasting, TGS-REP)
{"$krb5tgs$23$*mssqlsvc/w2k8-sql.ville.com/65498*$0f5394cf9746bc8ff5b090f89971816d$2e86c9139cb881c784377c14abdbf4058eaeeb19476b0e54dcbd0599c6c349f96513419d80b73de389890ba1f67e94a1f5d9f29aa36d3cfc86c6b047afd57173c723388a1f88ec80e2575dee2a42f5a1c4fc39be69a303ce12328ce6b6e17cb7660312a93774c48ff972fdb29557c201126aebfd5f1d0ca116cc9f5a1b7f7a0847486a988663171441d9e0778ceb160fccd69e194bd6e350cd10f39628414a54629a0e3f170b7bec339ec4ee89db0eb558ad4afd086e0cb90f35b596fc89d81e4f18d75dba84246d2b5e446099f80714d88a251c4e1ce31682d2ec754ac0a2fb0ba56c93c075722318f1152041bc0a0ef558efa1a2b56043df12596f7f0880643787fde2db55a4153c183cda692b23b4ee796488d04bbd10b91e51a5f2d0753902e95534fd73433d1ad268f5caa67e2436e7a722451e8bb07e148928f4c8c416151e440fa99c03543bf21c30e5fd299c31ccb91a058b650aa07c89e84545a84a437bb215e68df7627f90ad1766e6f0ca31858d023376acf1cf06faca36e4054acceae001ab5ac96c8ba4e2a6d285ca837a2b9bbaa9ed4a92a27ae285b67f2198f0461697967826a916d2955707bde3af57e3c71330e3cef292c273ef274379aed1d9117c07c245c0054a3beb4e5dc3a4b960fae326d5f1a7fa517327c514ab4f33b9c2942f15a0b453fa8226ee6bdd2310da2da169724041dd3fce4baff594f37ee6c8d1b62da27d21e7492fe05e7f2c9e4a0d3a9540b9a50c1fa697c6311f2af31b6e743f01f499c2ece315ad74c861f379276b8d8a50c41eea5cb0b2eac7f011d759b09d2cf0aecae519ea5a25fac4cc726ae56ce76136049256e7375fdb2e3ee60f408e28657872514b63dcff78bf2840e71d9d318620409f2ba171a6cb8f05b56fa1f0c39fd91284faa497df8e1a053160fb48b75f4ddf94c9f67bb6a248cd5008931a5ba5768d51430b0a8f8f43c928d1a693725f4787999322c59fec3563fdc9bdae5981f1398a843bf4258433d4f79ad5fea293926de05dcb60668d349650e015ce3e17b1860dea3989bd87f698c5dc9dece7e4733ba069dfa86e8ddc02e13c6de02724dd7d6fa48f25984c1666341a61c4008dd66e6a072a278ed6f009a3a3c0a48946b8d7ce7c22e1009cb6d482a7f3b7105990a1770fae62b2e28281ee5ade79149a8e8a8efc77edfd1308f4ba7f1142f5fa0a73d08ec9a3391cbbfb30c586e001db0fbda98d3fdaf6751180674c8c097aed64ad870568fd4ec55ce9afbdb301954b14115df691213483825286b4c5f86f5bd71d99ae757e4d8c17420b73a4bf37e8584141c5055dc38ca76c16536e85c5b3e88fbac95e626391569de6b0d9da0cb0bce65926927fb37f892a059be16e064ec2fff275b976540b017f18553756cf3e6f2fe5a08bf8fd8cca8814ebae6124fc766bcc93eeb375c19e", "Compl3xP4ssw0rd"},
// https://hashcat.net/wiki/doku.php?id=example_hashes
{"$krb5tgs$23$*user$realm$test/spn*$63386d22d359fe42230300d56852c9eb$891ad31d09ab89c6b3b8c5e5de6c06a7f49fd559d7a9a3c32576c8fedf705376cea582ab5938f7fc8bc741acf05c5990741b36ef4311fe3562a41b70a4ec6ecba849905f2385bb3799d92499909658c7287c49160276bca0006c350b0db4fd387adc27c01e9e9ad0c20ed53a7e6356dee2452e35eca2a6a1d1432796fc5c19d068978df74d3d0baf35c77de12456bf1144b6a750d11f55805f5a16ece2975246e2d026dce997fba34ac8757312e9e4e6272de35e20d52fb668c5ed", "hashcat"},
// 1-40a10000-luser@MSSQLSvc~example.net-EXAMPLE.NET.kirbi
{"$krb5tgs$23$70c1576b3fec9b24ddb925efcbdc687c$ac33782f96977412999a6e1010f8b5e099da60c31603280188290bbd336d6a10b029bf5e3eb1218870e27170b704334f4e60b90e5ebcbe7ef102d06785a00c28f337d2995c347493548d854f7208abe4405430e42b6aca8b6d640068d5ab05c2c0176707dbdc096628925937345a9e4f67692773b0df58c36703bb738191681e7424fa85fe964b8a6bc4ef379da8af8513582ccfebf86dd2ec7bd91a702d2eda40d8882aa2042ccf5ca40b7eb370643b003e3909d08a433be7657b5d695ff3abca64191ead8433c2638e08bca64011406a3724aaf70d153a69cc84e8c24b98786ed24a57b4a312346ceef1f30c5a1e437049af054071fdef28747f786207ce7e085dcea3aeee31a7aca11022308fc7db549b0285565710422c9f4dba94090f8ea34113050f75e5e850303e18f29cc6fd8d45a87730bacc9258d179db9b98524f7c1041f7af71ed96f816c7cf73d3d8f8249d9a485fe56dda09f2ca41edda6cf3095f4aff036d9ca71cbed651a1f89bd5607962ff395a3398ef9e4ece1e9ecb59cacd41331c2971ff03f9016875dc03e96a4ade7d318a50af1724a95c8e2441ecd22f041d31f49cc461de3869f930240e02a1f7ff9af331dd48798acc45abf48d9d29caaeda467e4194df14b3dd5678abaf56fd092b9c8a6858d351ed14126e0a7e78970ba462d71a5afc50c544e64c5f708e63f34b6363c0d1921522959a1eda4f46096874b48d88b3fddbbf8984e2a1b836f6bec614806e41aa1b2ac3165942d371c208621ebf2dfe99cfe81367dffea3b3d7a7eaa1eed76d3f3bc9461e884cb3fc747ed344579ba7d803d6e775f80d71fba90535602c016b63d14f50ff8732e6f4f0eb4fdc47b7cf84346f21498d4fc2f3124ce16fcd41caedc5178ccc54cea6298c2e938d887991a84c4dfa71541ca0acb154a1603e24004648e5c81a87a7aababdb48c8cd091cea6cc7aeff2589efd2970cfa9fa073c26ed024fa2864c75058c135d3e1722cf6174169ea69dc346ccf3773ea6598f2597ba7840fc334bc7571c534384f3e4301cc430326c480d16defe7bab77960fdb939208a15445676a488f1cbb02e577cefb51b7bf465e73ded374b3224837d76a4163b8d05cfea83e216dcb6e6441c61f1d90f4c1f592b7538105a63af5843138406173eb3a1df3060312d420c0f360f1054c605019b77098cf8d6684c3c33280e3c15b2ffc776b11e08a417225ec92dbd25d05a8ffbc4662cae0cb14ccb157f36c84baea827cbd14e1d02b3f780d6339128c8ff8513e1a28006f8ffb531d798af6880bbea0044c5fbdbad3a17dfcd028ea334e2e4ac5b50819ac25e6386870105b2a9040324ad014041141898dd40e18f5a2acd7f0c8b8cd9d58975857f2df9582ba6d5a78ead595bbfae5451f7e6e261209c36c9aaaca4d2f53b0b9fa0afa2cfc5228a027599d816b8ee7", "openwall@123"},
// AD-2012-luser-openwall@123-TGS-SPN.pcap -> extracttgsrepfrompcap.py -> kirbi2john.py, same hash (with better details) is generated by krb2john.py
{"$krb5tgs$23$bdcb2559d28a8857a88102b8c131b861$2a02a3a7d75ecbdc2152588b64c1668a613e0d670cfecb723541096e34a5cf144d151422f3892b5392adffa0f26b03ef6378874e89dc950e395269eb2f114eaf13c61bdd02a2a4af594ab7d1743d7d33d3a9953262f173e61bed92bb097e3225128c9d1531d09cf940aafb9700d9245abb7c2b66479af3fbf9f022eaec9f80b3b7e497b7f25bd9ba796e0a9fb9aae3842d980fa511c0041b956f9f24a120a3a14135b47da44fe3a45c3177a55dc5b1986c1c279b96a5c63869733a61d12d141ca2b969a06ef6cb33597567e01165168a20ee3267ceb32a5553659e9c88d37ca6cce7ebbded80941868369238662f26b10d36e4f2a9426a84fc5abc905b8286cd8bc87bdda9c36a3902222f27e4007fb3d9fb29b7dcff1414584e5d142c35de02e2d3634539ccc787d374686ad67ea4f3aacbd4a418459a8d3d065e4ecdb3b270fafd8d48cd9022a9d9ca00a0d64bcd27046e4bf809f95a94dfcc9700c4c6921e46b1260023ee62c2b87b9873016b92c262385435015e23cfb3f4aa7821c2cabe9b376a1385a32c986dbd5b2466fe20d5358d228810e3b0532bfb6ce690feee3ec0b7ed9f9211ae2a34fe5cc24599d4d28668b869a3fba93b948d57a96da995402e37bf07a8d42cd0bde9b140496e6c53c072d44e28b958659697da4a396607950c5ed54640615ff0857e1148679366206153f5076b4ebf5f7a931335cc3afd0873eba1164400810c5de6cbaf04af5c2ef92ed616d57e14c8eeff1289dc9d53bd94ff0587653526147d1525b66a4012aa2dd67991a86d4b680458de43d22048a1486cc5142b85ebd2e1bbcca572fb155812cf4dc5c4dad7e1e3297185958f623f4e81d657108a2b4721e54fb4ea8806a6b9f0726a5ef2d9eb4a3e2eaeab1632666f20de1d57e58181a5f231a6b2862ca6b7cb33b79e918356b9a85f69de3854463c93150e291727ec6e82cd0a1bc284bc2101bedcd77d1ccf052b736b074e978baae93ec908ba924c445951b66605c05ede1eadc0595f14707aba99bb35ac222d3d34d9b1f59c5edecee434385f4a2a30d2aa8f4c339a23c91b943affd5c6bf51d9deb0116f323f1a3253bdede5e9297b2b202a227f3670073c7d29a6afb21814421d6923f99cb2fe976fd24169c2d97536df04e043e93ef8a81fd0579210d52503ad0cb4a4abae164b3520e9d540eab0d8c1cc88c5afc5bfb253bf0bd02e9c9f61427fa83340c5d1907cf00f6fe27e820544a55c6ba0d0ebeba96c8ad21ceeb0967b2fcea324269dd0b73d5cfdd1e568d9086d7ea93197cda743c7e90b9939fcbf3976a3aa01b4fbea4b849f3aeaf6c1a70ba6df98accbd0545d6abab6d0598e21d450731c68337780171bd440743548854e9643c8d1ae51580fab64d314d697628ace7616287d284f53831a3a831e75a0fbf7bfba917e2ade6f7f2fc78b3ed4000652", "openwall@123"},
// From https://blog.xpnsec.com/kerberos-attacks-part-1/ by https://twitter.com/_xpn_
{"$krb5tgs$23$*iis_svc$LAB.LOCAL$HTTP/iis.lab.local*$0f6fc474db169aa8ce9b5e626daacc9d$1a346ce3f66c52976f53831aa24a1b217cdf0d68a0eb87fee00cfd32f544bf83ebb6416732522b12232dd6935eac076b439f56e6cb7fa6c37d984d132e2d2cb65ca399cd5e44eb2eb41f12c40f9044b40e3ea914278c8a3098babacf49ab46e776d1413ef63abcdf6418d2db9241b2fdd9309346ec59af20a82fd6daea9510c1dfd1a9e8d99c59ff72e985057ba0d18394b0a7cb1bd74f8d436a3dd780175a0c6bcad9e46570a476ab9913b561ee481ad8c33a3c81ced055e959f08a52eba7a342f53183e1531be8ec2d28c7ecfa32f98dbc7ff87b4e5c79824f3868d38ce09010960726d58cfbfc88c9d34ab199169f39010aa4aab92b6ea40f875963d518311b3f079d97b65fe9768c9a4ee50f7c16d525fdc081ce359a0b0fe5fb18d8d8690d8f88b010bef4f28dc151a4137272ae9eaca9053406c0ddeae453196e3b6c28b8359724bfc089b772cbae093bf88abc070d12b0ff2e721d7b8b10b822bfb514091effaf3f5fa8c286a9e45bf76ba171e6cabeb3ddadc297185c51a295855b8cfa8062bd6770093355c32690fd184d6eae2b66ea1f553cbc7679681db5089fdb23329efe59de807e657a98ccc0c2d95eaac9f363d5b8c9b8a23aab680c328b019ae99440a5d8795014be22f6739a4f77874e94196f010c012f9a4a587570c38874ad7f8b9ec554fb865752a5f3dd4f785c9af54031100ce580dfadf4c70ff11839647fc288fce8d00bbcb680e02a46230ecb0530ba1771fb8485ba17f5218852c5cdfc769b89d77b37802cc6d22e6ba944f6e4b565d8d04418c44bf10e06294fd58913ca6d206bb6e46f15b3abfc09695f5fbab81d2e743ac19b24716d9d6cb6bae65674f5cdf1935d1413a4be6d96eafaf65cfa361decc0ab1e12998b5c26b6ad38c8077fd149cdeda227c4c68f19fbf22b23e7e84581a64a413c1c983e01b56c2000656b4aad8c67260fc0142eeccd96d624fa284b619d11e797af2d730a5998d9e6d9f4fef58a847d7d9b804be2925beae627a0a9f335072f97f214a24db58cf5e2e74f0eeff1a43f1ec1b88c0110f3c2abaac0d3e954a42b550c37cf84babe6e85ec4e0885eb8309a4c5e2a1bb473b332ff5c31c0b4c32db507c1eca5b7ae607d2423ee1e7f07361229e0ab2678cfbd07afffc5e989c5ab1821ac2f524083258d3f0ca7e7f8250be3f7cc72cf636b098a3c9b3f4e289fd81a9b3c33bfa63ed8813bbc12205134add9fb8548312b734c921a2cf8a1687af7ee022b0f57bbf0f8d8f17952614cb288b95df3fe4f03d20b83227328603dafb264537eb0cacda18de21aa99e07600030424edb41fc3c8161238971bf62af99db8e2d438af06f9d8feeff3edb6a4d4f0a6fb5dfdbe99b1ed454d6ff3dc508c45ed430923212a088e6200b2076da509888edd32fca946a215c8934db7a3b5ac6bed10e4a114f2f132608dbe236cba73cbcffc024fb500e96c3d766ca7f4083ded3666c2b7dcd290f65f7e80ff70fa575777a845fbf7af05b38dfb1ccd7accc0398f8dbf532e28dc6bc0ec49d18f2753caec5912693a0b6050f2bfce72f5160847dcfc78d580609007ddbdf1f338c61c13e7b62fcec6e51d1c0cd1ec0167e40042", "Passw0rd"},
// ../run/ccache2john.py krb5cc_1000_linux-client-to-AD-server
{"$krb5tgs$23$8bef2ad33c7d5d23c8693ccd868d9f84$f72688ca744f9634625c2dbd94ef6193644e08c4627839ce5fcf8bbfbd15ad7093fb3ecc24512ac161188a7ad2de127166f4f244f37527e15468d844efe6396756b8cf8341cbfe70e454e1f2b324f5ddee64f4f4947e1bb6776041e28e6261b57b1791677b35e1b4171ec5a05062b7bd7eb5558d6e4165ad916e790954f163a7f7e8390462295d74eff33fcbb442d4281f7cf68ddbb6ce27030015b2cf4f68e04793a9674e634663a68622e13a5a6d8c27bf8b90ee18033e754c95776d1dbd8d73ab292d27c445eb33b9b814945617f527678667b2d81adc57c7c62713d7826bc9ea918b330245a4d7fbcc33889901a63131be87a0d4e8f49d1259141677bfa27c284d8cfb1c94f4d4a602f30b774e58378710d0ad2a73c978c8c6470402c0cdbdf5d505fe07dc4250510836d5ec969b56790128aa75b292d71d4e6c77ffee261230fc7b11969e4701abc63f120420cc0f74f99e42d6613713fc0530842f77b7a45daadb296c744e0725cd2867492b5bc1cb6a4e3315e1f6b7e5b0299a9283235769d95ef63c885352b08176547f86a03ec323a2e5c84f66f3ab699fc7c5e8b2120659c963d5c98df9529939b250e0f1703f5a9859f8b00a5317aa92dc37093dc87984c0eb841cd2ce77b80510c79e8b5eb56c15f07eba936d257a4d78f82a6179ba7ed07fbeba68d43112e44f850a5034b9239c689828f7039b8c8efcbecbc34ab92d5fb869482456a92ce9201e6586458e1d7bd38b2251b1fb573dc1c6ce40a428448cfaf18e5251436aece6baa2356a06a50a6d4c011977233e7b38007cad4a869941fea1139ff4d2ce216166abf5dfa89dc85719b14eaa0d492b1d442b67cb10c81bb35e1a96ae4f62e8c4fbf604c916e2ac4a121620a2beb40cf7d43bbe7ddbb38435dacd7aba846915eb01b57d33d5dcc25082e79452416d86143e33608a8e279d396cca4a8cc5d02ad820868c4381f9f7f5bab23ba8440fd9ab5ae713cb1c20a82e0a0e93a83248b2a2a8a30aceed71a8e78526943b45f122f16ea004ffdea16005bb52e184b7498c8aefb26da2e8b43e98343e9bb364578deceee623656985fdb58dda60a112d9dc1f0e36230d43ab8d1f48ba2013fd641001f675b8918fb69826decefd742a3f3c02b49988b2b439db763db2d95744a2f2456f5b32ea64e8836873ecce085875b4e054e55700198642961560529bba800a683f149c63dd5f983477f415399f229f6f292327ebac73a6707da022e472f1081421c8549d21696db7fe7bdd2ad744a5afbada9a719c26e3962cd86f8e", "Passw0rd"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static unsigned char (*saved_K1)[16];
static int any_cracked, *cracked;
static size_t cracked_size;
static int new_keys;
static struct custom_salt {
dyna_salt dsalt;
unsigned char edata1[16];
uint32_t edata2len;
unsigned char* edata2;
} *cur_salt;
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char *ptr, *keeptr;
int i;
if (strstr(ciphertext, "$SOURCE_HASH$"))
return ciphertext;
ptr = mem_alloc_tiny(strlen(ciphertext) + FORMAT_TAG_LEN + 1, MEM_ALIGN_NONE);
keeptr = ptr;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0) {
memcpy(ptr, FORMAT_TAG, FORMAT_TAG_LEN);
ptr += FORMAT_TAG_LEN;
}
for (i = 0; i < strlen(ciphertext) + 1; i++)
ptr[i] = tolower(ARCH_INDEX(ciphertext[i]));
return keeptr;
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *p;
char *ctcopy;
char *keeptr;
int extra;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += FORMAT_TAG_LEN;
if (ctcopy[0] == '*') { /* assume account's info provided */
ctcopy++;
p = strtokm(ctcopy, "*");
ctcopy = strtokm(NULL, "");
if (!ctcopy || *ctcopy != '$')
goto err;
++ctcopy; /* set after '$' */
goto edata;
}
if (ctcopy[0] == '$')
ctcopy++;
edata:
/* assume checksum */
if (((p = strtokm(ctcopy, "$")) == NULL) || strlen(p) != 32)
goto err;
/* assume edata2 following */
if (((p = strtokm(NULL, "$")) == NULL))
goto err;
if (!ishex(p) || (hexlen(p, &extra) < (64 + 16) || extra))
goto err;
if ((strtokm(NULL, "$") != NULL))
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_alloc_align(sizeof(*saved_key) *
self->params.max_keys_per_crypt,
MEM_ALIGN_CACHE);
saved_K1 = mem_alloc_align(sizeof(*saved_K1) *
self->params.max_keys_per_crypt,
MEM_ALIGN_CACHE);
any_cracked = 0;
cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt;
cracked = mem_calloc(cracked_size, 1);
}
static void done(void)
{
MEM_FREE(saved_K1);
MEM_FREE(cracked);
MEM_FREE(saved_key);
}
static void *get_salt(char *ciphertext)
{
int i;
static struct custom_salt cs;
char *p;
char *ctcopy;
char *keeptr;
static void *ptr;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
memset(&cs, 0, sizeof(cs));
cs.edata2 = NULL;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) == 0) {
ctcopy += FORMAT_TAG_LEN;
if (ctcopy[0] == '*') {
ctcopy++;
p = strtokm(ctcopy, "*");
ctcopy += strlen(p) + 2;
goto edata;
}
if (ctcopy[0]=='$')
ctcopy++;
}
edata:
if (((p = strtokm(ctcopy, "$")) != NULL) && strlen(p) == 32) { /* assume checksum */
for (i = 0; i < 16; i++) {
cs.edata1[i] =
atoi16[ARCH_INDEX(p[i * 2])] * 16 +
atoi16[ARCH_INDEX(p[i * 2 + 1])];
}
/* skip '$' */
p += strlen(p) + 1;
/* retrieve non-constant length of edata2 */
for (i = 0; p[i] != '\0'; i++)
;
cs.edata2len = i/2;
cs.edata2 = (unsigned char*) mem_calloc_tiny(cs.edata2len + 1, sizeof(char));
for (i = 0; i < cs.edata2len; i++) { /* assume edata2 */
cs.edata2[i] =
atoi16[ARCH_INDEX(p[i * 2])] * 16 +
atoi16[ARCH_INDEX(p[i * 2 + 1])];
}
}
MEM_FREE(keeptr);
/* following is used to fool dyna_salt stuff */
cs.dsalt.salt_cmp_offset = SALT_CMP_OFF(struct custom_salt, edata1);
cs.dsalt.salt_cmp_size = SALT_CMP_SIZE(struct custom_salt, edata1, edata2len, 0);
cs.dsalt.salt_alloc_needs_free = 0;
ptr = mem_alloc_tiny(sizeof(struct custom_salt), MEM_ALIGN_WORD);
memcpy(ptr, &cs, sizeof(struct custom_salt));
return (void *) &ptr;
}
static void set_salt(void *salt)
{
cur_salt = *(struct custom_salt**)salt;
}
static void set_key(char *key, int index)
{
strnzcpy(saved_key[index], key, strlen(key) + 1);
new_keys = 1;
}
static char *get_key(int index)
{
return saved_key[index];
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
const unsigned char data[4] = {2, 0, 0, 0};
int index;
if (any_cracked) {
memset(cracked, 0, cracked_size);
any_cracked = 0;
}
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index++) {
unsigned char K3[16];
#ifdef _MSC_VER
unsigned char ddata[65536];
#else
unsigned char ddata[cur_salt->edata2len + 1];
#endif
unsigned char checksum[16];
RC4_KEY rckey;
if (new_keys) {
MD4_CTX ctx;
unsigned char key[16];
UTF16 wkey[PLAINTEXT_LENGTH + 1];
int len;
len = enc_to_utf16(wkey, PLAINTEXT_LENGTH,
(UTF8*)saved_key[index],
strlen(saved_key[index]));
if (len <= 0) {
saved_key[index][-len] = 0;
len = strlen16(wkey);
}
MD4_Init(&ctx);
MD4_Update(&ctx, (char*)wkey, 2 * len);
MD4_Final(key, &ctx);
hmac_md5(key, data, 4, saved_K1[index]);
}
hmac_md5(saved_K1[index], cur_salt->edata1, 16, K3);
RC4_set_key(&rckey, 16, K3);
RC4(&rckey, 32, cur_salt->edata2, ddata);
/*
8 first bytes are nonce, then ASN1 structures
(DER encoding: type-length-data)
if length >= 128 bytes:
length is on 2 bytes and type is
\x63\x82 (encode_krb5_enc_tkt_part)
and data is an ASN1 sequence \x30\x82
else:
length is on 1 byte and type is \x63\x81
and data is an ASN1 sequence \x30\x81
next headers follow the same ASN1 "type-length-data" scheme
*/
if (((!memcmp(ddata + 8, "\x63\x82", 2)) && (!memcmp(ddata + 16, "\xA0\x07\x03\x05", 4)))
||
((!memcmp(ddata + 8, "\x63\x81", 2)) && (!memcmp(ddata + 16, "\x03\x05\x00", 3)))) {
/* check the checksum to be sure */
RC4(&rckey, cur_salt->edata2len - 32, cur_salt->edata2 + 32, ddata + 32);
hmac_md5(saved_K1[index], ddata, cur_salt->edata2len, checksum);
if (!memcmp(checksum, cur_salt->edata1, 16)) {
cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
any_cracked |= 1;
}
}
}
new_keys = 0;
return *pcount;
}
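/*
 * Illustrative sketch, not part of the format driver: the RC4-HMAC (etype 23)
 * check that crypt_all() above performs, written out linearly. It reuses the
 * helper names already used in this file (hmac_md5, RC4_set_key, RC4);
 * "nt_hash" stands for MD4(UTF-16LE(password)) and "plain" is a hypothetical
 * scratch buffer of edata2len bytes.
 *
 *   unsigned char K1[16], K3[16], chk[16];
 *   const unsigned char usage[4] = {2, 0, 0, 0};   // key usage 2 (TGS ticket)
 *   RC4_KEY rckey;
 *
 *   hmac_md5(nt_hash, usage, 4, K1);               // K1 = HMAC-MD5(nt_hash, usage)
 *   hmac_md5(K1, cur_salt->edata1, 16, K3);        // K3 = HMAC-MD5(K1, checksum)
 *   RC4_set_key(&rckey, 16, K3);
 *   RC4(&rckey, cur_salt->edata2len, cur_salt->edata2, plain);
 *   hmac_md5(K1, plain, cur_salt->edata2len, chk); // must equal edata1 on success
 *
 * crypt_all() only does the full decrypt and HMAC compare after the cheap DER
 * prefix check (\x63\x82 or \x63\x81 at offset 8) on the first 32 decrypted bytes.
 */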
static int cmp_all(void *binary, int count)
{
return any_cracked;
}
static int cmp_one(void *binary, int index)
{
return cracked[index];
}
static int cmp_exact(char *source, int index)
{
return cracked[index];
}
struct fmt_main fmt_krb5tgs = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_UNICODE | FMT_UTF8 | FMT_OMP | FMT_DYNA_SALT | FMT_HUGE_INPUT,
{NULL},
{ FORMAT_TAG },
tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
fmt_default_binary,
get_salt,
{NULL},
fmt_default_source,
{
fmt_default_binary_hash
},
fmt_default_dyna_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
fmt_default_get_hash
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif
|
utils.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file utils.h
 * \brief Basic utility functions.
*/
#ifndef MXNET_COMMON_UTILS_H_
#define MXNET_COMMON_UTILS_H_
#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <nnvm/graph.h>
#include <mxnet/engine.h>
#include <mxnet/ndarray.h>
#include <mxnet/op_attr_types.h>
#include <mxnet/graph_attr_types.h>
#include <nnvm/graph_attr_types.h>
#include <memory>
#include <vector>
#include <type_traits>
#include <utility>
#include <random>
#include <string>
#include <thread>
#include <algorithm>
#include <functional>
namespace mxnet {
namespace common {
template<typename xpu>
void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output);
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype`.
* false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
const NDArrayStorageType stype) {
if (!vstorage.empty()) {
for (const auto& i : vstorage) {
if (i != stype) return false;
}
return true;
}
return false;
}
/*! \brief returns true if all storage types in `vstorage` are the same as target `stype1`
 * or `stype2`. Sets *has_both to true if both are found.
* false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool *has_both) {
if (has_both) {
*has_both = false;
}
if (!vstorage.empty()) {
uint8_t has = 0;
for (const auto i : vstorage) {
if (i == stype1) {
has |= 1;
} else if (i == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as target `stype`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype) {
if (!ndarrays.empty()) {
for (const auto& nd : ndarrays) {
if (nd.storage_type() != stype) {
return false;
}
}
return true;
}
return false;
}
/*! \brief returns true if the storage types of arrays in `ndarrays`
* are the same as targets `stype1` or `stype2`. false is returned for empty inputs.
*/
inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays,
const NDArrayStorageType stype1,
const NDArrayStorageType stype2,
bool *has_both) {
if (has_both) {
*has_both = false;
}
if (!ndarrays.empty()) {
uint8_t has = 0;
for (const auto& nd : ndarrays) {
const NDArrayStorageType stype = nd.storage_type();
if (stype == stype1) {
has |= 1;
} else if (stype == stype2) {
has |= 2;
} else {
return false;
}
}
if (has_both) {
*has_both = has == 3;
}
return true;
}
return false;
}
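/*!
 * \brief Illustrative usage sketch (not part of the library API): dispatching
 * between a dense-only and a sparse-only path. `in_stypes` is a hypothetical
 * StorageTypeVector collected from an operator's inputs.
 *
 * \code
 *   bool has_both = false;
 *   if (ContainsOnlyStorage(in_stypes, kDefaultStorage)) {
 *     // all inputs dense
 *   } else if (ContainsOnlyStorage(in_stypes, kCSRStorage,
 *                                  kRowSparseStorage, &has_both)) {
 *     // only csr/row_sparse inputs; has_both is true if both kinds appear
 *   }
 * \endcode
 */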
/*! \brief get string representation of dispatch_mode */
inline std::string dispatch_mode_string(const DispatchMode x) {
switch (x) {
case DispatchMode::kFCompute:
return "fcompute";
case DispatchMode::kFComputeEx:
return "fcompute_ex";
case DispatchMode::kFComputeFallback:
return "fcompute_fallback";
case DispatchMode::kVariable:
return "variable";
case DispatchMode::kUndefined:
return "undefined";
}
return "unknown";
}
/*! \brief get string representation of storage_type */
inline std::string stype_string(const int x) {
switch (x) {
case kDefaultStorage:
return "default";
case kCSRStorage:
return "csr";
case kRowSparseStorage:
return "row_sparse";
}
return "unknown";
}
// heuristic to determine number of threads per GPU
inline int GetNumThreadPerGPU() {
  // This is a resource-efficient option.
return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
}
// heuristic to get number of matching colors.
// this decides how much parallelism we can get in each GPU.
inline int GetExecNumMatchColor() {
  // This is a resource-efficient option.
int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
return std::min(num_match_color, GetNumThreadPerGPU());
}
template<typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
V sum = start;
#pragma omp parallel for reduction(+:sum)
for (int i = 0; i < n; ++i) {
sum += a[i];
}
return sum;
}
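/*!
 * \brief Illustrative usage sketch (not part of the library API):
 * \code
 *   std::vector<float> vals(1024, 1.0f);   // hypothetical buffer
 *   double total = ParallelAccumulate(vals.data(),
 *                                     static_cast<int>(vals.size()), 0.0);
 * \endcode
 */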
/*!
* \brief
* Helper function for ParallelSort.
* DO NOT call this function directly.
* Use the interface ParallelSort instead.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template<typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len,
size_t grainsize, const Compare& comp) {
if (len < grainsize) {
std::sort(first, first+len, comp);
} else {
std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len/2, grainsize, comp);
ParallelSortHelper(first+len/2, len - len/2, grainsize, comp);
thr.join();
std::inplace_merge(first, first+len/2, first+len, comp);
}
}
/*!
* \brief
* Sort the elements in the range [first, last) into the ascending order defined by
* the comparator comp.
* If the length of the range [first, last) is greater than a certain threshold,
 * the range is recursively divided into two halves and two threads are
 * assigned, one to sort each half.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template<typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
const auto num = std::distance(first, last);
size_t grainsize = std::max(num / num_threads + 5, static_cast<size_t>(1024*16));
ParallelSortHelper(first, num, grainsize, comp);
}
/*!
* \brief
* Sort the elements in the range [first, last) into ascending order.
* The elements are compared using the default < operator.
* If the length of the range [first, last) is greater than a certain threshold,
 * the range is recursively divided into two halves and two threads are
 * assigned, one to sort each half.
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
*/
template<typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
ParallelSort(first, last, num_threads,
std::less<typename std::iterator_traits<RandomIt>::value_type>());
}
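/*!
 * \brief Illustrative usage sketch (not part of the library API): an argsort
 * over a hypothetical `keys` vector using 4 worker threads.
 * \code
 *   std::vector<size_t> idx(keys.size());
 *   for (size_t i = 0; i < idx.size(); ++i) idx[i] = i;
 *   ParallelSort(idx.begin(), idx.end(), 4,
 *                [&keys](size_t a, size_t b) { return keys[a] < keys[b]; });
 * \endcode
 */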
/*!
* \brief Random Engine
*/
typedef std::mt19937 RANDOM_ENGINE;
/*!
* \brief Helper functions.
*/
namespace helper {
/*!
* \brief Helper for non-array type `T`.
*/
template <class T>
struct UniqueIf {
/*!
* \brief Type of `T`.
*/
using SingleObject = std::unique_ptr<T>;
};
/*!
* \brief Helper for an array of unknown bound `T`.
*/
template <class T>
struct UniqueIf<T[]> {
/*!
* \brief Type of `T`.
*/
using UnknownBound = std::unique_ptr<T[]>;
};
/*!
* \brief Helper for an array of known bound `T`.
*/
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
/*!
* \brief Type of `T`.
*/
using KnownBound = void;
};
} // namespace helper
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param args List of arguments with which an instance of `T` will be
* constructed.
* \return `std``::``unique_ptr` of an instance of type `T`.
*
* Constructs a non-array type `T`. The arguments `args` are passed to the
* constructor of `T`. The function does not participate in the overload
* resolution if `T` is an array type.
*/
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param n The size of the array to construct.
* \return `std``::``unique_ptr` of an instance of type `T`.
*
* Constructs an array of unknown bound `T`. The function does not participate
* in the overload resolution unless `T` is an array of unknown bound.
*/
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
using U = typename std::remove_extent<T>::type;
return std::unique_ptr<T>(new U[n]{});
}
/*!
* \brief Constructs an object of type `T` and wraps it in a
* `std``::``unique_ptr`.
* \param args List of arguments with which an instance of `T` will be
* constructed.
*
 * Constructing an array of known bound is disallowed.
*/
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;
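/*!
 * \brief Illustrative usage sketch (not part of the library API). `Foo` is a
 * hypothetical type used only for illustration.
 * \code
 *   auto obj = MakeUnique<Foo>(1, 2);   // forwards (1, 2) to Foo's constructor
 *   auto buf = MakeUnique<int[]>(16);   // value-initialized array of 16 ints
 * \endcode
 */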
template<typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name,
const Context& ctx) {
static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");
if (ctx.dev_mask() == cpu::kDevMask) {
return fcompute_cpu.get(op, nullptr);
} else if (ctx.dev_mask() == gpu::kDevMask) {
return fcompute_gpu.get(op, nullptr);
} else {
LOG(FATAL) << "Unknown device mask";
return nullptr;
}
}
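/*!
 * \brief Illustrative usage sketch (not part of the library API): a null
 * return means no kernel named `name` is registered for the context's device.
 * \code
 *   FCompute fn = GetFCompute<FCompute>(op, "FCompute", ctx);
 *   if (fn == nullptr) {
 *     LOG(FATAL) << op->name << " has no FCompute registered for this device";
 *   }
 * \endcode
 */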
} // namespace common
} // namespace mxnet
#endif // MXNET_COMMON_UTILS_H_
|
DRB082-declared-in-func-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
A variable is declared inside a function called within a parallel region.
Because it uses static storage, the variable is shared among the threads, so the unsynchronized increment is a data race.
Data race pair: q@57:3 vs. q@57:3
*/
#include "omprace.h"
#include <omp.h>
void foo()
{
static int q;
q += 1;
}
int main()
{
omprace_init();
#pragma omp parallel
{
foo();
}
omprace_fini();
return 0;
}
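/*
  Illustrative fix sketch, not part of the benchmark (which intentionally keeps
  the race): making the increment atomic removes the data race on the
  function-static q.

  void foo()
  {
    static int q;
  #pragma omp atomic
    q += 1;
  }
*/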
|
util_test.c | /*
* This file is part of ABCDK.
*
* MIT License
*
*/
#include <stdio.h>
#include <assert.h>
#include <unistd.h>
#include <string.h>
#include <linux/serial.h>
#include "abcdk-util/general.h"
#include "abcdk-util/getargs.h"
#include "abcdk-util/geometry.h"
#include "abcdk-util/ffmpeg.h"
#include "abcdk-util/bmp.h"
#include "abcdk-util/freeimage.h"
#include "abcdk-util/uri.h"
#include "abcdk-util/html.h"
#include "abcdk-util/clock.h"
#include "abcdk-util/crc32.h"
#include "abcdk-util/robots.h"
#include "abcdk-util/dirent.h"
#include "abcdk-util/socket.h"
#include "abcdk-util/hexdump.h"
#include "abcdk-util/termios.h"
#include "abcdk-mp4/demuxer.h"
#include "abcdk-util/video.h"
#include "abcdk-util/lz4.h"
#include "abcdk-util/openssl.h"
#include "abcdk-util/redis.h"
#include "abcdk-comm/comm.h"
#include "abcdk-comm/message.h"
#include "abcdk-comm/queue.h"
#include "abcdk-comm/waiter.h"
#include "abcdk-util/json.h"
#include "abcdk-comm/easy.h"
#ifdef HAVE_FUSE
#define FUSE_USE_VERSION 29
#include <fuse.h>
#endif //
#ifdef HAVE_LIBNM
#include <libnm/NetworkManager.h>
#endif
#ifdef HAVE_MPI
#include <mpi.h>
#endif
#ifdef HAVE_ARCHIVE
#include <archive.h>
#include <archive_entry.h>
#endif
#ifdef HAVE_MODBUS
#include <modbus.h>
#endif
#ifdef HAVE_LIBUSB
#include <libusb.h>
#endif
#ifdef HAVE_MQTT
#include <mosquitto.h>
#endif
#ifdef HAVE_BLKID
#include <blkid/blkid.h>
#endif
void test_log(abcdk_tree_t *args)
{
abcdk_openlog(NULL,LOG_DEBUG,1);
for(int i = LOG_EMERG ;i<= LOG_DEBUG;i++)
syslog(i,"haha-%d",i);
}
void test_ffmpeg(abcdk_tree_t *args)
{
#ifdef HAVE_FFMPEG
for(int i = 0;i<1000;i++)
{
enum AVPixelFormat pixfmt = (enum AVPixelFormat)i;
int bits = abcdk_av_image_pixfmt_bits(pixfmt,0);
int bits_pad = abcdk_av_image_pixfmt_bits(pixfmt,1);
const char *name = abcdk_av_image_pixfmt_name(pixfmt);
printf("%s(%d): %d/%d bits.\n",name,i,bits,bits_pad);
}
#if 0
abcdk_image_t src = {AV_PIX_FMT_YUV420P,{NULL,NULL,NULL,NULL},{0,0,0,0},1920,1080};
abcdk_image_t dst = {AV_PIX_FMT_YUV420P,{NULL,NULL,NULL,NULL},{0,0,0,0},1920,1080};
abcdk_image_t dst2 = {AV_PIX_FMT_BGR32,{NULL,NULL,NULL,NULL},{0,0,0,0},1920,1080};
int src_heights[4]={0}, dst_heights[4]={0}, dst2_heights[4]={0};
abcdk_av_image_fill_heights(src_heights,src.height,src.pixfmt);
abcdk_av_image_fill_heights(dst_heights,dst.height,dst.pixfmt);
abcdk_av_image_fill_heights(dst2_heights,dst2.height,dst2.pixfmt);
abcdk_av_image_fill_strides2(&src,16);
abcdk_av_image_fill_strides2(&dst,10);
abcdk_av_image_fill_strides2(&dst2,1);
void *src_buf = abcdk_heap_alloc(abcdk_av_image_size3(&src));
void *dst_buf = abcdk_heap_alloc(abcdk_av_image_size3(&dst));
void *dst2_buf = abcdk_heap_alloc(abcdk_av_image_size3(&dst2));
abcdk_av_image_fill_pointers2(&src,src_buf);
abcdk_av_image_fill_pointers2(&dst,dst_buf);
abcdk_av_image_fill_pointers2(&dst2,dst2_buf);
abcdk_av_image_copy2(&dst,&src);
struct SwsContext *ctx = abcdk_sws_alloc2(&src,&dst2,0);
int h = sws_scale(ctx,(const uint8_t *const *)src.datas,src.strides,0,src.height,dst2.datas,dst2.strides);
//int h = sws_scale(ctx,(const uint8_t *const *)src.datas,src.strides,100,src.height,dst2.datas,dst2.strides);
printf("h = %d\n",h);
uint8_t *tmp = dst2.datas[0];
for (int i = 0; i < dst2.height; i++)
{
for (int j = 0; j < dst2.width*4; j += 4)
{
tmp[j+0] = 0;
tmp[j+1] = 0;
tmp[j+2] = 255;
}
tmp += dst2.strides[0];
}
int chk = abcdk_bmp_save2("/tmp/test_bmp.bmp",dst2.datas[0],dst2.strides[0],dst2.width,dst2.height,32);
assert(chk==0);
abcdk_sws_free(&ctx);
abcdk_heap_free(src_buf);
abcdk_heap_free(dst_buf);
abcdk_heap_free(dst2_buf);
#endif
#endif //
}
void test_bmp(abcdk_tree_t *args)
{
const char *src_file = abcdk_option_get(args,"--src-file",0,"");
const char *dst_file = abcdk_option_get(args,"--dst-file",0,"");
uint32_t stride = 0;
uint32_t width = 0;
int32_t height = 0;
uint8_t bits = 0;
int chk = abcdk_bmp_load2(src_file, NULL, 0, 13, &stride, &width, &height, &bits);
assert(chk == 0);
printf("s=%u,w=%u,h=%d,b=%hhu\n",stride,width,height,bits);
uint8_t *data = abcdk_heap_alloc(stride*height);
chk = abcdk_bmp_load2(src_file, data, stride*height, 1, &stride, &width, &height, &bits);
assert(chk == 0);
chk = abcdk_bmp_save2(dst_file, data, stride, width, height, bits);
assert(chk == 0);
abcdk_heap_free(data);
}
void test_freeimage(abcdk_tree_t *args)
{
#ifdef FREEIMAGE_H
abcdk_fi_init(1);
abcdk_fi_init(1);//test run once.
abcdk_fi_log2syslog();
const char *src_file = abcdk_option_get(args,"--src-file",0,"");
const char *dst_file = abcdk_option_get(args,"--dst-file",0,"");
uint8_t *data = NULL;
uint32_t stride = 0;
uint32_t width = 0;
uint32_t height = 0;
uint8_t bits = 0;
uint32_t xbytes = 0;
FREE_IMAGE_FORMAT src_fmt = FreeImage_GetFileType(src_file,0);
FIBITMAP *dib = abcdk_fi_load2(src_fmt,0,src_file);
assert(dib!=NULL);
width = FreeImage_GetWidth(dib);
height = FreeImage_GetHeight(dib);
abcdk_resize_t r = {0};
int dst_w = 500;
int dst_h = 1100;
abcdk_resize_ratio_2d(&r,width,height,dst_w,dst_h,0);
FIBITMAP *dib2 = FreeImage_RescaleRect(dib,r.x_factor *width,r.y_factor*height,0,0,width,height,FILTER_BICUBIC,0);
if(dib2)
{
FreeImage_Unload(dib);
dib = dib2;
}
dib2 = FreeImage_ConvertTo24Bits(dib);
if(dib2)
{
FreeImage_Unload(dib);
dib = dib2;
}
int left = abcdk_resize_src2dst_2d(&r,0,1);
int top = abcdk_resize_src2dst_2d(&r,0,0);
dib2 = FreeImage_Allocate(dst_w,dst_h,24,0,0,0);
FreeImage_Paste(dib2,dib,left,top,1000);
if(dib2)
{
FreeImage_Unload(dib);
dib = dib2;
}
data = FreeImage_GetBits(dib);
stride = FreeImage_GetPitch(dib);
width = FreeImage_GetWidth(dib);
height = FreeImage_GetHeight(dib);
bits = FreeImage_GetBPP(dib);
xbytes = FreeImage_GetLine(dib);
// FreeImage_FlipHorizontal(dib);
// FreeImage_FlipVertical(dib);
//FreeImage_AdjustBrightness(dib,100);
FreeImage_Invert(dib);
#if 1
int chk = abcdk_fi_save2(FIF_JPEG,JPEG_QUALITYGOOD,dst_file, data, stride, width, height, bits);
assert(chk == 0);
#else
BOOL chk = FreeImage_Save(FIF_JPEG,dib,dst_file,JPEG_QUALITYGOOD);
assert(chk);
#endif
FreeImage_Unload(dib);
abcdk_fi_uninit();
abcdk_fi_uninit();//test run once.
#endif //FREEIMAGE_H
}
void test_uri(abcdk_tree_t *args)
{
const char *uri = abcdk_option_get(args,"--uri",0,"");
abcdk_allocator_t * alloc = abcdk_uri_split(uri);
assert(alloc);
for(size_t i = 0;i<alloc->numbers;i++)
printf("[%ld]: %s\n",i,alloc->pptrs[i]);
abcdk_allocator_unref(&alloc);
}
void test_strrep(abcdk_tree_t *args)
{
char buf[]={"abcab| |cabcabc"};
char *p = abcdk_strrep(buf," ","",1);
printf("%s\n",p);
abcdk_heap_free(p);
}
/**/
const char *_test_html_cntrl_replace(char *text, char c)
{
if(!text)
return "";
char *tmp = text;
while (*tmp)
{
if (iscntrl(*tmp))
*tmp = c;
tmp += 1;
}
return text;
}
static int _test_html_dump_cb(size_t deep, abcdk_tree_t *node, void *opaque)
{
if(deep==0)
{
abcdk_tree_fprintf(stderr,deep,node,"%s\n",".");
}
else
{
abcdk_tree_fprintf(stderr, deep, node, "%s:<%s>\n",
ABCDK_PTR2I8PTR(node->alloc->pptrs[ABCDK_HTML_KEY], 0),
_test_html_cntrl_replace(ABCDK_PTR2I8PTR(node->alloc->pptrs[ABCDK_HTML_VALUE], 0), ' '));
}
return 1;
}
void test_html(abcdk_tree_t *args)
{
const char *file = abcdk_option_get(args,"--file",0,"");
// abcdk_clock_dot(NULL);
abcdk_tree_t *t = abcdk_html_parse_file(file);
// printf("%lu\n",abcdk_clock_step(NULL));
abcdk_tree_iterator_t it = {0,_test_html_dump_cb,NULL};
abcdk_tree_scan(t,&it);
abcdk_tree_free(&t);
}
void test_fnmatch(abcdk_tree_t *args)
{
// char str[]={"abcd?*Qcde"};
// char wd[]={"abc?\\?\\*q*****e"};
char str[]={"/gp/aag/mainA?123456seller=ABVFEJU8LS620"};
char wd[]={"/gp/aag/main\\?\\?*seller=ABVFEJU8LS620"};
int chk = abcdk_fnmatch(str,wd,0,0);
assert(chk==0);
}
void test_crc32(abcdk_tree_t *args)
{
// uint32_t sum = abcdk_crc32_sum("abc",3,0);
// printf("%u\n",sum);
#pragma omp parallel for num_threads(30)
for (int i = 0; i < 300000000; i++)
{
uint32_t sum2 = abcdk_crc32_sum("abc",3,0);
assert(891568578 ==sum2);
}
}
typedef struct _robots_match
{
int flag;
const char *path;
}robots_match_t;
static int _test_robots_dump_cb(size_t deep, abcdk_tree_t *node, void *opaque)
{
if (deep == 0)
{
abcdk_tree_fprintf(stderr,deep, node, "%s\n", ".");
}
else
{
if (opaque)
{
robots_match_t *m = (robots_match_t*)opaque;
int chk = abcdk_fnmatch(m->path,ABCDK_PTR2I8PTR(node->alloc->pptrs[ABCDK_ROBOTS_VALUE], 0),0,0);
if(chk==0)
{
if(abcdk_strcmp(ABCDK_PTR2I8PTR(node->alloc->pptrs[ABCDK_ROBOTS_KEY], 0),"Disallow",0)==0)
m->flag = 2;
if(abcdk_strcmp(ABCDK_PTR2I8PTR(node->alloc->pptrs[ABCDK_ROBOTS_KEY], 0),"Allow",0)==0)
m->flag = 1;
}
else
{
// m->flag = -1;
}
}
else
{
abcdk_tree_fprintf(stderr,deep, node, "%s: %s\n",
ABCDK_PTR2I8PTR(node->alloc->pptrs[ABCDK_ROBOTS_KEY], 0),
ABCDK_PTR2I8PTR(node->alloc->pptrs[ABCDK_ROBOTS_VALUE], 0));
}
}
return 1;
}
void test_robots(abcdk_tree_t *args)
{
const char *file = abcdk_option_get(args,"--file",0,"");
const char *agent = abcdk_option_get(args,"--agent",0,"*");
robots_match_t m = {0};
m.path = abcdk_option_get(args,"--path",0,"");
abcdk_tree_t *t = abcdk_robots_parse_file(file,agent);
abcdk_tree_iterator_t it = {0,_test_robots_dump_cb,NULL};
abcdk_tree_scan(t,&it);
it.opaque = &m;
abcdk_tree_scan(t,&it);
printf("flag=%d\n",m.flag);
abcdk_tree_free(&t);
}
#ifdef _FUSE_H_
#define MP4_PATH "/home/devel/job/tmp/"
/**/
int fuse_open(const char *file, struct fuse_file_info *info)
{
syslog(LOG_INFO,"%s(%d): %s",__FUNCTION__,__LINE__,file);
char tmp[PATH_MAX]={0};
abcdk_dirdir(tmp,MP4_PATH);
abcdk_dirdir(tmp,file);
int fd = abcdk_open(tmp, 0, 0, 0);
if (fd < 0)
return -errno;
info->fh = fd;
info->direct_io = 1;
info->keep_cache = 0;
return 0;
}
int fuse_read(const char *file, char *buffer, size_t size, off_t offset, struct fuse_file_info *info)
{
syslog(LOG_INFO, "%s(%d): %s (fd=%lu)", __FUNCTION__, __LINE__, file, info->fh);
syslog(LOG_INFO, "%s(%d): size=%lu off=%ld", __FUNCTION__, __LINE__, size, offset);
assert(info->fh != -1);
int fd = info->fh;
ssize_t rlen = pread(fd, buffer, size, offset);
if(rlen != size)
sleep(10);
else
usleep(40*1000);
return (rlen >= 0 ? rlen : -errno);
}
int fuse_release(const char* file, struct fuse_file_info *info)
{
syslog(LOG_INFO, "%s(%d): %s (fd=%lu)", __FUNCTION__, __LINE__, file, info->fh);
assert(info->fh != -1);
int fd = info->fh;
abcdk_closep(&fd);
return 0;
}
int fuse_getattr(const char *file, struct stat* attr)
{
syslog(LOG_INFO,"%s(%d): %s",__FUNCTION__,__LINE__,file);
// if (abcdk_strcmp(file, "/") == 0)
// {
// }
// else
// {
char tmp[PATH_MAX] = {0};
abcdk_dirdir(tmp, MP4_PATH);
abcdk_dirdir(tmp, file);
int chk = lstat(tmp, attr);
if (chk != 0)
return -errno;
attr->st_dev = 1000;
clock_gettime(CLOCK_REALTIME, &attr->st_ctim);
attr->st_mtim = attr->st_ctim;
attr->st_size = INTMAX_MAX;
// }
return 0;
}
int fuse_fgetattr(const char* file, struct stat* attr, struct fuse_file_info * info)
{
syslog(LOG_INFO, "%s(%d): %s (fd=%lu)", __FUNCTION__, __LINE__, file, info->fh);
assert(info->fh != -1);
int fd = info->fh;
int chk = fstat(fd,attr);
if(chk != 0 )
return -errno;
attr->st_dev = 1000;
attr->st_size = INTMAX_MAX;
return 0;
}
#endif //_FUSE_H_
void test_fuse(abcdk_tree_t *args)
{
#ifdef _FUSE_H_
const char *name_p = abcdk_option_get(args,"--name",0,"test_fuse");
const char *mpoint_p = abcdk_option_get(args,"--mpoint",0,"");
if (strlen(name_p) <= 0)
{
syslog(LOG_ERR, "--name must have parameters.");
return;
}
if (access(mpoint_p, R_OK) != 0)
{
syslog(LOG_ERR, "--mpoint must have parameters and exist.");
return;
}
static struct fuse_operations opts = {0};
opts.read = fuse_read;
opts.open = fuse_open;
opts.release = fuse_release;
opts.getattr = fuse_getattr;
opts.fgetattr = fuse_fgetattr;
int fuse_argc = 4;
char **fuse_argv = (char**)abcdk_heap_alloc(fuse_argc*sizeof(char*));
fuse_argv[0] = abcdk_heap_clone(name_p,strlen(name_p));
fuse_argv[1] = abcdk_heap_clone(mpoint_p,strlen(mpoint_p));
fuse_argv[2] = abcdk_heap_clone("-o",2);
fuse_argv[3] = abcdk_heap_clone("allow_other,auto_cache,kernel_cache",35);
fuse_main(fuse_argc, fuse_argv, &opts, NULL);
#endif //_FUSE_H_
}
#if 0
int _mp4_read(abcdk_buffer_t *buf, void *data, size_t size)
{
ssize_t r = abcdk_buffer_read(buf, data, size);
if (r <= 0)
return -2;
else if (r != size)
return -1;
return 0;
}
int _mp4_read_u16(abcdk_buffer_t *buf, uint16_t *data)
{
if (_mp4_read(buf, data, sizeof(uint16_t)))
return -1;
*data = abcdk_endian_b_to_h16(*data);
return 0;
}
int _mp4_read_u24(abcdk_buffer_t *buf, uint8_t *data)
{
if (_mp4_read(buf, data, sizeof(uint8_t)*3))
return -1;
abcdk_endian_b_to_h(data,3);
return 0;
}
int _mp4_read_u32(abcdk_buffer_t *buf, uint32_t *data)
{
if (_mp4_read(buf, data, sizeof(uint32_t)))
return -1;
*data = abcdk_endian_b_to_h32(*data);
return 0;
}
int _mp4_read_u64(abcdk_buffer_t *buf,uint64_t *data)
{
if (_mp4_read(buf, data, sizeof(uint64_t)))
return -1;
*data = abcdk_endian_b_to_h64(*data);
return 0;
}
int _mp4_skip_size(abcdk_buffer_t *buf,uint64_t size)
{
size_t all = 0;
char tmp[1000];
while(all<size)
{
size_t s = ABCDK_MIN(1000,size-all);
if (_mp4_read(buf, tmp, s))
return -1;
all += s;
}
return 0;
}
void _mp4_dump_ftyp(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_ftyp_t *cont = (abcdk_mp4_atom_ftyp_t *)&atom->data;
fprintf(stdout, "major='%c%c%c%c',", cont->major.u8[0], cont->major.u8[1], cont->major.u8[2], cont->major.u8[3] );
fprintf(stdout, "minor='%d',", cont->minor);
fprintf(stdout, "compatible=");
for (size_t i = 0; i < cont->compat->numbers; i++)
{
abcdk_mp4_tag_t *brand = (abcdk_mp4_tag_t *)cont->compat->pptrs[i];
if(!brand->u32)
continue;
fprintf(stdout, "'%c%c%c%c' ", brand->u8[0], brand->u8[1], brand->u8[2], brand->u8[3]);
}
}
void _mp4_dump_mvhd(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_mvhd_t *cont = (abcdk_mp4_atom_mvhd_t *)&atom->data;
fprintf(stdout, "version=%hhu,",cont->version);
if(cont->ctime>=0x7C25B080)
cont->ctime -= 0x7C25B080;
struct tm t;
gmtime_r(&cont->ctime,&t);
fprintf(stdout, "ctime=%d-%02d-%02d %02d:%02d:%02d,",t.tm_year+1900,t.tm_mon+1,t.tm_mday,t.tm_hour,t.tm_min,t.tm_sec);
if(cont->mtime>=0x7C25B080)
cont->mtime -= 0x7C25B080;
struct tm t2;
gmtime_r(&cont->mtime,&t2);
fprintf(stdout, "mtime=%d-%02d-%02d %02d:%02d:%02d,",t2.tm_year+1900,t2.tm_mon+1,t2.tm_mday,t2.tm_hour,t2.tm_min,t2.tm_sec);
fprintf(stdout, "timescale=%u,",cont->timescale);
fprintf(stdout, "duration=%lu,",cont->duration);
fprintf(stdout, "rate=%hu.%hu,",cont->rate>>16,cont->rate&0xffff);
fprintf(stdout, "long=%lu(sec),",cont->duration/cont->timescale);
fprintf(stdout, "nexttrackid=%u,",cont->nexttrackid);
}
void _mp4_dump_tkhd(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_tkhd_t *cont = (abcdk_mp4_atom_tkhd_t *)&atom->data;
fprintf(stdout, "version=%hhu,",cont->version);
if(cont->ctime>=0x7C25B080)
cont->ctime -= 0x7C25B080;
struct tm t;
gmtime_r(&cont->ctime,&t);
fprintf(stdout, "ctime=%d-%02d-%02d %02d:%02d:%02d,",t.tm_year+1900,t.tm_mon+1,t.tm_mday,t.tm_hour,t.tm_min,t.tm_sec);
if(cont->mtime>=0x7C25B080)
cont->mtime -= 0x7C25B080;
struct tm t2;
gmtime_r(&cont->mtime,&t2);
fprintf(stdout, "mtime=%d-%02d-%02d %02d:%02d:%02d,",t2.tm_year+1900,t2.tm_mon+1,t2.tm_mday,t2.tm_hour,t2.tm_min,t2.tm_sec);
fprintf(stdout, "trackid=%u,",cont->trackid);
fprintf(stdout, "duration=%lu,",cont->duration);
fprintf(stdout, "width=%hu.%hu,",cont->width>>16,cont->width&0xffff);
fprintf(stdout, "height=%hu.%hu,",cont->height>>16,cont->height&0xffff);
}
void _mp4_dump_mdhd(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_mdhd_t *cont = (abcdk_mp4_atom_mdhd_t *)&atom->data;
fprintf(stdout, "version=%hhu,",cont->version);
if(cont->ctime>=0x7C25B080)
cont->ctime -= 0x7C25B080;
struct tm t;
gmtime_r(&cont->ctime,&t);
fprintf(stdout, "ctime=%d-%02d-%02d %02d:%02d:%02d,",t.tm_year+1900,t.tm_mon+1,t.tm_mday,t.tm_hour,t.tm_min,t.tm_sec);
if(cont->mtime>=0x7C25B080)
cont->mtime -= 0x7C25B080;
struct tm t2;
gmtime_r(&cont->mtime,&t2);
fprintf(stdout, "mtime=%d-%02d-%02d %02d:%02d:%02d,",t2.tm_year+1900,t2.tm_mon+1,t2.tm_mday,t2.tm_hour,t2.tm_min,t2.tm_sec);
fprintf(stdout, "timescale=%u,",cont->timescale);
fprintf(stdout, "duration=%lu,",cont->duration);
fprintf(stdout, "lang=%hu,",cont->language);
fprintf(stdout, "quality=%hu,",cont->quality);
}
void _mp4_dump_hdlr(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_hdlr_t *cont = (abcdk_mp4_atom_hdlr_t *)&atom->data;
fprintf(stdout,"type=%c%c%c%c, ",
cont->type.u8[0], cont->type.u8[1], cont->type.u8[2], cont->type.u8[3]);
fprintf(stdout,"subtype=%c%c%c%c, ",
cont->subtype.u8[0], cont->subtype.u8[1], cont->subtype.u8[2], cont->subtype.u8[3]);
if(cont->name)
fprintf(stdout,"name='%s' ",cont->name->pptrs[0]);
}
void _mp4_dump_vmhd(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_vmhd_t *cont = (abcdk_mp4_atom_vmhd_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
fprintf(stdout, "mode=%u,",cont->mode);
fprintf(stdout, "opcolor=%hu,%hu,%hu",cont->opcolor[0],cont->opcolor[1],cont->opcolor[2]);
}
void _mp4_dump_stts(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_stts_t *cont = (abcdk_mp4_atom_stts_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
if(!cont->tables)
return;
for(size_t i= 0 ;i<cont->tables->numbers;i++)
{
fprintf(stdout, "count=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],0));
fprintf(stdout, "duration=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],4));
}
}
void _mp4_dump_ctts(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_ctts_t *cont = (abcdk_mp4_atom_ctts_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
if(!cont->tables)
return;
for(size_t i= 0 ;i<cont->tables->numbers && i<10;i++)
{
fprintf(stdout, "count=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],0));
fprintf(stdout, "offset=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],4));
}
}
void _mp4_dump_stsc(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_stsc_t *cont = (abcdk_mp4_atom_stsc_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
if(!cont->tables)
return;
for(size_t i= 0 ;i<cont->tables->numbers && i<10;i++)
{
fprintf(stdout, "Firstchunk=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],0));
fprintf(stdout, "perchunk=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],4));
fprintf(stdout, "ID=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],8));
}
}
void _mp4_dump_stsz(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_stsz_t *cont = (abcdk_mp4_atom_stsz_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
fprintf(stdout, "[samplesize=%u],",cont->samplesize);
if(!cont->tables)
return;
for(size_t i= 0 ;i<cont->tables->numbers && i<10;i++)
{
fprintf(stdout, "%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],0));
}
}
void _mp4_dump_stco(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_stco_t *cont = (abcdk_mp4_atom_stco_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
if(!cont->tables)
return;
for(size_t i= 0 ;i<cont->tables->numbers && i<10;i++)
{
fprintf(stdout, "%lu,",ABCDK_PTR2U64(cont->tables->pptrs[i],0));
}
}
void _mp4_dump_stss(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_stss_t *cont = (abcdk_mp4_atom_stss_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
if(!cont->tables)
return;
for(size_t i= 0 ;i<cont->tables->numbers && i < 10;i++)
{
fprintf(stdout, "%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],0));
}
}
void _mp4_dump_smhd(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_smhd_t *cont = (abcdk_mp4_atom_smhd_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
fprintf(stdout, "balance=%hhu.%hhu",cont->balance>>8,cont->balance&0xff);
}
void _mp4_dump_elst(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_elst_t *cont = (abcdk_mp4_atom_elst_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
if(!cont->tables)
return;
for(size_t i= 0 ;i<cont->tables->numbers && i<10;i++)
{
fprintf(stdout, "duration=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],0));
fprintf(stdout, "time=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],4));
fprintf(stdout, "rate=%hu.%hu,",
ABCDK_PTR2U32(cont->tables->pptrs[i],8)>>16,
ABCDK_PTR2U32(cont->tables->pptrs[i],8)&0xffff);
}
}
void _mp4_dump_mehd(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_mehd_t *cont = (abcdk_mp4_atom_mehd_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
fprintf(stdout, "duration=%lu",cont->duration);
}
void _mp4_dump_trex(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_trex_t *cont = (abcdk_mp4_atom_trex_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
fprintf(stdout, "trackid=%u,",cont->trackid);
fprintf(stdout, "sample_desc_index=%u,",cont->default_sample_desc_index);
fprintf(stdout, "duration=%lu,",cont->default_duration);
fprintf(stdout, "sample_sample_size=%u,",cont->default_samplesize);
fprintf(stdout, "sample_flags=%08x",cont->default_sampleflags);
}
void _mp4_dump_mfhd(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_mfhd_t *cont = (abcdk_mp4_atom_mfhd_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
fprintf(stdout, "sn=%lu,",cont->sn);
}
void _mp4_dump_tfhd(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_tfhd_t *cont = (abcdk_mp4_atom_tfhd_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
fprintf(stdout, "trackid=%u,",cont->trackid);
fprintf(stdout, "base_data_offset=%lu,",cont->base_data_offset);
fprintf(stdout, "sample_desc_index=%u,",cont->sample_desc_index);
fprintf(stdout, "duration=%lu,",cont->default_duration);
fprintf(stdout, "sample_sample_size=%u,",cont->default_samplesize);
fprintf(stdout, "sample_flags=%08x",cont->default_sampleflags);
}
void _mp4_dump_tfdt(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_tfdt_t *cont = (abcdk_mp4_atom_tfdt_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
fprintf(stdout, "time=%lu,",cont->base_decode_time);
}
void _mp4_dump_trun(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_trun_t *cont = (abcdk_mp4_atom_trun_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
fprintf(stdout, "offset=%u,",cont->data_offset);
fprintf(stdout, "flags=%08x,",cont->first_sample_flags);
if(!cont->tables)
return;
for(size_t i= 0 ;i<cont->tables->numbers;i++)
{
fprintf(stdout, "[%ld]={",i);
fprintf(stdout, "duration=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],0));
fprintf(stdout, "size=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],4));
fprintf(stdout, "flags=%08x,",ABCDK_PTR2U32(cont->tables->pptrs[i],8));
fprintf(stdout, "offset=%u",ABCDK_PTR2U32(cont->tables->pptrs[i],12));
fprintf(stdout, "},");
}
}
void _mp4_dump_mfro(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_mfro_t *cont = (abcdk_mp4_atom_mfro_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
fprintf(stdout, "size=%lu,",cont->size);
}
void _mp4_dump_tfra(size_t deep, abcdk_tree_t *node, void *opaque)
{
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
abcdk_mp4_atom_tfra_t *cont = (abcdk_mp4_atom_tfra_t *)&atom->data;
fprintf(stdout, "version=%hhu,flag=[%08x],",cont->version,cont->flags);
fprintf(stdout, "trackid=%u,",cont->trackid);
fprintf(stdout, "size_traf_num=%hhu,",cont->length_size_traf_num);
fprintf(stdout, "size_trun_num=%hhu,",cont->length_size_trun_num);
fprintf(stdout, "size_sample_num=%hhu,",cont->length_size_sample_num);
if(!cont->tables)
return;
for(size_t i= 0 ;i<cont->tables->numbers;i++)
{
fprintf(stdout, "[%ld]={",i);
fprintf(stdout, "time=%lu,",ABCDK_PTR2U64(cont->tables->pptrs[i],0));
fprintf(stdout, "moof offset=%lu,",ABCDK_PTR2U64(cont->tables->pptrs[i],8));
fprintf(stdout, "traf=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],16));
fprintf(stdout, "trun=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],20));
fprintf(stdout, "sample=%u,",ABCDK_PTR2U32(cont->tables->pptrs[i],24));
fprintf(stdout, "},");
}
}
static int atoms =0;
int mp4_dump_cb(size_t deep, abcdk_tree_t *node, void *opaque)
{
if (deep == -1)
return -1;
atoms += 1;
int fd = (int64_t)opaque;
abcdk_mp4_atom_t *atom = (abcdk_mp4_atom_t *)node->alloc->pptrs[0];
if (deep == 0)
{
abcdk_tree_fprintf(stdout, deep, node, ".\n");
}
else
{
abcdk_tree_fprintf(stdout, deep, node, "offset=%lu,size=%lu,type=%c%c%c%c: ",
atom->off_head, atom->size, atom->type.u8[0], atom->type.u8[1], atom->type.u8[2], atom->type.u8[3]);
if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_FTYP)
_mp4_dump_ftyp(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_MVHD)
_mp4_dump_mvhd(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_TKHD)
_mp4_dump_tkhd(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_MDHD)
_mp4_dump_mdhd(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_HDLR)
_mp4_dump_hdlr(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_VMHD)
_mp4_dump_vmhd(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_STTS)
_mp4_dump_stts(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_CTTS)
_mp4_dump_ctts(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_STSC)
_mp4_dump_stsc(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_STSZ)
_mp4_dump_stsz(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_STCO)
_mp4_dump_stco(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_STSS)
_mp4_dump_stss(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_SMHD)
_mp4_dump_smhd(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_ELST)
_mp4_dump_elst(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_MEHD)
_mp4_dump_mehd(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_TREX)
_mp4_dump_trex(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_MFHD)
_mp4_dump_mfhd(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_TFHD)
_mp4_dump_tfhd(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_TFDT)
_mp4_dump_tfdt(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_TRUN)
_mp4_dump_trun(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_MFRO)
_mp4_dump_mfro(deep, node, opaque);
else if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_TFRA)
_mp4_dump_tfra(deep, node, opaque);
fprintf(stdout, " \n");
// if (atom->type.u32 == ABCDK_MP4_ATOM_TYPE_MOOF)
// return -1;
}
// if(atoms>70)
// return -1;
return 1;
}
#endif
void show_mp4_info(int fd)
{
abcdk_tree_t *root = abcdk_mp4_read_probe(fd,0,-1UL, NULL);
abcdk_tree_t *video_p = abcdk_mp4_find2(root,ABCDK_MP4_ATOM_TYPE_TRAK,1,1);
abcdk_tree_t *avc1_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_AVC1,1,1);
abcdk_tree_t *avcc_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_AVCC,1,1);
abcdk_mp4_atom_t *avc1 = (abcdk_mp4_atom_t*)avc1_p->alloc->pptrs[0];
abcdk_mp4_atom_t *avcc = (abcdk_mp4_atom_t*)avcc_p->alloc->pptrs[0];
#ifdef HAVE_FFMPEG
AVCodecContext *enc_ctx = abcdk_avcodec_alloc(abcdk_avcodec_find2(AV_CODEC_ID_H264,0));
enc_ctx->extradata_size = avcc->data.avcc.extradata->sizes[0];
enc_ctx->extradata = av_mallocz(avcc->data.avcc.extradata->sizes[0]);
memcpy(enc_ctx->extradata,avcc->data.avcc.extradata->pptrs[0],avcc->data.avcc.extradata->sizes[0]);
enc_ctx->width = avc1->data.sample_desc.detail.video.width;
enc_ctx->height = avc1->data.sample_desc.detail.video.height;
enc_ctx->pix_fmt = AV_PIX_FMT_YUV420P;
assert(abcdk_avcodec_open(enc_ctx,NULL)==0);
AVFrame *frame_p = av_frame_alloc();
AVPacket packet = {0};
av_init_packet(&packet);
packet.data = 0;
packet.size = 0;
packet.stream_index = 0;
assert(abcdk_avcodec_decode(enc_ctx,frame_p,&packet)>=0);
av_frame_free(&frame_p);
av_packet_unref(&packet);
abcdk_avcodec_free(&enc_ctx);
#endif //HAVE_FFMPEG
abcdk_tree_free(&root);
}
void collect_fmp4_video(int fd)
{
int fd2 = abcdk_open("/tmp/abcdk2.h264",1,0,1);
// ftruncate(fd2,0);
lseek(fd2,0,SEEK_END);
abcdk_mp4_tag_t a;
a.u32 = ABCDK_MP4_ATOM_MKTAG('\0','\0','\0','\1');
char *buf= abcdk_heap_alloc(1024*1024*16);
abcdk_tree_t *root = abcdk_mp4_read_probe2(fd,0,-1UL, 0);
abcdk_tree_t *moov_p = abcdk_mp4_find2(root,ABCDK_MP4_ATOM_TYPE_MOOV,1,1);
abcdk_tree_t *mvex_p = abcdk_mp4_find2(root,ABCDK_MP4_ATOM_TYPE_MVEX,1,1);
abcdk_mp4_dump(stdout,moov_p);
abcdk_tree_t *avcc_p = abcdk_mp4_find2(moov_p,ABCDK_MP4_ATOM_TYPE_AVCC,1,1);
abcdk_mp4_atom_t *avcc = (abcdk_mp4_atom_t*)avcc_p->alloc->pptrs[0];
abcdk_tree_t *mehd_p = abcdk_mp4_find2(mvex_p,ABCDK_MP4_ATOM_TYPE_MEHD,1,1);
abcdk_mp4_atom_t *mehd = (abcdk_mp4_atom_t*)mehd_p->alloc->pptrs[0];
abcdk_tree_t *moof_p = abcdk_tree_child(root,1);
while (moof_p)
{
abcdk_mp4_atom_t *moof = (abcdk_mp4_atom_t*)moof_p->alloc->pptrs[0];
if(moof->type.u32 == ABCDK_MP4_ATOM_TYPE_MOOF)
{
abcdk_tree_t *mfhd_p = abcdk_mp4_find2(moof_p, ABCDK_MP4_ATOM_TYPE_MFHD, 1, 1);
abcdk_tree_t *tfhd_p = abcdk_mp4_find2(moof_p, ABCDK_MP4_ATOM_TYPE_TFHD, 1, 1);
abcdk_tree_t *tfdt_p = abcdk_mp4_find2(moof_p, ABCDK_MP4_ATOM_TYPE_TFDT, 1, 1);
abcdk_tree_t *trun_p = abcdk_mp4_find2(moof_p, ABCDK_MP4_ATOM_TYPE_TRUN, 1, 1);
abcdk_mp4_atom_t *mfhd = (abcdk_mp4_atom_t *)mfhd_p->alloc->pptrs[0];
abcdk_mp4_atom_t *tfhd = (abcdk_mp4_atom_t *)tfhd_p->alloc->pptrs[0];
abcdk_mp4_atom_t *tfdt = (abcdk_mp4_atom_t *)tfdt_p->alloc->pptrs[0];
abcdk_mp4_atom_t *trun = (abcdk_mp4_atom_t *)trun_p->alloc->pptrs[0];
#if 1
printf("-----------------------------------mfhd---------------------------------------\n");
printf("Sequence_Number: %lu\n", mfhd->data.mfhd.sequence_number);
printf("-----------------------------------mfhd---------------------------------------\n");
printf("-----------------------------------tfhd---------------------------------------\n");
printf("TrackID: %u\n", tfhd->data.tfhd.trackid);
printf("Base_Data_Offset: %lu\n", tfhd->data.tfhd.base_data_offset);
printf("Sample_Desc_Index: %u\n", tfhd->data.tfhd.sample_desc_idx);
printf("-----------------------------------tfhd---------------------------------------\n");
printf("-----------------------------------tfdt---------------------------------------\n");
printf("base_decode_time: %lu\n", tfdt->data.tfdt.base_decode_time);
printf("-----------------------------------tfdt---------------------------------------\n");
printf("-----------------------------------trun---------------------------------------\n");
printf("Data_Offset: %u\n", trun->data.trun.data_offset);
printf("First_Sample_Flags: %08x\n", trun->data.trun.first_sample_flags);
printf("Numbers: %u\n", trun->data.trun.numbers);
uint64_t duration_start = tfdt->data.tfdt.base_decode_time;
for (size_t i = 0; i < trun->data.trun.numbers; i++)
{
uint64_t duration = tfhd->data.tfhd.sample_duration;
if(trun->data.trun.flags & ABCDK_MP4_TRUN_FLAG_SAMPLE_DURATION_PRESENT)
duration = trun->data.trun.tables[i].sample_duration;
duration_start += duration;
printf("Size: %u,PTS: %lu(%lu+%d), DUR: %lu\n",
trun->data.trun.tables[i].sample_size,
duration_start+trun->data.trun.tables[i].composition_offset,
duration_start,
trun->data.trun.tables[i].composition_offset,
duration);
}
printf("-----------------------------------trun---------------------------------------\n");
#else
if (tfhd->data.tfhd.trackid == 1)
{
lseek(fd, moof->off_head + trun->data.trun.data_offset, SEEK_SET);
for (size_t i = 0; i < trun->data.trun.numbers; i++)
{
abcdk_mp4_read(fd, buf, trun->data.trun.tables[i].sample_size);
abcdk_write(fd2, &a.u32, 4);
abcdk_write(fd2, avcc->data.avcc.sps->pptrs[0], avcc->data.avcc.sps->sizes[0]);
abcdk_write(fd2, &a.u32, 4);
abcdk_write(fd2, avcc->data.avcc.pps->pptrs[0], avcc->data.avcc.pps->sizes[0]);
memcpy(buf, &a.u32, 4); /* replace the 4-byte length prefix with the start code */
abcdk_write(fd2, buf, trun->data.trun.tables[i].sample_size);
}
}
#endif
}
moof_p = abcdk_tree_sibling(moof_p,0);
}
abcdk_heap_free(buf);
abcdk_tree_free(&root);
abcdk_closep(&fd2);
}
void collect_mp4_video(int fd)
{
abcdk_tree_t *root = abcdk_mp4_read_probe2(fd,0,-1UL, 0);
abcdk_mp4_dump(stdout,root);
abcdk_tree_t *video_p = abcdk_mp4_find2(root,ABCDK_MP4_ATOM_TYPE_TRAK,1,1);
abcdk_tree_t *stsz_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_STSZ,1,1);
abcdk_tree_t *stss_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_STSS,1,1);
abcdk_tree_t *stts_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_STTS,1,1);
abcdk_tree_t *ctts_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_CTTS,1,1);
abcdk_tree_t *stsc_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_STSC,1,1);
abcdk_tree_t *stco_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_STCO,1,1);
abcdk_tree_t *avc1_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_AVC1,1,1);
abcdk_tree_t *avcc_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_AVCC,1,1);
abcdk_mp4_atom_t *stsz = (abcdk_mp4_atom_t*)stsz_p->alloc->pptrs[0];
abcdk_mp4_atom_t *stss = (abcdk_mp4_atom_t*)stss_p->alloc->pptrs[0];
abcdk_mp4_atom_t *stts = (abcdk_mp4_atom_t*)stts_p->alloc->pptrs[0];
abcdk_mp4_atom_t *ctts = (abcdk_mp4_atom_t*)ctts_p->alloc->pptrs[0];
abcdk_mp4_atom_t *stco = (abcdk_mp4_atom_t*)stco_p->alloc->pptrs[0];
abcdk_mp4_atom_t *stsc = (abcdk_mp4_atom_t*)stsc_p->alloc->pptrs[0];
abcdk_mp4_atom_t *avc1 = (abcdk_mp4_atom_t*)avc1_p->alloc->pptrs[0];
abcdk_mp4_atom_t *avcc = (abcdk_mp4_atom_t*)avcc_p->alloc->pptrs[0];
char sps[200] = {0};
abcdk_bin2hex(sps,avcc->data.avcc.sps->pptrs[0],avcc->data.avcc.sps->sizes[0],0);
printf("SPS:[%s]\n",sps);
char pps[200] = {0};
abcdk_bin2hex(pps,avcc->data.avcc.pps->pptrs[0],avcc->data.avcc.pps->sizes[0],0);
printf("PPS:[%s]\n",pps);
#if 0
printf("-----------------------------------stsz---------------------------------------\n");
printf("Size: %u\n",stsz->data.stsz.sample_size);
printf("Numbers: %u\n",stsz->data.stsz.numbers);
for (size_t i = 0; i < stsz->data.stsz.numbers; i++)
{
uint64_t dts = 0;
uint32_t dur = 0;
int32_t cts = 0;
abcdk_mp4_stts_tell(&stts->data.stts,i+1,&dts,&dur);
abcdk_mp4_ctts_tell(&ctts->data.ctts,i+1,&cts);
printf("Size[%lu]: %u, PTS: %lu(%lu+%d) DUR: %u, KEY: %s\n",
i+1,stsz->data.stsz.tables[i].size,dts+cts,dts,cts,dur,
(abcdk_mp4_stss_tell(&stss->data.stss,i+1)?"No":"Yes") );
}
printf("-----------------------------------stsz---------------------------------------\n");
printf("-----------------------------------stss---------------------------------------\n");
printf("Numbers: %u\n",stss->data.stss.numbers);
for (size_t i = 0; i < stss->data.stss.numbers; i++)
{
printf("KeyFrame[%lu]: %u\n",i+1,stss->data.stss.tables[i].sync);
}
printf("-----------------------------------stss---------------------------------------\n");
printf("-----------------------------------stts---------------------------------------\n");
printf("Numbers: %u\n",stts->data.stts.numbers);
for (size_t i = 0; i < stts->data.stts.numbers; i++)
{
printf("Count[%lu]: %u\n",i+1,stts->data.stts.tables[i].sample_count);
printf("Duration[%lu]: %u\n",i+1,stts->data.stts.tables[i].sample_duration);
}
printf("-----------------------------------stts---------------------------------------\n");
printf("-----------------------------------ctts---------------------------------------\n");
printf("Numbers: %u\n",ctts->data.ctts.numbers);
for (size_t i = 0; i < ctts->data.ctts.numbers; i++)
{
printf("Count[%lu]: %u\n",i+1,ctts->data.ctts.tables[i].sample_count);
printf("Offset[%lu]: %u\n",i+1,ctts->data.ctts.tables[i].composition_offset);
}
printf("-----------------------------------ctts---------------------------------------\n");
printf("-----------------------------------stco---------------------------------------\n");
printf("Numbers: %u\n",stco->data.stco.numbers);
for (size_t i = 0; i < stco->data.stco.numbers; i++)
{
printf("Offset[%lu]: %lu\n",i+1,stco->data.stco.tables[i].offset);
}
printf("-----------------------------------stco---------------------------------------\n");
printf("-----------------------------------stsc---------------------------------------\n");
printf("Numbers: %u\n",stsc->data.stsc.numbers);
for(size_t i= 0 ;i<stsc->data.stsc.numbers;i++)
{
printf("First_Chunk: %u\n",stsc->data.stsc.tables[i].first_chunk);
printf("PerChunk: %u\n",stsc->data.stsc.tables[i].samples_perchunk);
printf("ID: %u\n",stsc->data.stsc.tables[i].sample_desc_id);
}
printf("-----------------------------------stsc---------------------------------------\n");
#else
int fd2 = abcdk_open("/tmp/abcdk.h264",1,0,1);
// ftruncate(fd2,0);
lseek(fd2,0,SEEK_END);
// abcdk_write(fd2,avcc->data.avcc.extradata->pptrs[0],avcc->data.avcc.extradata->sizes[0]);
char *buf= abcdk_heap_alloc(1024*1024*16);
abcdk_mp4_tag_t a;
a.u32 = ABCDK_MP4_ATOM_MKTAG('\0','\0','\0','\1');
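/* a.u32 carries the 4-byte H.264 Annex-B start code (00 00 00 01): it is written in front of the
 * SPS/PPS and also overwrites the 4-byte AVCC length prefix of each sample copied from mdat below. */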
for(size_t i = 1 ;i<=stsz->data.stsz.numbers;i++)
{
uint32_t chunk=0, offset=0, id=0;
abcdk_mp4_stsc_tell(&stsc->data.stsc,i,&chunk,&offset,&id);
printf("[%lu]={chunk=%u,offset=%u,id=%u}\n",i,chunk,offset,id);
uint32_t offset2=0, size = 0;
abcdk_mp4_stsz_tell(&stsz->data.stsz,offset,i,&offset2,&size);
printf("[%lu]={offset2=%u,size=%u}\n",i,offset2,size);
lseek(fd,stco->data.stco.tables[chunk-1].offset + offset2,SEEK_SET);
abcdk_mp4_read(fd,buf,size);
abcdk_write(fd2,&a.u32,4);
abcdk_write(fd2,avcc->data.avcc.sps->pptrs[0],avcc->data.avcc.sps->sizes[0]);
abcdk_write(fd2,&a.u32,4);
abcdk_write(fd2,avcc->data.avcc.pps->pptrs[0],avcc->data.avcc.pps->sizes[0]);
memcpy(buf,&a.u32,4); /* replace the 4-byte AVCC length field with the Annex-B start code */
abcdk_write(fd2,buf,size);
}
abcdk_closep(&fd2);
abcdk_heap_free(buf);
#endif
abcdk_tree_free(&root);
}
#define ADTS_HEADER_SIZE 7
typedef struct _adtsctx
{
int write_adts;
int objecttype;
int sample_rate_index;
int channel_conf;
} adtsctx;
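/* Parse the AAC AudioSpecificConfig (the esds extradata) and fill the fields needed to build
 * ADTS headers: audio object type, sampling-frequency index and channel configuration.
 * Returns 0 on success, -1 on invalid input. */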
int aac_decode_extradata(adtsctx *adts, unsigned char *pbuf, int bufsize)
{
int aot, aotext, samfreindex;
int i, channelconfig;
unsigned char *p = pbuf;
if (!adts || !pbuf || bufsize < 2)
{
return -1;
}
aot = (p[0] >> 3) & 0x1f;
if (aot == 31)
{
aotext = (p[0]<<3 | (p[1]>>5)) & 0x3f;
aot = 32 + aotext;
samfreindex = (p[1] >> 1) & 0x0f;
if (samfreindex == 0x0f)
{
channelconfig = ((p[4] << 3) | (p[5] >> 5)) & 0x0f;
}
else
{
channelconfig = ((p[1] << 3) | (p[2] >> 5)) & 0x0f;
}
}
else
{
samfreindex = ((p[0] << 1) | p[1] >> 7) & 0x0f;
if (samfreindex == 0x0f)
{
channelconfig = (p[4] >> 3) & 0x0f;
}
else
{
channelconfig = (p[1] >> 3) & 0x0f;
}
}
#ifdef AOT_PROFILE_CTRL
if (aot < 2)
aot = 2;
#endif
adts->objecttype = aot-1;
adts->sample_rate_index = samfreindex;
adts->channel_conf = channelconfig;
adts->write_adts = 1;
return 0;
}
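/* Write a fixed 7-byte ADTS header (syncword 0xFFF, MPEG-4, no CRC) for one raw AAC frame.
 * "size" is the payload size, so the frame-length field becomes ADTS_HEADER_SIZE + size. */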
int aac_set_adts_head(adtsctx *acfg, unsigned char *buf, int size)
{
unsigned char byte;
if (size < ADTS_HEADER_SIZE)
return -1;
buf[0] = 0xff;
buf[1] = 0xf1;
byte = 0;
byte |= (acfg->objecttype & 0x03) << 6;
byte |= (acfg->sample_rate_index & 0x0f) << 2;
byte |= (acfg->channel_conf & 0x07) >> 2;
buf[2] = byte;
byte = 0;
byte |= (acfg->channel_conf & 0x07) << 6;
byte |= (ADTS_HEADER_SIZE + size) >> 11;
buf[3] = byte;
byte = 0;
byte |= (ADTS_HEADER_SIZE + size) >> 3;
buf[4] = byte;
byte = 0;
byte |= ((ADTS_HEADER_SIZE + size) & 0x7) << 5;
byte |= (0x7ff >> 6) & 0x1f;
buf[5] = byte;
byte = 0;
byte |= (0x7ff & 0x3f) << 2;
buf[6] = byte;
return 0;
}
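/* Walk the audio track's sample tables (stsz/stsc/stco) to locate every AAC sample inside mdat,
 * prepend an ADTS header built from the esds extradata, and append the result to /tmp/abcdk.acc
 * as a playable ADTS stream. */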
void collect_mp4_sound(int fd)
{
abcdk_tree_t *root = abcdk_mp4_read_probe2(fd,0,-1UL, 0);
abcdk_mp4_dump(stdout,root);
abcdk_tree_t *video_p = abcdk_mp4_find2(root,ABCDK_MP4_ATOM_TYPE_TRAK,2,1);
abcdk_tree_t *stsz_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_STSZ,1,1);
abcdk_tree_t *stss_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_STSS,1,1);
abcdk_tree_t *stts_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_STTS,1,1);
abcdk_tree_t *ctts_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_CTTS,1,1);
abcdk_tree_t *stsc_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_STSC,1,1);
abcdk_tree_t *stco_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_STCO,1,1);
abcdk_tree_t *mp4a_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_MP4A,1,1);
abcdk_tree_t *esds_p = abcdk_mp4_find2(video_p,ABCDK_MP4_ATOM_TYPE_ESDS,1,1);
abcdk_mp4_atom_t *stsz = (abcdk_mp4_atom_t*)stsz_p->alloc->pptrs[0];
// abcdk_mp4_atom_t *stss = (abcdk_mp4_atom_t*)stss_p->alloc->pptrs[0];
abcdk_mp4_atom_t *stts = (abcdk_mp4_atom_t*)stts_p->alloc->pptrs[0];
// abcdk_mp4_atom_t *ctts = (abcdk_mp4_atom_t*)ctts_p->alloc->pptrs[0];
abcdk_mp4_atom_t *stco = (abcdk_mp4_atom_t*)stco_p->alloc->pptrs[0];
abcdk_mp4_atom_t *stsc = (abcdk_mp4_atom_t*)stsc_p->alloc->pptrs[0];
abcdk_mp4_atom_t *mp4a = (abcdk_mp4_atom_t*)mp4a_p->alloc->pptrs[0];
abcdk_mp4_atom_t *esds = (abcdk_mp4_atom_t*)esds_p->alloc->pptrs[0];
int fd2 = abcdk_open("/tmp/abcdk.acc",1,0,1);
// ftruncate(fd2,0);
lseek(fd2,0,SEEK_END);
char *buf= abcdk_heap_alloc(1024*1024*16);
adtsctx adts={0};
aac_decode_extradata(&adts,esds->data.esds.dec_sp_info.extradata->pptrs[0],esds->data.esds.dec_sp_info.extradata->sizes[0]);
for(size_t i = 1 ;i<=stsz->data.stsz.numbers;i++)
{
uint32_t chunk=0, offset=0, id=0;
abcdk_mp4_stsc_tell(&stsc->data.stsc,i,&chunk,&offset,&id);
printf("[%lu]={chunk=%u,offset=%u,id=%u}\n",i,chunk,offset,id);
uint32_t offset2=0, size = 0;
abcdk_mp4_stsz_tell(&stsz->data.stsz,offset,i,&offset2,&size);
printf("[%lu]={offset2=%u,size=%u}\n",i,offset2,size);
lseek(fd,stco->data.stco.tables[chunk-1].offset + offset2,SEEK_SET);
abcdk_mp4_read(fd,buf,size);
unsigned char hdr[ADTS_HEADER_SIZE]={0};
aac_set_adts_head(&adts,hdr,size);
abcdk_write(fd2,hdr,7);
abcdk_write(fd2,buf,size);
}
abcdk_closep(&fd2);
abcdk_heap_free(buf);
abcdk_tree_free(&root);
}
void test_mp4(abcdk_tree_t *args)
{
const char *name_p = abcdk_option_get(args,"--file",0,"");
#if 0
abcdk_allocator_t *t = abcdk_mmap2(name_p,0,0);
if(!t)
return;
abcdk_buffer_t *buf = abcdk_buffer_alloc(t);
if(!buf)
{
abcdk_allocator_unref(&t);
return;
}
buf->wsize = t->sizes[0];
while (1)
{
uint32_t size2 = 0;
uint64_t size = 0;
if (_mp4_read_u32(buf, &size2))
break;
uint32_t type = 0;
if (_mp4_read(buf, &type, sizeof(uint32_t)))
break;
for (int i = 0; i < 4; i++)
printf("%c", ABCDK_PTR2I8(&type, i));
printf("\n");
if (size2 == 0)
break;
else if (size2 == 1)
{
if (_mp4_read_u64(buf, &size))
break;
}
else
size = size2;
size_t hsize = (size2==1?16:8);
/*skip data*/
if(_mp4_skip_size(buf,size-hsize))
break;
}
abcdk_buffer_free(&buf);
#else
int fd = abcdk_open(name_p,0,0,0);
if(fd<0)
return;
#if 0
abcdk_tree_t *root = abcdk_mp4_read_probe(fd,0,-1UL, NULL);
abcdk_tree_t *ftyp = abcdk_mp4_find2(root,ABCDK_MP4_ATOM_TYPE_FTYP,1,0);
abcdk_tree_t *moov = abcdk_mp4_find2(root,ABCDK_MP4_ATOM_TYPE_MOOV,1,0);
abcdk_tree_iterator_t it = {0,mp4_dump_cb,(void*)(int64_t)fd};
abcdk_tree_scan(root,&it);
printf("\natoms:%d\n",atoms);
abcdk_tree_free(&root);
#else
// show_mp4_info(fd);
//collect_mp4_video(fd);
collect_mp4_sound(fd);
//collect_fmp4_video(fd);
#endif
abcdk_closep(&fd);
#endif
}
void test_dirent(abcdk_tree_t *args)
{
const char *path_p = abcdk_option_get(args,"--path",0,"");
abcdk_tree_t *t = abcdk_tree_alloc3(1);
abcdk_dirent_open(t,path_p);
for(;;)
{
char file[PATH_MAX]={0};
int chk = abcdk_dirent_read(t,file);
if(chk != 0)
break;
printf("%s\n",file);
abcdk_dirent_open(t,file);
}
}
void test_netlink(abcdk_tree_t *args)
{
const char *ap = abcdk_option_get(args,"--i",0,"");
int flag = 0;
int chk = abcdk_netlink_fetch(ap,&flag);
if (chk == 0)
{
printf("%s: UP=%s,BCAST=%s,MCAST=%s,LOOP=%s,P2P=%s,RUN=%s\n", ap,
(flag & IFF_UP) ? "Yes" : "No",
(flag & IFF_BROADCAST) ? "Yes" : "No",
(flag & IFF_MULTICAST) ? "Yes" : "No",
(flag & IFF_LOOPBACK) ? "Yes" : "No",
(flag & IFF_POINTOPOINT) ? "Yes" : "No",
(flag & IFF_RUNNING) ? "Yes" : "No");
}
else
printf("%s: %s\n", ap, strerror(errno));
}
#ifdef HAVE_LIBNM
void request_rescan_cb (GObject *object, GAsyncResult *result, gpointer user_data)
{
NMClient *cli = (NMClient *) user_data;
GError *error = NULL;
nm_device_wifi_request_scan_finish (NM_DEVICE_WIFI (object), result, &error);
// if (error) {
// g_string_printf (nmc->return_text, _("Error: %s."), error->message);
// nmc->return_value = NMC_RESULT_ERROR_UNKNOWN;
// g_error_free (error);
// }
}
static int
compare_devices (const void *a, const void *b)
{
NMDevice *da = *(NMDevice **)a;
NMDevice *db = *(NMDevice **)b;
int cmp;
/* Sort by later device states first */
cmp = nm_device_get_state (db) - nm_device_get_state (da);
if (cmp != 0)
return cmp;
cmp = g_strcmp0 (nm_device_get_type_description (da),
nm_device_get_type_description (db));
if (cmp != 0)
return cmp;
return g_strcmp0 (nm_device_get_iface (da),
nm_device_get_iface (db));
}
static NMDevice **
get_devices_sorted (NMClient *client)
{
const GPtrArray *devs;
NMDevice **sorted;
devs = nm_client_get_devices (client);
sorted = g_new (NMDevice *, devs->len + 1);
memcpy (sorted, devs->pdata, devs->len * sizeof (NMDevice *));
sorted[devs->len] = NULL;
qsort (sorted, devs->len, sizeof (NMDevice *), compare_devices);
return sorted;
}
#endif //HAVE_LIBNM
void
iw_essid_escape(char * dest,
const char * src,
const int slen)
{
const unsigned char * s = (const unsigned char *) src;
const unsigned char * e = s + slen;
char * d = dest;
/* Look every character of the string */
while(s < e)
{
int isescape;
/* Escape the escape to avoid ambiguity.
* We do a fast path test for performance reason. Compiler will
* optimise all that ;-) */
if(*s == '\\')
{
/* Check if we would confuse it with an escape sequence */
if((e-s) > 4 && (s[1] == 'x')
&& (isxdigit(s[2])) && (isxdigit(s[3])))
{
isescape = 1;
}
else
isescape = 0;
}
else
isescape = 0;
/* Is it a non-ASCII character ??? */
if(isescape || !isascii(*s) || iscntrl(*s))
{
/* Escape */
sprintf(d, "\\x%02X", *s);
d += 4;
}
else
{
/* Plain ASCII, just copy */
*d = *s;
d++;
}
s++;
}
/* NUL terminate destination */
*d = '\0';
}
void test_iwscan(abcdk_tree_t *args)
{
#if 0
abcdk_allocator_t * k = abcdk_allocator_alloc(NULL,1,0);
abcdk_allocator_t * p = abcdk_allocator_alloc(NULL,1,0);
k->pptrs[0] = "GH";
k->sizes[0] = 2;
p->pptrs[0] = ABCDK_ANSI_COLOR_RED;
//
int sock = socket(AF_INET, SOCK_DGRAM, 0);
struct iw_scan_req scan_req = {0};
// scan_req.scan_type = IW_SCAN_TYPE_ACTIVE;
//scan_req.flags = ;
struct iwreq req = {0};
strncpy(req.ifr_ifrn.ifrn_name, "wlx70f11c3c3500", IFNAMSIZ);
//req.u.data.pointer = &scan_req;
//req.u.data.length = sizeof(struct iw_scan_req);
//req.u.data.flags = IW_SCAN_DEFAULT;
// int sock = socket(AF_INET, SOCK_DGRAM, 0);
// int chk = abcdk_socket_ioctl(SIOCSIWSCAN,&req);
int chk = ioctl(sock, SIOCSIWSCAN,&req);
abcdk_allocator_t * scan_rsp = abcdk_allocator_alloc2(100000);
/* Forever */
while (1)
{
struct iwreq rsp = {0};
strncpy(rsp.ifr_ifrn.ifrn_name, "wlx70f11c3c3500", IFNAMSIZ);
rsp.u.data.pointer = scan_rsp->pptrs[0];
rsp.u.data.length = scan_rsp->sizes[0];
rsp.u.data.flags = 0;
//chk = abcdk_socket_ioctl(SIOCGIWSCAN,&rsp);
chk = ioctl(sock,SIOCGIWSCAN,&rsp);
if (chk !=0)
{
if(errno == EAGAIN)
continue;
else
goto END;
}
abcdk_hexdump_option_t opt = {0};
if(rsp.u.data.length)
abcdk_hexdump(stderr,rsp.u.data.pointer,rsp.u.data.length,0,&opt);
void *p = rsp.u.data.pointer;
for (;p - rsp.u.data.pointer < rsp.u.data.length;)
{
struct iw_event *event = ABCDK_PTR2PTR(struct iw_event, p, 0);
printf("cmd = %04X,len = %hu\n", event->cmd, event->len);
switch (event->cmd)
{
case SIOCGIWAP:
{
struct ether_addr *eth = ABCDK_PTR2PTR(struct ether_addr, event->u.addr.sa_data, 0);
printf("address: %02X:%02X:%02X:%02X:%02X:%02X\n",
eth->ether_addr_octet[0], eth->ether_addr_octet[1],
eth->ether_addr_octet[2], eth->ether_addr_octet[3],
eth->ether_addr_octet[4], eth->ether_addr_octet[5]);
}
break;
case SIOCGIWNWID:
{
if (event->u.nwid.disabled)
printf("\tNWID: off/any\n");
else
printf(" NWID: %X\n", event->u.nwid.value);
}
break;
case SIOCGIWFREQ:
{
printf("\tchannel: %f\n",((double) event->u.freq.m) * pow(10,event->u.freq.e));
}
break;
case SIOCGIWESSID:
{
break;
event->u.essid.pointer = p+4+sizeof(struct iw_point);
event->u.essid.length = event->len-4-sizeof(struct iw_point);
char essid[4 * IW_ESSID_MAX_SIZE + 1];
memset(essid, '\0', sizeof(essid));
if ((event->u.essid.pointer) && (event->u.essid.length))
iw_essid_escape(essid,event->u.essid.pointer, event->u.essid.length);
if (event->u.essid.flags)
{
if ((event->u.essid.flags & IW_ENCODE_INDEX) > 1)
printf("\tESSID: %s [%d]\n",essid,(event->u.essid.flags & IW_ENCODE_INDEX));
else
printf("\tESSID: %s\n",essid);
}
else
{
printf("\tESSID: off/any/hidden\n");
}
}
break;
default:
break;
}
p = p+event->len;
}
goto END;
}
END:
abcdk_allocator_unref(&scan_rsp);
abcdk_allocator_unref(&k);
abcdk_allocator_unref(&p);
abcdk_closep(&sock);
#else
#ifdef HAVE_LIBNM
GError *err = NULL;
NMClient *cli = nm_client_new(NULL,&err);
// NMDevice *dev = nm_client_get_device_by_iface(cli,"wlx70f11c3c3500");
// gboolean chk = nm_device_wifi_request_scan(NM_DEVICE_WIFI(dev),NULL,&err);
// nm_device_wifi_request_scan_async (NM_DEVICE_WIFI (dev),
// NULL, request_rescan_cb, cli);
// //nm_device_wifi_request_scan_finish(&device,&cancellable,&err);
// g_error_free(err);
NMDevice **devices = get_devices_sorted (cli);
for (int i = 0; devices[i]; i++)
{
NMDevice *dev = devices[i];
if (!NM_IS_DEVICE_WIFI (dev))
continue;
NMAccessPoint * ap = nm_device_wifi_get_active_access_point(NM_DEVICE_WIFI (dev));
const char * ssid = ap? nm_access_point_get_bssid (ap):"";
printf("ssid: %s\n",ssid);
}
#endif //HAVE_LIBNM
#endif
}
void test_hexdump(abcdk_tree_t *args)
{
const char *file_p = abcdk_option_get(args,"--file",0,"");
abcdk_allocator_t * m = abcdk_mmap2(file_p,0,0);
abcdk_hexdump_option_t opt = {0};
if(abcdk_option_exist(args,"--show-addr"))
opt.flag |= ABCDK_HEXDEMP_SHOW_ADDR;
if(abcdk_option_exist(args,"--show-char"))
opt.flag |= ABCDK_HEXDEMP_SHOW_CHAR;
opt.width = abcdk_option_get_int(args,"--width",0,16);
opt.keyword = abcdk_allocator_alloc(NULL,4,0);
opt.palette = abcdk_allocator_alloc(NULL,3,0);
opt.keyword->pptrs[0] = "mvhd";
opt.keyword->sizes[0] = 4;
opt.keyword->pptrs[1] = "ftyp";
opt.keyword->sizes[1] = 4;
opt.keyword->pptrs[2] = "moov";
opt.keyword->sizes[2] = 4;
opt.keyword->pptrs[3] = "mdat";
opt.keyword->sizes[3] = 4;
opt.palette->pptrs[0] = ABCDK_ANSI_COLOR_RED;
opt.palette->pptrs[1] = ABCDK_ANSI_COLOR_GREEN;
opt.palette->pptrs[2] = ABCDK_ANSI_COLOR_BLUE;
if(m)
{
//ssize_t w = abcdk_hexdump(stdout,m->pptrs[0],m->sizes[0],0,&opt);
ssize_t w = abcdk_hexdump(stdout,m->pptrs[0],1000,0,&opt);
fprintf(stderr,"w=%ld",w);
}
abcdk_allocator_unref(&m);
abcdk_allocator_unref(&opt.keyword);
abcdk_allocator_unref(&opt.palette);
}
void test_video(abcdk_tree_t *args)
{
#ifdef HAVE_FFMPEG
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
int chk;
const char *src_file_p = abcdk_option_get(args,"--src",0,"");
const char *dst_file_p = abcdk_option_get(args,"--dst",0,"");
AVDictionary *dict = NULL;
#if 1
av_dict_set(&dict,"framerate","120",0);
//av_dict_set(&dict,"video_size","1920x1080",0);
av_dict_set(&dict,"video_size","640x480",0);
//av_dict_set(&dict,"input_format","mjpeg",0);
av_dict_set(&dict,"input_format","yuyv422",0);
#endif
abcdk_video_t *src = abcdk_video_open_capture(NULL,src_file_p,-1UL,1,dict);
av_dict_free(&dict);
//abcdk_avformat_show_options(src->ctx);
//int dst = abcdk_open(dst_file_p,1,0,1);
abcdk_video_t *dst = abcdk_video_open_writer(NULL,dst_file_p,NULL);
int stream_index = abcdk_video_find_stream(src,1);
double fps = abcdk_video_get_fps(src,stream_index);
int width = abcdk_video_get_width(src,stream_index);
int height = abcdk_video_get_height(src,stream_index);
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58,35,100)
enum AVCodecID id = src->ctx->streams[stream_index]->codec->codec_id;
#else
enum AVCodecID id = src->ctx->streams[stream_index]->codecpar->codec_id;
#endif
//int stream_index2 = abcdk_video_add_stream(dst,fps,width,height,id,NULL,0,0);
int stream_index2 = abcdk_video_add_stream(dst,fps,width,height,AV_CODEC_ID_H264,NULL,0,0);
// int stream_index2 = abcdk_video_add_stream(dst, fps, width, height, id,
// src->ctx->streams[stream_index]->codec->extradata,
// src->ctx->streams[stream_index]->codec->extradata_size,
// 1);
uint64_t c = 0;
uint64_t s = 0;
abcdk_video_write_header(dst,0,1);
printf("LONG: %f\n",abcdk_video_get_duration(src,stream_index));
printf("FPS: %f\n",abcdk_video_get_fps(src,stream_index));
AVPacket pkt;
av_init_packet(&pkt);
AVFrame *fae = av_frame_alloc();
AVFrame *fae2 = av_frame_alloc();
fae2->format = dst->codec_ctx[0]->pix_fmt;
fae2->height = height;
fae2->width = width;
av_frame_get_buffer(fae2,1);
struct SwsContext *sws = NULL;
for(int i =0;i<1000;i++)
{
// chk = abcdk_video_read(src,&pkt,stream_index,0,1);
chk = abcdk_video_read2(src,fae,stream_index,0);
if(chk < 0)
break;
printf("DTS: %f ,PTS: %f\n",
// abcdk_video_ts2sec(src, pkt.stream_index, pkt.dts),
// abcdk_video_ts2sec(src, pkt.stream_index, pkt.pts));
abcdk_video_ts2sec(src, chk, fae->pkt_dts),
abcdk_video_ts2sec(src, chk, fae->pkt_pts));
// abcdk_write(dst,pkt.data,pkt.size);
// chk = abcdk_video_write3(dst,stream_index2,pkt.data,pkt.size);
if(!sws)
sws = abcdk_sws_alloc2(fae, fae2, 0);
abcdk_sws_scale(sws,fae,fae2);
chk = abcdk_video_write2(dst,stream_index2,fae2);
if(chk < 0)
break;
s = abcdk_clock(c, &c) / 1000;
if (s < (1000 / fps))
usleep(((1000 / fps) - s) * 1000);
}
av_frame_free(&fae);
av_packet_unref(&pkt);
abcdk_video_write_trailer(dst);
// abcdk_closep(&dst);
abcdk_video_close(dst);
abcdk_video_close(src);
#pragma GCC diagnostic pop
#endif //
}
void test_com(abcdk_tree_t *args)
{
const char *port = abcdk_option_get(args,"--port",0,"");
int fd = open(port,O_RDWR|O_NOCTTY);
// assert(isatty(fd)==0);
#if 0
struct termios opt = {0};
int chk = tcgetattr(fd,&opt);
// tcflush(fd, TCIOFLUSH);
cfsetispeed(&opt,B9600);
cfsetospeed(&opt,B9600);
opt.c_cflag |=(CLOCAL|CREAD);
opt.c_cflag &= ~PARENB;
opt.c_cflag &= ~CSTOPB;
opt.c_cflag &= ~CSIZE;
opt.c_cflag |= CS8;
opt.c_cc[VTIME] = 0;
opt.c_cc[VMIN] = 0;
tcflush(fd,TCIOFLUSH);
//cfsetispeed(&opt,B4800);
assert(tcsetattr(fd,TCSANOW,&opt)==0);
struct serial_rs485 conf = {0};
conf.flags |= SER_RS485_ENABLED;
// conf.flags |= SER_RS485_RX_DURING_TX;
// assert(ioctl(fd,TIOCSRS485,&conf)==0);
#else
assert(abcdk_tcattr_serial(fd, 9600, 8, 0, 1,NULL)== 0);
#endif
uint64_t s = 0,s1 = 0,s2 = 0;
char buf1[18]={0};
char buf2[18]={0};
for(int i = 0;i<999999999;i++)
{
int chk = abcdk_poll(fd,0x01,-1);
assert(chk>0);
abcdk_read(fd,buf1,17);
s1 = abcdk_clock(s,&s);
s2 += s1;
if(memcmp(buf1,buf2,17)!=0)
s2 = 0;
else if(s2 >= 1000000)
s2 = 0;
if(s2 ==0 )
{
memcpy(buf2,buf1,17);
char buf3[35]={0};
abcdk_bin2hex(buf3,buf1,17,0);
printf("[%d]: '%s' '%s'\n",i,buf1,buf3);
}
}
abcdk_closep(&fd);
}
void test_mpi(abcdk_tree_t *args)
{
#ifdef HAVE_MPI
// int argc = 1;
// char *argv[1] = {
// abcdk_option_get(args,"--",0,""),
// };
int rank,size;
// MPI_Init(&argc, &argv);
MPI_Init(NULL,NULL);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
MPI_Comm_size(MPI_COMM_WORLD, &size);
printf("Hello World from thread %d of %d\n", rank, size);
MPI_Finalize();
#endif
}
void test_lz4(abcdk_tree_t *args)
{
#ifdef HAVE_LZ4
const char *src = abcdk_option_get(args,"--src",0,"");
const char *dst = abcdk_option_get(args,"--dst",0,"");
abcdk_allocator_t *s = abcdk_mmap2(src,0,0);
size_t dsize = abcdk_endian_b_to_h32(ABCDK_PTR2U32(s->pptrs[0],0));
abcdk_allocator_t *d = abcdk_allocator_alloc2(dsize);
//LZ4_decompress_fast(s->pptrs[0]+4,d->pptrs[0],dsize);
int m = abcdk_lz4_dec_fast(d->pptrs[0],dsize,s->pptrs[0]+4);
abcdk_allocator_t *q = abcdk_allocator_alloc2(2000);
int n = abcdk_lz4_enc_default(q->pptrs[0],q->sizes[0],d->pptrs[0],dsize);
//assert(memcmp(q->pptrs[0],s->pptrs[0]+4,s->sizes[0]-4)==0);
abcdk_allocator_t *p = abcdk_allocator_alloc2(dsize);
int m2 = abcdk_lz4_dec_fast(p->pptrs[0],dsize,q->pptrs[0]);
assert(memcmp(p->pptrs[0],d->pptrs[0],d->sizes[0])==0);
abcdk_allocator_unref(&q);
abcdk_allocator_unref(&p);
int fd = abcdk_open(dst,1,0,1);
ftruncate(fd,0);
abcdk_write(fd,d->pptrs[0],dsize);
abcdk_closep(&fd);
abcdk_allocator_unref(&s);
abcdk_allocator_unref(&d);
#endif
}
void test_archive(abcdk_tree_t *args)
{
#ifdef HAVE_ARCHIVE
const char *src = abcdk_option_get(args,"--src",0,"");
const char *dst = abcdk_option_get(args,"--dst",0,"");
struct archive *a = archive_write_new();
struct archive_entry *entry = archive_entry_new();
// archive_write_add_filter_bzip2(a);
// archive_write_set_format_zip(a);
// archive_write_add_filter_gzip(a);
// archive_write_set_format_pax_restricted(a); // Note 1
archive_write_set_format_gnutar(a);
archive_write_open_filename(a, dst);
int fd = abcdk_open(src,0,0,0);
struct stat st = {0};
fstat(fd,&st);
archive_entry_copy_pathname(entry,src+10);
#if 0
archive_entry_set_size(entry, st.st_size); // Note 3
archive_entry_set_filetype(entry, AE_IFREG);
archive_entry_set_perm(entry, 0644);
#else
archive_entry_copy_stat(entry,&st);
#endif
archive_write_header(a, entry);
char buf[500];
for(;;)
{
ssize_t r = abcdk_read(fd,buf,500);
if(r<=0)
break;
archive_write_data(a,buf,r);
}
archive_write_finish_entry(a);
archive_entry_free(entry);
abcdk_closep(&fd);
archive_write_close(a);
archive_write_free(a);
#endif
}
void test_modbus(abcdk_tree_t *args)
{
#ifdef HAVE_MODBUS
const char *port = abcdk_option_get(args,"--port",0,"");
modbus_t *m = modbus_new_rtu(port, 9600, 'N', 8, 1);
modbus_set_debug(m, 0);
modbus_set_slave(m,1);
modbus_connect(m);
struct timeval t;
t.tv_sec = 10;
t.tv_usec = 0;
modbus_set_response_timeout(m, &t);
//int chk = modbus_rtu_set_serial_mode(m,MODBUS_RTU_RS232);
int f2 = 0;
while(1)
{
uint16_t buf[20]={0};
int regs = modbus_read_registers(m,3,2,buf);
int f = ABCDK_PTR2OBJ(float, buf, 0) * 1000;
if (f != f2)
{
printf("%f\n", (float)f / 1000);
f2 = f;
}
usleep(1000);
}
modbus_close(m);
modbus_free(m);
#endif
}
#ifdef HAVE_LIBUSB
static void print_devs(libusb_device **devs)
{
libusb_device *dev;
int i = 0, j = 0;
uint8_t path[8];
while ((dev = devs[i++]) != NULL) {
struct libusb_device_descriptor desc;
int r = libusb_get_device_descriptor(dev, &desc);
if (r < 0) {
fprintf(stderr, "failed to get device descriptor");
return;
}
printf("%04x:%04x (bus %d, device %d)",
desc.idVendor, desc.idProduct,
libusb_get_bus_number(dev), libusb_get_device_address(dev));
r = libusb_get_port_numbers(dev, path, sizeof(path));
if (r > 0) {
printf(" path: %d", path[0]);
for (j = 1; j < r; j++)
printf(".%d", path[j]);
}
printf("\n");
}
}
#endif
int test_libusb(abcdk_tree_t *args)
{
#ifdef HAVE_LIBUSB
libusb_device **devs;
int r;
ssize_t cnt;
r = libusb_init(NULL);
if (r < 0)
return r;
cnt = libusb_get_device_list(NULL, &devs);
if (cnt < 0){
libusb_exit(NULL);
return (int) cnt;
}
print_devs(devs);
libusb_free_device_list(devs, 1);
libusb_exit(NULL);
#endif
return 0;
}
#ifdef HAVE_OPENSSL
void test_openssl_server(abcdk_tree_t *args)
{
#if OPENSSL_VERSION_NUMBER <= 0x100020bfL
const SSL_METHOD *method = TLSv1_2_server_method();
#else
const SSL_METHOD *method = TLS_server_method();
#endif
SSL_CTX * ctx = SSL_CTX_new(method);
int chk;
const char *capath = abcdk_option_get(args,"--ca-path",0,NULL);
if (capath)
{
/* When certificates are loaded from a directory, the hashed file names must first be generated with a tool: c_rehash <CApath> */
chk = SSL_CTX_load_verify_locations(ctx, NULL, capath);
assert(chk == 1);
X509_VERIFY_PARAM *param = SSL_CTX_get0_param(ctx);
//X509_VERIFY_PARAM_set_purpose(param, X509_PURPOSE_ANY);
X509_VERIFY_PARAM_set_flags(param, X509_V_FLAG_CRL_CHECK | X509_V_FLAG_CRL_CHECK_ALL);
}
chk = abcdk_openssl_ssl_ctx_load_crt(ctx, abcdk_option_get(args, "--crt-file", 0, NULL),
abcdk_option_get(args, "--key-file", 0, NULL),
abcdk_option_get(args, "--key-pwd", 0, NULL));
SSL* s = abcdk_openssl_ssl_alloc(ctx);
SSL_set_verify(s,SSL_VERIFY_PEER,NULL);
abcdk_sockaddr_t addr = {0};
//abcdk_sockaddr_from_string(&addr,"0.0.0.0:12345",0);
addr.family = ABCDK_UNIX;
strcpy(addr.addr_un.sun_path,"/tmp/abcdk.txt2");
int l = abcdk_socket(addr.family,0);
int flag = 1;
abcdk_sockopt_option_int(l, SOL_SOCKET, SO_REUSEPORT, &flag, 2);
abcdk_sockopt_option_int(l, SOL_SOCKET, SO_REUSEADDR, &flag, 2);
unlink(addr.addr_un.sun_path);
assert(abcdk_bind(l,&addr)==0);
assert(listen(l, SOMAXCONN)==0);
abcdk_sockaddr_t addr2 = {0};
int c = abcdk_accept(l,&addr2);
assert(abcdk_openssl_ssl_handshake(c,s,1,10000)==0);
int chk2 = SSL_get_verify_result(s);
printf("chk2 = %d\n",chk2);
//assert(X509_V_OK == chk2);
char buf[100]={0};
SSL_read(s,buf,5);
printf("{%s}\n",buf);
SSL_write(s,"abcdk",5);
abcdk_closep(&c);
abcdk_closep(&l);
abcdk_openssl_ssl_free(&s);
SSL_CTX_free(ctx);
}
void test_openssl_client(abcdk_tree_t *args)
{
#if OPENSSL_VERSION_NUMBER <= 0x100020bfL
const SSL_METHOD *method = TLSv1_2_client_method();
#else
const SSL_METHOD *method = TLS_client_method();
#endif
int chk ;
SSL_CTX * ctx = SSL_CTX_new(method);
const char *capath = abcdk_option_get(args,"--ca-path",0,NULL);
if (capath)
{
chk = SSL_CTX_load_verify_locations(ctx, NULL, capath);
assert(chk == 1);
X509_VERIFY_PARAM *param = SSL_CTX_get0_param(ctx);
//X509_VERIFY_PARAM_set_purpose(param, X509_PURPOSE_ANY);
X509_VERIFY_PARAM_set_flags(param, X509_V_FLAG_CRL_CHECK | X509_V_FLAG_CRL_CHECK_ALL);
}
chk = abcdk_openssl_ssl_ctx_load_crt(ctx, abcdk_option_get(args, "--crt-file", 0, NULL),
abcdk_option_get(args, "--key-file", 0, NULL),
abcdk_option_get(args, "--key-pwd", 0, NULL));
assert(chk == 0);
SSL* s = abcdk_openssl_ssl_alloc(ctx);
// void *p = SSL_get_app_data(s);
// printf("p = %p\n",p);
SSL_set_verify(s,SSL_VERIFY_PEER,NULL);
abcdk_sockaddr_t addr = {0};
// abcdk_sockaddr_from_string(&addr,
// abcdk_option_get(args, "--server-addr", 0, "localhost:12345"),
// 1);
addr.family = ABCDK_UNIX;
strcpy(addr.addr_un.sun_path,"/tmp/abcdk.txt2");
int c = abcdk_socket(addr.family,0);
assert(abcdk_connect(c,&addr,10000)==0);
assert(abcdk_openssl_ssl_handshake(c,s,0,10000)==0);
int chk2 = SSL_get_verify_result(s);
printf("chk2 = %d\n",chk2);
//assert(X509_V_OK == chk2);
SSL_write(s,"abcdk",5);
char buf[100]={0};
SSL_read(s,buf,100);
printf("{%s}\n",buf);
abcdk_closep(&c);
abcdk_openssl_ssl_free(&s);
SSL_CTX_free(ctx);
}
#endif
int test_openssl(abcdk_tree_t *args)
{
int sub_func = abcdk_option_get_int(args, "--sub-func", 0, 0);
#ifdef HAVE_OPENSSL
if (sub_func == 1)
test_openssl_server(args);
else if (sub_func == 2)
test_openssl_client(args);
#endif
return 0;
}
#ifdef HAVE_MQTT
void my_message_callback(struct mosquitto *mosq, void *userdata, const struct mosquitto_message *message)
{
if (message->payloadlen)
{
printf("%s %s\n", message->topic, (char*)message->payload);
}
else
{
printf("%s (null)\n", message->topic);
}
fflush(stdout);
}
void my_connect_callback(struct mosquitto *mosq, void *userdata, int result)
{
int i;
if (!result)
{
/* Subscribe to broker information topics on successful connect. */
//mosquitto_subscribe(mosq, NULL, "$SYS/#", 2);
mosquitto_subscribe(mosq, NULL, "hello", 2);
}
else
{
fprintf(stderr, "Connect failed\n");
}
}
void my_subscribe_callback(struct mosquitto *mosq, void *userdata, int mid, int qos_count, const int *granted_qos)
{
int i;
printf("Subscribed (mid: %d): %d", mid, granted_qos[0]);
for (i = 1; i < qos_count; i++)
{
printf(", %d", granted_qos[i]);
}
printf("\n");
}
void my_log_callback(struct mosquitto *mosq, void *userdata, int level, const char *str)
{
/* Print all log messages regardless of level. */
printf("%s\n", str);
}
#endif
int test_mqtt(abcdk_tree_t *args)
{
#ifdef HAVE_MQTT
int i;
char *host = "localhost";
int port = 1883;
int keepalive = 60;
bool clean_session = true;
struct mosquitto *mosq = NULL;
mosquitto_lib_init();
mosq = mosquitto_new(NULL, clean_session, NULL);
if (!mosq)
{
fprintf(stderr, "Error: Out of memory.\n");
return 1;
}
mosquitto_log_callback_set(mosq, my_log_callback);
mosquitto_connect_callback_set(mosq, my_connect_callback);
mosquitto_message_callback_set(mosq, my_message_callback);
mosquitto_subscribe_callback_set(mosq, my_subscribe_callback);
if (mosquitto_connect(mosq, host, port, keepalive))
{
fprintf(stderr, "Unable to connect.\n");
return 1;
}
mosquitto_loop_forever(mosq, -1, 1);
mosquitto_destroy(mosq);
mosquitto_lib_cleanup();
#endif
return 0;
}
void test_http(abcdk_tree_t *args)
{
int s = abcdk_socket(ABCDK_IPV4,0);
abcdk_sockaddr_t a;
a.family = ABCDK_IPV4;
abcdk_sockaddr_from_string(&a,"0.0.0.0:12345",0);
abcdk_bind(s,&a);
listen(s,10);
int c = abcdk_accept(s,NULL);
char buf[10]={0};
printf("--->>>\r\n");
while(read(c,buf,1)>0)
{
printf("%s",buf);
}
printf("<<<---\r\n");
abcdk_closep(&c);
abcdk_closep(&s);
}
void test_redis(abcdk_tree_t *args)
{
#ifdef __HIREDIS_H
const char *server = abcdk_option_get(args, "--server", 0, "127.0.0.1");
int port = abcdk_option_get_int(args, "--port", 0, 6379);
redisContext *c = abcdk_redis_connect(server, port, 20);
if (!c)
return;
//printf("%s\n", c->errstr);
int chk = abcdk_redis_auth(c,"12345678");
assert(chk==0);
char buf[128]={0};
abcdk_redis_get_auth(c,buf);
printf("{%s}\n",buf);
chk = abcdk_redis_set_auth(c,"12345678");
assert(chk==0);
chk = abcdk_redis_auth(c,"12345678");
assert(chk==0);
char buf2[128]={0};
abcdk_redis_get_auth(c,buf2);
printf("{%s}\n",buf2);
redisFree(c);
#endif //
}
void test_cert_verify(abcdk_tree_t *args)
{
#ifdef HAVE_OPENSSL
const char *user = abcdk_option_get(args, "--user-crt", 0, "");
//SSLeay_add_all_algorithms();
X509 *cert = abcdk_openssl_load_crt(user,NULL);
//PEM_read_X509_CRL()
X509_STORE *store = X509_STORE_new();
for(int i = 0;i<100;i++)
{
const char *ca = abcdk_option_get(args,"--ca-crt",i,NULL);
if(!ca)
break;
abcdk_openssl_load_crt2store(store,ca,NULL);
}
for(int i = 0;i<100;i++)
{
const char *ca = abcdk_option_get(args,"--ca-crl",i,NULL);
if(!ca)
break;
abcdk_openssl_load_crl2store(store,ca,NULL);
}
X509_STORE_CTX *store_ctx = abcdk_openssl_verify_crt_prepare(store,cert);
X509_VERIFY_PARAM *param = X509_STORE_CTX_get0_param(store_ctx);
// X509_VERIFY_PARAM_set_purpose(param, X509_PURPOSE_ANY);
/*
* X509_V_FLAG_CRL_CHECK only checks whether the leaf certificate has been revoked,
* and only requires a CRL to exist for the leaf certificate's issuer.
* X509_V_FLAG_CRL_CHECK_ALL checks the whole chain and requires CRLs for every
* parent certificate (the root excepted).
*
* X509_V_FLAG_CRL_CHECK_ALL has no effect on its own; it must be enabled together with X509_V_FLAG_CRL_CHECK.
*/
X509_VERIFY_PARAM_set_flags(param,X509_V_FLAG_CRL_CHECK|X509_V_FLAG_CRL_CHECK_ALL);
// X509_VERIFY_PARAM_set_flags(param,X509_V_FLAG_CRL_CHECK);
int chk = X509_verify_cert(store_ctx);
assert(chk == 1);
//X509_VERIFY_PARAM_free(param);
X509_free(cert);
X509_STORE_free(store);
X509_STORE_CTX_free(store_ctx);
#endif
}
void test_json(abcdk_tree_t *args)
{
#ifdef _json_h_
const char *src = abcdk_option_get(args,"--src",0,NULL);
json_object *src_obj = json_object_from_file(src);
abcdk_json_readable(stdout,1,0,src_obj);
abcdk_json_unref(&src_obj);
#endif //_json_h_
}
void test_refer_count(abcdk_tree_t *args)
{
int user = abcdk_option_get_int(args,"--user",0,10);
abcdk_allocator_t * p= abcdk_allocator_alloc2(100);
#pragma omp parallel for num_threads(user)
for (int i = 0; i < 100000; i++)
{
abcdk_allocator_t *q = abcdk_allocator_refer(p);
usleep(10*1000);
abcdk_allocator_unref(&q);
}
abcdk_allocator_unref(&p);
}
typedef struct _one_node
{
int id;
abcdk_comm_message_t *in_buffer;
abcdk_comm_message_t *out_buffer;
abcdk_comm_queue_t *out_queue;
abcdk_comm_node_t *node;
abcdk_comm_waiter_t *rsp;
}one_node_t;
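/* Message framing callback: the first 4 bytes carry the total message length in big-endian
 * byte order. Grow the buffer to that length and return 1 only when a complete message has
 * been received, 0 to keep reading. */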
int smb_protocol(abcdk_comm_node_t *node, abcdk_comm_message_t *msg)
{
size_t off = abcdk_comm_message_offset(msg);
if (off < 4)
return 0;
size_t len = abcdk_endian_b_to_h32(ABCDK_PTR2U32(abcdk_comm_message_data(msg), 0));
if (len != abcdk_comm_message_size(msg))
{
abcdk_comm_message_realloc(msg, len);
return 0;
}
else if (len != abcdk_comm_message_offset(msg))
{
return 0;
}
return 1;
}
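/* Drain the node's outgoing queue: keep sending until the queue is empty, re-arm the write
 * watch when the socket would block, and request a timeout when a send error occurs. */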
void _output_event(one_node_t *one)
{
int chk;
NEXT_MSG:
if (!one->out_buffer)
{
one->out_buffer = abcdk_comm_queue_pop(one->out_queue);
if (!one->out_buffer)
return;
}
chk = abcdk_comm_message_send(one->node, one->out_buffer);
if (chk < 0)
{
abcdk_comm_set_timeout(one->node, 1);
return;
}
else if (chk == 0)
{
abcdk_comm_write_watch(one->node);
return;
}
/* Release the message buffer and continue with the next queued message. */
abcdk_comm_message_unref(&one->out_buffer);
goto NEXT_MSG;
}
void test_comm_message_cb(abcdk_comm_node_t *node, uint32_t event)
{
one_node_t *one = (one_node_t *)abcdk_comm_get_userdata(node);
switch (event)
{
case ABCDK_COMM_EVENT_ACCEPT:
{
assert(one == NULL);
one = (one_node_t*)abcdk_heap_alloc(sizeof(one_node_t));
one->out_queue = abcdk_comm_queue_alloc();
one->node = abcdk_comm_node_refer(node);
abcdk_comm_set_userdata(node,one);
abcdk_comm_read_watch(node);
}
break;
case ABCDK_COMM_EVENT_INPUT:
{
if(!one->in_buffer)
{
one->in_buffer = abcdk_comm_message_alloc(4);
abcdk_comm_message_protocol_set(one->in_buffer,smb_protocol);
}
int chk = abcdk_comm_message_recv(node,one->in_buffer);
if(chk < 0)
{
abcdk_comm_set_timeout(node,1);
}
else if(chk == 0)
{
abcdk_comm_read_watch(node);
}
else
{
abcdk_comm_message_t *msg_copy = abcdk_comm_message_refer(one->in_buffer);
abcdk_comm_message_unref(&one->in_buffer);
abcdk_comm_read_watch(node);
// usleep(rand()%10000+1000);
abcdk_comm_message_reset(msg_copy);
abcdk_comm_queue_push(one->out_queue,msg_copy);
abcdk_comm_write_watch(one->node);
}
}
break;
case ABCDK_COMM_EVENT_OUTPUT:
_output_event(one);
break;
case ABCDK_COMM_EVENT_CLOSE:
default:
{
abcdk_sockaddr_t sockname, peername;
abcdk_comm_get_sockname(node, &sockname);
abcdk_comm_get_peername(node, &peername);
char sockname_str[100] = {0}, peername_str[100] = {0};
if (sockname.family)
abcdk_sockaddr_to_string(sockname_str, &sockname);
if (peername.family)
abcdk_sockaddr_to_string(peername_str, &peername);
printf("Socket: %s -> %s Disconnected.\n", sockname_str, peername_str);
if(one)
{
abcdk_comm_message_unref(&one->in_buffer);
abcdk_comm_queue_free(&one->out_queue);
abcdk_comm_node_unref(&one->node);
abcdk_heap_free(one);
}
}
break;
}
}
void *test_send_msg(void *args)
{
one_node_t *one = (one_node_t *)args;
for (int i = 0; i < 1000; i++)
{
// usleep(10);
abcdk_comm_message_t *msg = abcdk_comm_message_alloc(128);
uint64_t mid = abcdk_time_clock2kind_with(0, 6);
abcdk_comm_waiter_request2(one->rsp,&mid);
ABCDK_PTR2U32(abcdk_comm_message_data(msg), 0) = abcdk_endian_h_to_b32(128);
ABCDK_PTR2U64(abcdk_comm_message_data(msg), 4) = abcdk_endian_h_to_b64(mid);
ABCDK_PTR2U32(abcdk_comm_message_data(msg), 12) = abcdk_endian_h_to_b32(i+1);
abcdk_comm_queue_push(one->out_queue, msg);
abcdk_comm_write_watch(one->node);
abcdk_comm_queue_t * q = abcdk_comm_waiter_wait2(one->rsp,&mid,1,10);
if(!q)
continue;
uint64_t a = abcdk_time_clock2kind_with(0,6);
printf("mid(%lu),timeout(%lu), count(%lu)\n",mid,a-mid,abcdk_comm_queue_count(q));
abcdk_comm_queue_free(&q);
}
return NULL;
}
void test_comm_message2_cb(abcdk_comm_node_t *node, uint32_t event)
{
one_node_t *one = (one_node_t *)abcdk_comm_get_userdata(node);
switch (event)
{
case ABCDK_COMM_EVENT_CONNECT:
{
one->out_queue = abcdk_comm_queue_alloc();
one->rsp = abcdk_comm_waiter_alloc();
one->node = abcdk_comm_node_refer(node);
// abcdk_comm_set_userdata(node,one);
abcdk_comm_read_watch(node);
abcdk_thread_t t;
t.routine = test_send_msg;
t.opaque = one;
abcdk_thread_create(&t,0);
}
break;
case ABCDK_COMM_EVENT_INPUT:
{
if(!one->in_buffer)
{
one->in_buffer = abcdk_comm_message_alloc(4);
abcdk_comm_message_protocol_set(one->in_buffer,smb_protocol);
}
int chk = abcdk_comm_message_recv(node,one->in_buffer);
if(chk != 1)
{
abcdk_comm_read_watch(node);
}
else
{
abcdk_comm_message_t *msg_copy = abcdk_comm_message_refer(one->in_buffer);
abcdk_comm_message_unref(&one->in_buffer);
abcdk_comm_read_watch(node);
size_t len = abcdk_endian_b_to_h32(ABCDK_PTR2U32(abcdk_comm_message_data(msg_copy),0));
uint64_t mid = abcdk_endian_b_to_h64(ABCDK_PTR2U64(abcdk_comm_message_data(msg_copy),4));
uint32_t id = abcdk_endian_b_to_h32(ABCDK_PTR2U32(abcdk_comm_message_data(msg_copy), 12));
uint64_t a = abcdk_time_clock2kind_with(0,3);
//printf("mid=%lu,id=%u,time=%lu\n",mid,id,a-mid);
abcdk_comm_waiter_response2(one->rsp,&mid,msg_copy);
// abcdk_comm_message_unref(&msg_copy);
}
}
break;
case ABCDK_COMM_EVENT_OUTPUT:
_output_event(one);
break;
default:
{
abcdk_sockaddr_t sockname, peername;
abcdk_comm_get_sockname(node, &sockname);
abcdk_comm_get_peername(node, &peername);
char sockname_str[100] = {0}, peername_str[100] = {0};
if (sockname.family)
abcdk_sockaddr_to_string(sockname_str, &sockname);
if (peername.family)
abcdk_sockaddr_to_string(peername_str, &peername);
printf("Socket: %s -> %s Disconnected.\n", sockname_str, peername_str);
abcdk_comm_message_unref(&one->in_buffer);
abcdk_comm_queue_free(&one->out_queue);
abcdk_comm_node_unref(&one->node);
abcdk_comm_waiter_free(&one->rsp);
abcdk_heap_free(one);
}
break;
}
}
void test_comm(abcdk_tree_t *args)
{
signal(SIGPIPE,SIG_IGN);
abcdk_comm_start(0);
SSL_CTX *server_ssl_ctx = NULL;
SSL_CTX *client_ssl_ctx = NULL;
#ifdef HAVE_OPENSSL
const char *capath = abcdk_option_get(args,"--ca-path",0,NULL);
if (capath)
{
server_ssl_ctx = abcdk_openssl_ssl_ctx_alloc(1, NULL, capath, 2);
abcdk_openssl_ssl_ctx_load_crt(server_ssl_ctx, abcdk_option_get(args, "--crt-file", 0, NULL),
abcdk_option_get(args, "--key-file", 0, NULL),
abcdk_option_get(args, "--key-pwd", 0, NULL));
// SSL_CTX_set_verify(server_ssl_ctx, SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, NULL);
// SSL_CTX_set_verify(server_ssl_ctx, SSL_VERIFY_PEER, NULL);
client_ssl_ctx = abcdk_openssl_ssl_ctx_alloc(0, NULL, capath, 2);
abcdk_openssl_ssl_ctx_load_crt(client_ssl_ctx, abcdk_option_get(args, "--crt2-file", 0, NULL),
abcdk_option_get(args, "--key2-file", 0, NULL),
abcdk_option_get(args, "--key2-pwd", 0, NULL));
// SSL_CTX_set_verify(client_ssl_ctx, SSL_VERIFY_PEER, NULL);
}
#endif //HAVE_OPENSSL
abcdk_sockaddr_t addr = {0};
abcdk_sockaddr_t addr2 = {0};
const char *listen_p = abcdk_option_get(args,"--listen",0,"0.0.0.0:12345");
abcdk_sockaddr_from_string(&addr,listen_p,0);
abcdk_comm_listen(server_ssl_ctx,&addr,test_comm_message_cb,NULL);
const char *connect_p = abcdk_option_get(args,"--connect",0,"127.0.0.1:12345");
abcdk_sockaddr_from_string(&addr2,connect_p,0);
abcdk_comm_connect(client_ssl_ctx,&addr2,test_comm_message2_cb,abcdk_heap_alloc(sizeof(one_node_t)));
while (getchar() != 'Q')
;
abcdk_comm_stop();
}
void test_easy_request_cb(abcdk_comm_easy_t *easy, const void *data, size_t len)
{
abcdk_sockaddr_t sockname, peername;
abcdk_comm_easy_get_sockname(easy, &sockname);
abcdk_comm_easy_get_peername(easy, &peername);
char sockname_str[100] = {0}, peername_str[100] = {0};
if (sockname.family)
abcdk_sockaddr_to_string(sockname_str, &sockname);
if (peername.family)
abcdk_sockaddr_to_string(peername_str, &peername);
// printf("Server(%s -> %s): ", sockname_str, peername_str);
if(!data)
{
printf(" Disconnected.\n");
}
else
{
uint64_t a = abcdk_time_clock2kind_with(CLOCK_MONOTONIC, 6);
uint64_t b = atoll((char*)data);
// printf("%lu-%lu=%lu\n",a,b,a-b);
usleep(rand()%10000+1000);
abcdk_comm_easy_response(easy,data,len);
abcdk_comm_easy_request(easy,data,len,NULL,0);
}
}
void test_easy_request2_cb(abcdk_comm_easy_t *easy, const void *data, size_t len)
{
abcdk_sockaddr_t sockname, peername;
abcdk_comm_easy_get_sockname(easy, &sockname);
abcdk_comm_easy_get_peername(easy, &peername);
char sockname_str[100] = {0}, peername_str[100] = {0};
if (sockname.family)
abcdk_sockaddr_to_string(sockname_str, &sockname);
if (peername.family)
abcdk_sockaddr_to_string(peername_str, &peername);
// printf("Client(%s -> %s): ", sockname_str, peername_str);
if(!data)
{
printf(" Disconnected.\n");
}
else
{
// printf(" %s\n",(char*)data);
}
}
void test_easy(abcdk_tree_t *args)
{
signal(SIGPIPE,SIG_IGN);
abcdk_comm_start(0);
SSL_CTX *server_ssl_ctx = NULL;
SSL_CTX *client_ssl_ctx[4] = {NULL};
#ifdef HAVE_OPENSSL
const char *capath = abcdk_option_get(args,"--ca-path",0,NULL);
if (capath)
{
server_ssl_ctx = abcdk_openssl_ssl_ctx_alloc(1, NULL, capath, 2);
abcdk_openssl_ssl_ctx_load_crt(server_ssl_ctx, abcdk_option_get(args, "--crt-file", 0, NULL),
abcdk_option_get(args, "--key-file", 0, NULL),
abcdk_option_get(args, "--key-pwd", 0, NULL));
// SSL_CTX_set_verify(server_ssl_ctx, SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, NULL);
SSL_CTX_set_verify(server_ssl_ctx, SSL_VERIFY_PEER, NULL);
for(int i =0;i<4;i++)
{
client_ssl_ctx[i] = abcdk_openssl_ssl_ctx_alloc(0, NULL, capath, 2);
abcdk_openssl_ssl_ctx_load_crt(client_ssl_ctx[i], abcdk_option_get(args, "--crt2-file", i, NULL),
abcdk_option_get(args, "--key2-file", i, NULL),
abcdk_option_get(args, "--key2-pwd", i, NULL));
SSL_CTX_set_verify(client_ssl_ctx[i], SSL_VERIFY_PEER, NULL);
}
}
#endif //HAVE_OPENSSL
abcdk_sockaddr_t addr = {0};
abcdk_sockaddr_t addr2 = {0};
const char *listen_p = abcdk_option_get(args,"--listen",0,"0.0.0.0:12345");
abcdk_sockaddr_from_string(&addr,listen_p,0);
abcdk_comm_easy_t *easy_listen = abcdk_comm_easy_listen(server_ssl_ctx,&addr,test_easy_request_cb,NULL);
const char *connect_p = abcdk_option_get(args,"--connect",0,"127.0.0.1:12345");
abcdk_sockaddr_from_string(&addr2,connect_p,0);
abcdk_comm_easy_t *easy_client[4] = {NULL};
for (int i = 0; i < 4; i++)
easy_client[i] = abcdk_comm_easy_connect(client_ssl_ctx[i], &addr2, test_easy_request2_cb, NULL);
uint64_t d = 0,s = 0;
s = abcdk_clock(d,&d);
#pragma omp parallel for num_threads(4)
for(int i = 0;i<1000000;i++)
{
uint64_t d = 0,s = 0;
s = abcdk_clock(d,&d);
int len = 10000;
char *req= (char*)abcdk_heap_alloc(len);
abcdk_comm_message_t *rsp= NULL;
sprintf(req,"%lu",abcdk_time_clock2kind_with(CLOCK_MONOTONIC, 6));
abcdk_comm_easy_request(easy_client[i%4],req,len,&rsp,1000);
if (rsp)
{
// printf("%d=%s\n",i,(char*)abcdk_comm_message_data(rsp));
abcdk_comm_message_unref(&rsp);
}
else
{
printf("Pipe(%d) %s timeout\n",i%4,req);
}
abcdk_heap_free(req);
s = abcdk_clock(d,&d);
// printf("[%d]:s = %lu,d = %lu\n",i,s,d);
}
s = abcdk_clock(d,&d);
printf("s = %lu,d = %lu\n",s,d);
// abcdk_comm_easy_set_timeout(easy_listen,1);
// abcdk_comm_easy_unref(&easy_listen);
for(int i = 0;i<4;i++)
abcdk_comm_easy_unref(&easy_client[i]);
while (getchar() != 'Q')
;
abcdk_comm_stop();
}
int test_blkid(abcdk_tree_t *args)
{
#ifdef HAVE_BLKID
int i, nparts;
char *devname;
blkid_probe pr;
blkid_partlist ls;
blkid_parttable root_tab;
devname = (char*)abcdk_option_get(args,"--dev",0,"");
pr = blkid_new_probe_from_filename(devname);
if (!pr)
return 1;
/* Binary interface */
ls = blkid_probe_get_partitions(pr);
if (!ls)
return 1;
/*
* Print info about the primary (root) partition table
*/
root_tab = blkid_partlist_get_table(ls);
if (!root_tab)
return 1;
printf("size: %jd, sector size: %u, PT: %s, offset: %jd, id=%s\n---\n",
blkid_probe_get_size(pr),
blkid_probe_get_sectorsize(pr),
blkid_parttable_get_type(root_tab),
blkid_parttable_get_offset(root_tab),
blkid_parttable_get_id(root_tab));
/*
* List partitions
*/
nparts = blkid_partlist_numof_partitions(ls);
if (!nparts)
goto done;
for (i = 0; i < nparts; i++) {
const char *p;
blkid_partition par = blkid_partlist_get_partition(ls, i);
blkid_parttable tab = blkid_partition_get_table(par);
printf("#%d: %10llu %10llu 0x%x",
blkid_partition_get_partno(par),
(unsigned long long) blkid_partition_get_start(par),
(unsigned long long) blkid_partition_get_size(par),
blkid_partition_get_type(par));
if (root_tab != tab)
/* subpartition (BSD, Minix, ...) */
printf(" (%s)", blkid_parttable_get_type(tab));
p = blkid_partition_get_name(par);
if (p)
printf(" name='%s'", p);
p = blkid_partition_get_uuid(par);
if (p)
printf(" uuid='%s'", p);
p = blkid_partition_get_type_string(par);
if (p)
printf(" type='%s'", p);
putc('\n', stdout);
}
done:
blkid_free_probe(pr);
#endif
return EXIT_SUCCESS;
}
int main(int argc, char **argv)
{
abcdk_openlog(NULL,LOG_DEBUG,1);
srand(time(NULL));
abcdk_tree_t *args = abcdk_tree_alloc3(1);
abcdk_getargs(args,argc,argv,"--");
abcdk_option_fprintf(stderr,args,NULL);
const char *func = abcdk_option_get(args,"--func",0,"");
// abcdk_clock_reset();
int a = 0x112233;
int b = 0;
char a8[3] = {0};
abcdk_endian_h_to_b24(a8,a);
b = abcdk_endian_b_to_h24(a8);
assert(a == b);
abcdk_endian_h_to_l24(a8,a);
b = abcdk_endian_l_to_h24(a8);
assert(a == b);
uint64_t c = 1234567890987654321;
uint64_t d = 0,e = 0;
d = abcdk_endian_h_to_b64(c);
e = abcdk_endian_b_to_h64(d);
assert(c == e);
uint64_t f = 0,g = 0;
g = abcdk_clock(f,&f);
for(int i = 0;i<100000;i++)
{
void *p = abcdk_heap_alloc(1024);
abcdk_heap_free(p);
}
g = abcdk_clock(f,&f);
printf("g = %lu,f = %lu\n",g,f);
for (int i = 0; i < 10000; i++)
{
int v = rand() % 127;
int k = ABCDK_CLAMP(v,33,126);
assert(k >= 33 && k <= 126);
}
#ifdef HAVE_OPENSSL
SSL_library_init();
OpenSSL_add_all_algorithms();
SSL_load_error_strings();
#endif //HAVE_OPENSSL
if(abcdk_strcmp(func,"test_ffmpeg",0)==0)
test_ffmpeg(args);
if(abcdk_strcmp(func,"test_bmp",0)==0)
test_bmp(args);
if(abcdk_strcmp(func,"test_freeimage",0)==0)
test_freeimage(args);
if(abcdk_strcmp(func,"test_uri",0)==0)
test_uri(args);
if (abcdk_strcmp(func, "test_strrep", 0) == 0)
test_strrep(args);
if (abcdk_strcmp(func, "test_html", 0) == 0)
test_html(args);
if (abcdk_strcmp(func, "test_fnmatch", 0) == 0)
test_fnmatch(args);
if (abcdk_strcmp(func, "test_crc32", 0) == 0)
test_crc32(args);
if (abcdk_strcmp(func, "test_robots", 0) == 0)
test_robots(args);
if (abcdk_strcmp(func, "test_fuse", 0) == 0)
test_fuse(args);
if (abcdk_strcmp(func, "test_mp4", 0) == 0)
test_mp4(args);
if (abcdk_strcmp(func, "test_dirent", 0) == 0)
test_dirent(args);
if (abcdk_strcmp(func, "test_netlink", 0) == 0)
test_netlink(args);
if (abcdk_strcmp(func, "test_iwscan", 0) == 0)
test_iwscan(args);
if (abcdk_strcmp(func, "test_hexdump", 0) == 0)
test_hexdump(args);
if (abcdk_strcmp(func, "test_video", 0) == 0)
test_video(args);
if (abcdk_strcmp(func, "test_com", 0) == 0)
test_com(args);
if (abcdk_strcmp(func, "test_mpi", 0) == 0)
test_mpi(args);
if (abcdk_strcmp(func, "test_lz4", 0) == 0)
test_lz4(args);
if (abcdk_strcmp(func, "test_archive", 0) == 0)
test_archive(args);
if (abcdk_strcmp(func, "test_modbus", 0) == 0)
test_modbus(args);
if (abcdk_strcmp(func, "test_libusb", 0) == 0)
test_libusb(args);
if (abcdk_strcmp(func, "test_openssl", 0) == 0)
test_openssl(args);
if (abcdk_strcmp(func, "test_mqtt", 0) == 0)
test_mqtt(args);
if (abcdk_strcmp(func, "test_http", 0) == 0)
test_http(args);
if (abcdk_strcmp(func, "test_redis", 0) == 0)
test_redis(args);
if (abcdk_strcmp(func, "test_cert_verify", 0) == 0)
test_cert_verify(args);
if (abcdk_strcmp(func, "test_json", 0) == 0)
test_json(args);
if (abcdk_strcmp(func, "test_refer_count", 0) == 0)
test_refer_count(args);
if (abcdk_strcmp(func, "test_comm", 0) == 0)
test_comm(args);
if (abcdk_strcmp(func, "test_easy", 0) == 0)
test_easy(args);
if (abcdk_strcmp(func, "test_blkid", 0) == 0)
test_blkid(args);
abcdk_tree_free(&args);
return 0;
}
|
c-smallpt.c | //-----------------------------------------------------------------------------
// Includes
//-----------------------------------------------------------------------------
#pragma region
#include "imageio.h"
#include "sampling.h"
#include "specular.h"
#include "sphere.h"
#pragma endregion
//-----------------------------------------------------------------------------
// Defines
//-----------------------------------------------------------------------------
#pragma region
#define REFRACTIVE_INDEX_OUT 1.0
#define REFRACTIVE_INDEX_IN 1.5
#pragma endregion
//-----------------------------------------------------------------------------
// Declarations and Definitions
//-----------------------------------------------------------------------------
#pragma region
const Sphere spheres[] = {
{ 1e5, { 1e5 + 1.0, 40.8, 81.6 }, { 0.0, 0.0, 0.0 }, { 0.75,0.25,0.25 }, DIFFUSE}, //Left
{ 1e5, { -1e5 + 99.0, 40.8, 81.6 }, { 0.0, 0.0, 0.0 }, { 0.25,0.25,0.75 }, DIFFUSE}, //Right
{ 1e5, { 50.0, 40.8, 1e5 }, { 0.0, 0.0, 0.0 }, { 0.75, 0.75, 0.75 }, DIFFUSE}, //Back
{ 1e5, { 50.0, 40.8, -1e5 + 170.0 }, { 0.0, 0.0, 0.0 }, { 0.0, 0.0, 0.0 }, DIFFUSE}, //Front
{ 1e5, { 50.0, 1e5, 81.6 }, { 0.0, 0.0, 0.0 }, { 0.75, 0.75, 0.75 }, DIFFUSE}, //Bottom
{ 1e5, { 50.0, -1e5 + 81.6, 81.6 }, { 0.0, 0.0, 0.0 }, { 0.75, 0.75, 0.75 }, DIFFUSE}, //Top
{ 16.5, { 27.0, 16.5, 47.0 }, { 0.0, 0.0, 0.0 }, { 0.999, 0.999, 0.999 }, SPECULAR}, //Mirror
{ 16.5, { 73.0, 16.5, 78.0 }, { 0.0, 0.0, 0.0 }, { 0.999, 0.999, 0.999 }, REFRACTIVE}, //Glass
{ 600.0, { 50.0, 681.6 - .27, 81.6 }, { 12.0, 12.0, 12.0 }, { 0.0, 0.0, 0.0 }, DIFFUSE} //Light
};
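// Find the closest sphere hit: intersect_sphere() shrinks ray->tmax on each closer hit,
// so the last recorded id is the nearest intersection.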
inline bool intersect(Ray* ray, size_t* id) {
bool hit = false;
const size_t n = sizeof(spheres) / sizeof(Sphere);
for (size_t i = 0u; i < n; ++i) {
if (intersect_sphere(&spheres[i], ray)) {
hit = true;
*id = i;
}
}
return hit;
}
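// Iterative path tracer: L accumulates emitted radiance weighted by the path throughput F;
// after depth 4, Russian roulette terminates paths with a probability tied to the surface albedo.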
static Vector3 radiance(Ray* ray, unsigned short xseed[3]) {
Ray* r = ray;
Vector3 L = { 0.0, 0.0, 0.0 };
Vector3 F = { 1.0, 1.0, 1.0 };
while (true) {
size_t id;
if (!intersect(r, &id)) {
return L;
}
const Sphere* shape = &spheres[id];
const Vector3 p = eval_r(r, r->tmax);
Vector3 n = sub_v3v3(&p, &shape->p);
normalize_v3(&n);
const Vector3 l = mul_v3v3(&F, &shape->e);
L = add_v3v3(&L, &l);
F = mul_v3v3(&F, &shape->f);
// Russian roulette
if (4u < r->depth) {
const double continue_probability = max_v3(&shape->f);
if (erand48(xseed) >= continue_probability) {
return L;
}
F = div_v3d(&F, continue_probability);
}
// Next path segment
switch (shape->reflection_t) {
case SPECULAR: {
r->o = p;
r->d = ideal_specular_reflect(&r->d, &n);
r->tmin = EPSILON_SPHERE;
r->tmax = INFINITY;
r->depth++;
break;
}
case REFRACTIVE: {
r->o = p;
double pr;
r->d = ideal_specular_transmit(&r->d, &n, REFRACTIVE_INDEX_OUT, REFRACTIVE_INDEX_IN, &pr, xseed);
F = mul_v3d(&F, pr);
r->tmin = EPSILON_SPHERE;
r->tmax = INFINITY;
r->depth++;
break;
}
default: {
const Vector3 w = (0.0 > dot_v3v3(&n, &r->d)) ? n : minus_v3(&n);
Vector3 _u = { 0.0, 0.0, 0.0 };
if (fabs(w.x) > 0.1) {
_u.y = 1.0;
}
else {
_u.x = 1.0;
}
Vector3 u = cross_v3v3(&_u, &w);
normalize_v3(&u);
const Vector3 v = cross_v3v3(&w, &u);
const Vector3 sample_d = cosine_weighted_sample_on_hemisphere(erand48(xseed), erand48(xseed));
const Vector3 _x = mul_dv3(sample_d.x, &u);
const Vector3 _y = mul_dv3(sample_d.y, &v);
const Vector3 _z = mul_dv3(sample_d.z, &w);
const Vector3 _xy = add_v3v3(&_x, &_y);
Vector3 d = add_v3v3(&_xy, &_z);
r->o = p;
r->d = *normalize_v3(&d);
r->tmin = EPSILON_SPHERE;
r->tmax = INFINITY;
r->depth++;
}
}
}
}
int main(int argc, char* argv[]) {
const unsigned int nb_samples = (2 == argc) ? atoi(argv[1]) / 4 : 1;
const unsigned int w = 1024u;
const unsigned int h = 768u;
const Vector3 eye = { 50, 52, 295.6 };
Vector3 gaze = { 0, -0.042612, -1 };
normalize_v3(&gaze);
const double fov = 0.5135;
const Vector3 cx = { w * fov / h, 0.0, 0.0 };
Vector3 _cy = cross_v3v3(&cx, &gaze);
normalize_v3(&_cy);
const Vector3 cy = mul_v3d(&_cy, fov);
Vector3* Ls = calloc(w * h, sizeof(Vector3)); // zero-initialize: subpixel results are accumulated into Ls[i] below
int y;
#pragma omp parallel for schedule(static)
for (y = 0; y < (int)h; ++y) { // pixel row
unsigned short xseed[3] = { 0, 0, (unsigned short)(y * y * y) };
for (unsigned int x = 0u; x < w; ++x) { // pixel column
for (unsigned int sy = 0u, i = (h - 1u - y) * w + x; sy < 2u; ++sy) { // 2 subpixel row
for (unsigned int sx = 0u; sx < 2u; ++sx) { // 2 subpixel column
Vector3 L = { 0.0, 0.0, 0.0 };
for (unsigned int s = 0u; s < nb_samples; ++s) { // samples per subpixel
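// Tent-filter sample offsets in [-1,1), concentrated around the subpixel centre.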
const double u1 = 2.0 * erand48(xseed);
const double u2 = 2.0 * erand48(xseed);
const double dx = (u1 < 1.0f) ? sqrt(u1) - 1.0 : 1.0 - sqrt(2.0 - u1);
const double dy = (u2 < 1.0f) ? sqrt(u2) - 1.0 : 1.0 - sqrt(2.0 - u2);
const Vector3 _a = mul_v3d(&cx, (((sx + 0.5 + dx) / 2.0 + x) / w - 0.5));
const Vector3 _b = mul_v3d(&cy, (((sy + 0.5 + dy) / 2.0 + y) / h - 0.5));
const Vector3 _ab = add_v3v3(&_a, &_b);
Vector3 d = add_v3v3(&_ab, &gaze);
const Vector3 d130 = mul_v3d(&d, 130.0);
Ray ray = { add_v3v3(&eye, &d130), *normalize_v3(&d), EPSILON_SPHERE, INFINITY, 0 };
const Vector3 _l = radiance(&ray, xseed);
const Vector3 l = div_v3d(&_l, (double)nb_samples);
L = add_v3v3(&L, &l);
}
const Vector3 _l = clamp_v3(&L, 0.0, 1.0);
const Vector3 l = mul_dv3(0.25, &_l);
Ls[i] = add_v3v3(&Ls[i], &l);
}
}
}
}
write_ppm(w, h, Ls, "openmp-c-image.ppm");
free(Ls);
}
#pragma endregion
|
csr_matvec_oomp.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Matvec functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "seq_mv.h"
#include "_hypre_utilities.hpp"
#if defined(HYPRE_USING_DEVICE_OPENMP)
/*--------------------------------------------------------------------------
* hypre_CSRMatrixMatvec
*--------------------------------------------------------------------------*/
/* y[offset:end] = alpha*A[offset:end,:]*x + beta*b[offset:end] */
HYPRE_Int
hypre_CSRMatrixMatvecOutOfPlaceOOMP( HYPRE_Int trans,
HYPRE_Complex alpha,
hypre_CSRMatrix *A,
hypre_Vector *x,
HYPRE_Complex beta,
hypre_Vector *b,
hypre_Vector *y,
HYPRE_Int offset )
{
HYPRE_Int A_nrows = hypre_CSRMatrixNumRows(A);
HYPRE_Int A_ncols = hypre_CSRMatrixNumCols(A);
HYPRE_Int A_nnz = hypre_CSRMatrixNumNonzeros(A);
HYPRE_Complex *A_data = hypre_CSRMatrixData(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A) + offset;
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Int y_size = hypre_VectorSize(y) - offset;
HYPRE_Complex *x_data = hypre_VectorData(x);
HYPRE_Complex *b_data = hypre_VectorData(b) + offset;
HYPRE_Complex *y_data = hypre_VectorData(y) + offset;
HYPRE_Int i;
#ifdef HYPRE_USING_CUSPARSE
cusparseHandle_t handle = hypre_HandleCusparseHandle(hypre_handle());
cusparseMatDescr_t descr = hypre_HandleCusparseMatDescr(hypre_handle());
#endif
//hypre_CSRMatrixPrefetch(A, HYPRE_MEMORY_DEVICE);
//hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
//hypre_SeqVectorPrefetch(b, HYPRE_MEMORY_DEVICE);
//if (b != y)
//{
// hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);
//}
if (b != y)
{
#pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, b_data)
for (i = 0; i < y_size; i++)
{
y_data[i] = b_data[i];
}
}
if (x == y)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR::x and y are the same pointer in hypre_CSRMatrixMatvecDevice\n");
}
// TODO
if (offset != 0)
{
hypre_printf("WARNING:: Offset is not zero in hypre_CSRMatrixMatvecDevice :: \n");
}
hypre_assert(offset == 0);
if (trans)
{
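/* Transpose product: convert A from CSR to CSC (the CSC arrays of A are the CSR arrays of A^T)
   and run the same row-parallel kernel over the converted arrays. Note that the conversion
   below relies on cuSPARSE's csr2csc routine. */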
HYPRE_Complex *csc_a = hypre_TAlloc(HYPRE_Complex, A->num_nonzeros, HYPRE_MEMORY_DEVICE);
HYPRE_Int *csc_j = hypre_TAlloc(HYPRE_Int, A->num_nonzeros, HYPRE_MEMORY_DEVICE);
HYPRE_Int *csc_i = hypre_TAlloc(HYPRE_Int, A->num_cols+1, HYPRE_MEMORY_DEVICE);
HYPRE_CUSPARSE_CALL( cusparseDcsr2csc(handle, A->num_rows, A->num_cols, A->num_nonzeros,
A->data, A->i, A->j, csc_a, csc_j, csc_i,
CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO) );
#ifdef HYPRE_USING_CUSPARSE
HYPRE_CUSPARSE_CALL( cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
A->num_cols, A->num_rows, A->num_nonzeros,
&alpha, descr,
csc_a, csc_i, csc_j,
x->data, &beta, y->data) );
#else
#pragma omp target teams distribute parallel for private(i) is_device_ptr(csc_a, csc_i, csc_j, y_data, x_data)
for (i = 0; i < A_ncols; i++)
{
HYPRE_Complex tempx = 0.0;
HYPRE_Int j;
for (j = csc_i[i]; j < csc_i[i+1]; j++)
{
tempx += csc_a[j] * x_data[csc_j[j]];
}
y_data[i] = alpha*tempx + beta*y_data[i];
}
#endif
hypre_TFree(csc_a, HYPRE_MEMORY_DEVICE);
hypre_TFree(csc_i, HYPRE_MEMORY_DEVICE);
hypre_TFree(csc_j, HYPRE_MEMORY_DEVICE);
}
else
{
#ifdef HYPRE_USING_CUSPARSE
HYPRE_CUSPARSE_CALL( cusparseDcsrmv(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
A_nrows, A_ncols, A_nnz,
&alpha, descr,
A_data, A_i, A_j,
x_data, &beta, y_data) );
#else
#pragma omp target teams distribute parallel for private(i) is_device_ptr(A_data, A_i, A_j, y_data, x_data)
for (i = 0; i < A_nrows; i++)
{
HYPRE_Complex tempx = 0.0;
HYPRE_Int j;
for (j = A_i[i]; j < A_i[i+1]; j++)
{
tempx += A_data[j] * x_data[A_j[j]];
}
y_data[i] = alpha*tempx + beta*y_data[i];
}
#endif
}
return hypre_error_flag;
}
#endif /* #if defined(HYPRE_USING_DEVICE_OPENMP) */
|
ast-dump-openmp-target-enter-data.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test(int x) {
#pragma omp target enter data map(to \
: x)
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: `-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-enter-data.c:3:1, line:6:1> line:3:6 test 'void (int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:11, col:15> col:15 used x 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:18, line:6:1>
// CHECK-NEXT: `-OMPTargetEnterDataDirective {{.*}} <line:4:1, line:5:39> openmp_standalone_directive
// CHECK-NEXT: |-OMPMapClause {{.*}} <line:4:31, line:5:38>
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:37> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:4:1>
// CHECK-NEXT: `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: |-CompoundStmt {{.*}} <col:1>
// CHECK-NEXT: |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: `-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-enter-data.c:4:1) *const restrict'
|
GB_unaryop__abs_int16_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int16_uint8
// op(A') function: GB_tran__abs_int16_uint8
// C type: int16_t
// A type: uint8_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
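// For illustration, GB_CAST_OP (p, p) expands (via the macros above) to:
//
//      { uint8_t aij = Ax [p] ;            /* aij = Ax [p]             */
//        int16_t x = (int16_t) aij ;       /* cast aij to the C type   */
//        Cx [p] = GB_IABS (x) ; }          /* apply the unary operator */
//
// The macro definitions above are authoritative; this expansion is a reading aid.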
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT16 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_int16_uint8
(
int16_t *restrict Cx,
const uint8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_int16_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
BoWBuilder.h | //
// BoWBuilder.h
// BoVW
//
// Created by willard on 15/7/21.
//  Copyright (c) 2015 wilard. All rights reserved.
//
#ifndef __BOW_BUILDER__
#define __BOW_BUILDER__
#include <iostream>
#include <vector>
#include <string>
#include <set>
#include <utility>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "cppsugar/cppsugar"
#ifdef _OPENMP
#include <omp.h>
#endif
using namespace std;
using namespace cv;
using namespace cv::flann;
using namespace func;
using namespace util;
struct BoW
{
int id;
vector<float> bow;
BoW(): id(-1) {};
BoW(int _id, const std::vector<float> &_bow): id(_id), bow(_bow) {};
void Serialize(ofstream &ofs) const {
ofs.write((char *)&id, sizeof(int));
int size = bow.size();
ofs.write((char *)&size, sizeof(int));
ofs.write((char *)&bow[0], sizeof(float) * size);
}
static BoW Deserialize(ifstream &ifs) {
BoW bow;
ifs.read((char *)&bow.id, sizeof(int));
int size = 0;
ifs.read((char *)&size, sizeof(int));
bow.bow.resize(size);
ifs.read((char *)&bow.bow[0], sizeof(float) * size);
return bow;
}
};
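// Binary layout written by BoW::Serialize and read back by BoW::Deserialize
// (native endianness, no padding): [id : int][size : int][size x float bow values].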
struct BoWCollection
{
vector<BoW> bows;
BoWCollection() {}
BoWCollection(int _size): bows(_size) {}
void Serialize(ofstream &ofs) const {
int size = bows.size();
ofs.write((char *)&size, sizeof(int));
for (const auto &bow : bows) {
bow.Serialize(ofs);
}
}
static BoWCollection Deserialize(ifstream &ifs) {
BoWCollection bows;
int size = 0;
ifs.read((char *)&size, sizeof(int));
for (int i = 0; i < size; i++) {
auto bow = BoW::Deserialize(ifs);
bows.bows.push_back(bow);
}
return bows;
}
};
class BoWBuilder
{
public:
// some default settings
    const int DICT_SIZE = 100000; // visual words
//const int DICT_SIZE = 10000;
const int FEATURE_DIMENSION = 128; // for SIFT
const int KMEANS_MAX_ITERATION = 75;
CompositeIndexParams QuantizationIndex;
BoWBuilder(void) {};
~BoWBuilder(void) {};
Mat ExtractSIFTFeature(const string &imgfn) const {
auto img = imread(imgfn, true); //imgfn: image file name
vector<KeyPoint> keypoints;
SiftFeatureDetector detector;
detector.detect(img, keypoints);
SiftDescriptorExtractor extractor;
Mat descriptors;
if (!keypoints.size()) {
return Mat();
}
extractor.compute(img, keypoints, descriptors);
        // transform SIFT to rootSIFT: rows = number of features, cols = 128-dimensional descriptor
for (int y = 0; y < descriptors.rows; y++) {
for (int x = 0; x < descriptors.cols; x++){
descriptors.at<float>(y, x) = sqrt(descriptors.at<float>(y, x));
}
}
        float threshold = pow(10, -12); // small positive floor so near-zero norms are not used as divisors (cf. VGG rootSIFT practice)
// descriptors are not normalized, do L2 normalization here.
for (int y = 0; y < descriptors.rows; y++) {
// first get the square sum
float sum = 0;
for (int x = 0; x < descriptors.cols; x++) {
sum += descriptors.at<float>(y, x) * descriptors.at<float>(y, x);
}
sum = sqrt(sum);
sum = max(sum, threshold); // see vgg
if (sum)
descriptors.row(y) /= sum;
}
        cerr << descriptors.rows << " features extracted." << endl;
return descriptors;
}
    // Extract sparse SIFT features from the given images; the returned value is a vector of Mat,
    // one entry per image, each holding that image's descriptors.
vector<Mat> ExtractSIFTFeatures(const vector<string> &imgfns) const {
vector<Mat> features(imgfns.size());
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int i = 0; i < imgfns.size(); i++) {
features[i] = ExtractSIFTFeature(imgfns[i]);
}
return features;
}
    // Build a codebook from the given image file names using sparse SIFT.
    // The vocabulary size is DICT_SIZE; the extracted descriptors are returned through `features`.
    // Hard approximate quantization is also done using the intermediate result of k-means,
    // and the per-image histograms are returned through `bows`.
Mat BuildCodebookAndQuantize(const vector<string> &imgfns, vector<Mat> &features, BoWCollection &bows) const
{
auto c = clock();
int k = BoWBuilder::DICT_SIZE;
// for our current scale, don't need to subsample the features
features = ExtractSIFTFeatures(imgfns);
cerr << "Feature extraction complete." << endl;
int totalNum = 0;
for (int i = 0; i < features.size(); i++) {
totalNum += features[i].rows;
}
// int totalNum = Sum<Mat, int>(features, [](const Mat &v) { return v.rows; });
cerr << totalNum << " features extracted. " << k << " visual words to cluster, " << (float)(totalNum * 100 / k) / 100
            << " features per word on average." << endl;
// first randomly generate the cluster centers
cerr << "Initializing centers...";
vector<int> centerIds(totalNum);
for (int i = 0; i < totalNum; i++) centerIds[i] = i;
random_shuffle(centerIds.begin(), centerIds.end());
set<int> centerIdSet;
for (int i = 0; i < k; i++) centerIdSet.insert(centerIds[i]);
centerIds.clear();
cerr << "done.\nConstruct data structure..." << endl;
// copy the selected features to centersMat
Mat centersMat(k, BoWBuilder::FEATURE_DIMENSION, DataType<float>::type);
int currentIdx = 0, centersIdx = 0;
for (auto it = features.begin(); it != features.end(); it++)
{
for (int i = 0; i < it->rows; i++)
{
if (centerIdSet.find(currentIdx) != centerIdSet.end())
{
memcpy((char *)(centersMat.datastart + centersIdx * centersMat.step), it->datastart + i * it->step, sizeof(float) * FEATURE_DIMENSION);
centersIdx++;
}
currentIdx++;
}
}
// actual iteration
cerr << "Actual iteration...";
vector<int> labels(totalNum);
Mat centersSum(k, FEATURE_DIMENSION, DataType<float>::type);
        // for more efficient parallelization, use a two-stage indexing system
vector<pair<int, int>> idx;
idx.reserve(totalNum);
for(int i = 0; i < features.size(); i++)
{
for (int j = 0; j < features[i].rows; j++) {
idx.push_back(pair<int, int>(i, j));
}
}
for (int iter = 0; iter < KMEANS_MAX_ITERATION; iter++)
{
// build index
Index index(centersMat, KDTreeIndexParams());
// assign to the cluster centers
vector<int> labelCount(k);
memset(centersSum.datastart, 0, centersSum.dataend - centersSum.datastart);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (int id = 0; id < idx.size(); id++) {
int i = idx[id].first;
int j = idx[id].second;
Mat nn(1, 1, DataType<int>::type);
Mat dist(1, 1, DataType<float>::type);
index.knnSearch(features[i].row(j), nn, dist, 1, SearchParams());
int label = nn.at<int>(0, 0);
centersSum.row(label) += features[i].row(j);
labelCount[label]++;
}
for (int i = 0; i < k; i++)
if (labelCount[i])
centersSum.row(i) /= labelCount[i];
//actually updating the cluster centers
centersMat = centersSum.clone();
cerr << '.';
}
#ifdef __linux__
        cerr << "done. " << (clock() - c) / CLOCKS_PER_SEC << "sec." << endl;
#else
cerr << "done. " << (clock() - c) / 1000 << "sec." << endl;
#endif
// do a last round for quantization
cerr << "Quantization... " << endl;
Index index(centersMat, KDTreeIndexParams());
bows = BoWCollection();
bows.bows.resize(features.size());
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(int i = 0; i < features.size(); i++)
{
if (!features[i].rows) {
bows.bows[i] = BoW(i, vector<float>(k));
continue;
}
Mat nn(features[i].rows, 1, DataType<int>::type);
Mat dist(features[i].rows, 1, DataType<float>::type);
index.knnSearch(features[i], nn, dist, 1, SearchParams());
vector<float> bow(k);
for (int j = 0; j < features[i].rows; j++) {
bow[(int)nn.at<int>(j, 0)]++;
}
// L1 normalization
float bowSum = Sum(bow);
for (auto &b : bow) { b /= bowSum; }
bows.bows[i] = BoW(i, bow);
// L2 normalization
//float accum = sqrt(std::inner_product(bows.bows[i].bow.begin(), bows.bows[i].bow.end(), bows.bows[i].bow.begin(), 0.0));
//for (auto &b : bows.bows[i].bow) { b /= accum; }
}
return centersMat;
}
Mat ReadCodebook() const {
vector<float> buff;
ifstream ifs("dict.dat", ios::binary);
if (!ifs) { throw runtime_error("Cannot open file."); };
int w = 0, h = 0;
ifs.read((char *)&h, sizeof(int));
ifs.read((char *)&w, sizeof(int));
buff.resize(w * h);
ifs.read((char *)&buff[0], sizeof(float) * buff.size());
ifs.close();
// row major
return Mat(buff, true).reshape(1, h);
}
void WriteCodebook(const Mat &dict) const {
ofstream ofs("dict.dat", ios::binary);
if (!ofs) { throw runtime_error("Cannot open file."); }
ofs.write((char *)&dict.rows, sizeof(int));
ofs.write((char *)&dict.cols, sizeof(int));
ofs.write((char *)dict.data, dict.dataend - dict.data);
ofs.close();
}
    // Extract features from the given image, then return the quantized (pooled) BoW histogram.
vector<float> Quantize(const Mat &dict, string imgfn) const {
auto feature = ExtractSIFTFeature(imgfn);
Index index(dict, KDTreeIndexParams());
Mat nn(feature.rows, 1, DataType<int>::type);
Mat dist(feature.rows, 1, DataType<float>::type);
index.knnSearch(feature, nn, dist, 1, SearchParams());
vector<float> bow(dict.rows);
for (int j = 0; j < feature.rows; j++) {
bow[nn.at<int>(j, 0)]++;
}
// L1 normalization
float bowSum = Sum(bow);
for (auto &b : bow) { b /= bowSum; }
// l2-norm
//float accum = sqrt(std::inner_product(bow.begin(), bow.end(), bow.begin(), 0.0));
//for (auto &b : bow) { b /= accum; }
return bow;
}
};
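// Illustrative usage sketch (the image paths are placeholders; dict.dat is the
// file used by ReadCodebook/WriteCodebook above): build a codebook from a list
// of image files, persist it, reload it, and quantize a query image against it.
inline vector<float> BuildAndQuantizeExample(const vector<string> &imgfns,
                                             const string &queryfn)
{
    BoWBuilder builder;
    vector<Mat> features;
    BoWCollection bows;
    Mat dict = builder.BuildCodebookAndQuantize(imgfns, features, bows); // codebook + per-image BoWs
    builder.WriteCodebook(dict);           // persist the vocabulary to dict.dat
    Mat reloaded = builder.ReadCodebook(); // round-trip through dict.dat
    return builder.Quantize(reloaded, queryfn); // L1-normalized BoW of the query image
}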
#endif
|
performance.c | #include "performance.h"
// from GNU.org
int compare_doubles (const void *a, const void *b)
{
const double *da = (const double *) a;
const double *db = (const double *) b;
return (*da > *db) - (*da < *db);
}
void performance_test(Parameters * p){
int i;
int tests_remain;
double t_start, t_end, t=0.0;
double t_min = 1000000.0, t_max= -1.0, t_med, tpercycle, tpertest;
double t_ts_main_max= -1.0, t_ts_main_min = 1000000.0;
double t2;
double *ttests = (double*) malloc(p->n_tests*sizeof(double));
// Create the required MPI data structures
mpi_halo_init(p);
time_t now;
if (p->mpi_rank == 0) {
time(&now);
printf("Started on %s", ctime(&now));
// print the used parameters in the experiment if required
if(p->verbose ==1) print_param(*p);
}
// allocate and initialize the required arrays for the performance experiments
arrays_allocate(p);
init_coeff(p);
domain_data_fill(p);
if(p->source_point_enabled == 1)
for(i=0; i<p->nt; i++) p->source[i] = (real_t) i;
// run the performance experiments of the target kernel
if (p->mpi_rank == 0) {
printf("\n******************************************************\n");
printf("Performance results\n");
printf("******************************************************\n");
}
tests_remain = p->n_tests;
if(p->target_ts != 2) {
#pragma omp parallel
{
MARKER_START("calc");
}
}
while(tests_remain--) {
reset_timers(&(p->prof));
reset_wf_timers(p);
MPI_Barrier(MPI_COMM_WORLD);
t_start = MPI_Wtime();
TSList[p->target_ts].func(p);
t2 = MPI_Wtime();
MPI_Barrier(MPI_COMM_WORLD);
t_end = MPI_Wtime();
tpertest = t_end - t_start;
tpercycle = (t_end - t_start) / p->nt;
ttests[tests_remain] = tpertest;
p->prof.total = t_end - t_start;
p->prof.wait += (t_end - t2);
p->prof.communicate += p->prof.wait;
p->prof.others = p->prof.total - p->prof.communicate - p->prof.compute;
#if PRINT_TIME_DETAILS
if (p->mpi_rank == 0) {
if (tests_remain > 0)
printf("Rank 0 TEST#%02d time: %e\n",(p->n_tests - tests_remain),tpertest);
else if (tests_remain == 0) {
printf("Rank 0 TEST#%02d time: %e\n",(p->n_tests - tests_remain),tpertest);
printf("******************************************************\n");
}
}
#endif
t += tpertest/p->n_tests;
if (t_min > tpercycle) t_min = tpercycle;
if (t_max < tpercycle) t_max = tpercycle;
if (t_ts_main_min > p->prof.ts_main) t_ts_main_min = p->prof.ts_main;
if (t_ts_main_max < p->prof.ts_main) t_ts_main_max = p->prof.ts_main;
}
if(p->target_ts != 2) {
#pragma omp parallel
{
MARKER_STOP("calc");
}
}
// compute the tests median
qsort(ttests, p->n_tests, sizeof(double), compare_doubles);
t_med = ttests[p->n_tests/2];
// collect and print the performance results
performance_results(p, t, t_max, t_min, t_med, t_ts_main_max, t_ts_main_min);
// clean up
if (p->mpi_rank == 0) {
time(&now);
printf("COMPLETED SUCCESSFULLY on %s", ctime(&now));
}
mpi_halo_finalize(p);
arrays_free(p);
free(ttests);
}
|
DRB102-copyprivate-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
* threadprivate+copyprivate: no data races
*/
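/*
 * Why this is race-free: x and y are threadprivate, so every thread works on
 * its own copy. Exactly one thread executes the single region and writes its
 * copies; copyprivate(x,y) then broadcasts those values to the corresponding
 * threadprivate copies of all other threads before any thread leaves the
 * construct, so the printf after the parallel region reads x=1.0 and y=1
 * without any conflicting concurrent accesses.
 */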
#include <stdio.h>
float x=0.0;
int y=0;
#pragma omp threadprivate(x,y)
int main (int argc, char * argv[])
{
#pragma omp parallel
{
#pragma omp single copyprivate(x,y)
{
x=1.0;
y=1;
}
}
printf ("x=%f y=%d\n", x, y);
return 0;
}
|
fm_loss.h | /**
* Copyright (c) 2015 by Contributors
*/
#ifndef ZDIFACTO_LOSS_FM_LOSS_H_
#define ZDIFACTO_LOSS_FM_LOSS_H_
#include <vector>
#include <cmath>
#include "zdifacto/base.h"
#include "dmlc/data.h"
#include "dmlc/io.h"
#include "zdifacto/loss.h"
#include "common/spmv.h"
#include "common/spmm.h"
#include "logit_loss.h"
namespace zdifacto {
/**
* \brief parameters for FM loss
*/
struct FMLossParam : public dmlc::Parameter<FMLossParam> {
/**
* \brief the embedding dimension
*/
int V_dim;
DMLC_DECLARE_PARAMETER(FMLossParam) {
DMLC_DECLARE_FIELD(V_dim).set_range(0, 10000);
}
};
/**
 * \brief the factorization machine loss
 * :math:`f(x) = \langle w, x \rangle + \frac{1}{2} \left( \|V x\|_2^2 - \sum_{i=1}^d x_i^2 \|V_i\|_2^2 \right)`
*/
class FMLoss : public Loss {
public:
FMLoss() {}
virtual ~FMLoss() {}
KWArgs Init(const KWArgs& kwargs) override {
return param_.InitAllowUnknown(kwargs);
}
/**
* \brief perform prediction
*
* pred = X * w + .5 * sum((X*V).^2 - (X.*X)*(V.*V), 2);
*
* where
* - sum(A, 2) : sum the rows of A
   * - .* : element-wise times
*
* @param data the data
* @param param input parameters
* - param[0], real_t vector, the weights
* - param[1], int vector, the w positions
* - param[2], int vector, the V positions
* @param pred predict output, should be pre-allocated
*/
void Predict(const dmlc::RowBlock<unsigned>& data,
const std::vector<SArray<char>>& param,
SArray<real_t>* pred) override {
CHECK_EQ(param.size(), 3);
Predict(data,
SArray<real_t>(param[0]),
SArray<int>(param[1]),
SArray<int>(param[2]),
pred);
}
void Predict(const dmlc::RowBlock<unsigned>& data,
const SArray<real_t>& weights,
const SArray<int>& w_pos,
const SArray<int>& V_pos,
SArray<real_t>* pred) {
// pred = X * w
SArray<real_t> w = weights;
SpMV::Times(data, w, pred, nthreads_, w_pos, {});
int V_dim = param_.V_dim;
if (V_dim == 0) return;
SArray<real_t> V = weights;
// XV_ = X*V
XV_.clear();
XV_.resize(data.size * V_dim, 0);
SpMM::Times(data, V, V_dim, &XV_, nthreads_, V_pos);
// XX = X.*X
auto XX = data;
if (XX.value) {
XX_.clear();
XX_.CopyFrom(XX.value+XX.offset[0], XX.offset[XX.size] - XX.offset[0]);
for (auto& v : XX_) v *= v;
XX.value = XX_.data();
}
// VV = V*V
SArray<real_t> VV(V.size());
#pragma omp parallel for num_threads(nthreads_)
for (size_t i = 0; i < V_pos.size(); ++i) {
int p = V_pos[i];
if (p < 0) continue;
for (int j = 0; j < V_dim; ++j) VV[p+j] = V[p+j] * V[p+j];
}
// XXVV = XX*VV
SArray<real_t> XXVV(XV_.size());
SpMM::Times(XX, VV, V_dim, &XXVV, nthreads_, V_pos);
    // pred += .5 * sum((X*V).^2 - XXVV, 2)
#pragma omp parallel for num_threads(nthreads_)
for (size_t i = 0; i < pred->size(); ++i) {
real_t* t = XV_.data() + i * V_dim;
real_t* tt = XXVV.data() + i * V_dim;
real_t s = 0;
for (int j = 0; j < V_dim; ++j) s += t[j] * t[j] - tt[j];
(*pred)[i] += .5 * s;
}
// projection
for (auto& p : *pred) p = p > 20 ? 20 : (p < -20 ? -20 : p);
}
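  // Note on the .5 * sum((X*V).^2 - (X.*X)*(V.*V), 2) term: for one row x and
  // one latent dimension k, the pairwise interactions satisfy
  //   sum_{i<j} V_ik V_jk x_i x_j = 0.5 * ((sum_i V_ik x_i)^2 - sum_i V_ik^2 x_i^2),
  // so summing the right-hand side over k reproduces the second term of the
  // prediction; this is the standard factorization-machine identity.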
/*!
* \brief compute the gradients
*
* p = - y ./ (1 + exp (y .* pred));
* grad_w = X' * p;
* grad_u = X' * diag(p) * X * V - diag((X.*X)'*p) * V
*
* @param data the data
* @param param input parameters
* - param[0], real_t vector, the weights
* - param[1], int vector, the w positions
* - param[2], int vector, the V positions
* - param[3], real_t vector, the predict output
* @param grad the results
*/
void CalcGrad(const dmlc::RowBlock<unsigned>& data,
const std::vector<SArray<char>>& param,
SArray<real_t>* grad) override {
CHECK_EQ(param.size(), 4);
CalcGrad(data,
SArray<real_t>(param[0]),
SArray<int>(param[1]),
SArray<int>(param[2]),
SArray<real_t>(param[3]),
grad);
}
void CalcGrad(const dmlc::RowBlock<unsigned>& data,
const SArray<real_t>& weights,
const SArray<int>& w_pos,
const SArray<int>& V_pos,
const SArray<real_t>& pred,
SArray<real_t>* grad) {
// p = ...
SArray<real_t> p; p.CopyFrom(pred);
CHECK_EQ(p.size(), data.size);
#pragma omp parallel for num_threads(nthreads_)
for (size_t i = 0; i < p.size(); ++i) {
real_t y = data.label[i] > 0 ? 1 : -1;
p[i] = - y / (1 + std::exp(y * p[i]));
}
// grad_w = ...
SpMV::TransTimes(data, p, grad, nthreads_, {}, w_pos);
// grad_u = ...
int V_dim = param_.V_dim;
if (V_dim == 0) return;
SArray<real_t> V = weights;
// XXp = (X.*X)'*p
auto XX = data;
if (XX.value) {
CHECK_EQ(XX_.size(), XX.offset[XX.size] - XX.offset[0]);
XX.value = XX_.data();
}
SArray<real_t> XXp(V_pos.size());
SpMV::TransTimes(XX, p, &XXp, nthreads_);
// grad_u -= diag(XXp) * V,
#pragma omp parallel for num_threads(nthreads_)
for (size_t i = 0; i < V_pos.size(); ++i) {
int p = V_pos[i];
if (p < 0) continue;
for (int j = 0; j < V_dim; ++j) {
(*grad)[p+j] -= V[p+j] * XXp[i];
}
}
// XV_ = diag(p) * X * V
CHECK_EQ(XV_.size(), data.size * V_dim);
#pragma omp parallel for num_threads(nthreads_)
for (size_t i = 0; i < p.size(); ++i) {
for (int j = 0; j < V_dim; ++j) XV_[i*V_dim+j] *= p[i];
}
// grad_u += X' * diag(p) * X * V
SpMM::TransTimes(data, XV_, V_dim, grad, nthreads_, {}, V_pos);
}
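  // How the steps above realize grad_u = X' * diag(p) * X * V - diag((X.*X)'*p) * V:
  // XXp holds (X.*X)'*p, the first loop subtracts diag(XXp)*V, XV_ is reused to
  // hold diag(p)*X*V, and the final SpMM::TransTimes adds X'*(diag(p)*X*V).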
private:
SArray<real_t> XV_;
SArray<dmlc::real_t> XX_;
FMLossParam param_;
};
}  // namespace zdifacto
#endif  // ZDIFACTO_LOSS_FM_LOSS_H_
|
csr_matrix.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
/******************************************************************************
*
* Member functions for hypre_CSRMatrix class.
*
*****************************************************************************/
#include "seq_mv.h"
#ifdef HYPRE_PROFILE
HYPRE_Real hypre_profile_times[HYPRE_TIMER_ID_COUNT] = { 0 };
#endif
/*--------------------------------------------------------------------------
* hypre_CSRMatrixCreate
*--------------------------------------------------------------------------*/
hypre_CSRMatrix *
hypre_CSRMatrixCreate( HYPRE_Int num_rows,
HYPRE_Int num_cols,
HYPRE_Int num_nonzeros )
{
hypre_CSRMatrix *matrix;
matrix = hypre_CTAlloc(hypre_CSRMatrix, 1);
hypre_CSRMatrixData(matrix) = NULL;
hypre_CSRMatrixI(matrix) = NULL;
hypre_CSRMatrixJ(matrix) = NULL;
hypre_CSRMatrixRownnz(matrix) = NULL;
hypre_CSRMatrixNumRows(matrix) = num_rows;
hypre_CSRMatrixNumCols(matrix) = num_cols;
hypre_CSRMatrixNumNonzeros(matrix) = num_nonzeros;
/* set defaults */
hypre_CSRMatrixOwnsData(matrix) = 1;
hypre_CSRMatrixNumRownnz(matrix) = num_rows;
return matrix;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixDestroy( hypre_CSRMatrix *matrix )
{
HYPRE_Int ierr=0;
if (matrix)
{
hypre_TFree(hypre_CSRMatrixI(matrix));
hypre_CSRMatrixI(matrix) = NULL;
if (hypre_CSRMatrixRownnz(matrix))
hypre_TFree(hypre_CSRMatrixRownnz(matrix));
if ( hypre_CSRMatrixOwnsData(matrix) )
{
hypre_TFree(hypre_CSRMatrixData(matrix));
hypre_TFree(hypre_CSRMatrixJ(matrix));
hypre_CSRMatrixData(matrix) = NULL;
hypre_CSRMatrixJ(matrix) = NULL;
}
hypre_TFree(matrix);
matrix = NULL;
}
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixInitialize
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixInitialize( hypre_CSRMatrix *matrix )
{
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(matrix);
HYPRE_Int num_nonzeros = hypre_CSRMatrixNumNonzeros(matrix);
/* HYPRE_Int num_rownnz = hypre_CSRMatrixNumRownnz(matrix); */
HYPRE_Int ierr=0;
if ( ! hypre_CSRMatrixData(matrix) && num_nonzeros )
hypre_CSRMatrixData(matrix) = hypre_CTAlloc(HYPRE_Complex, num_nonzeros);
if ( ! hypre_CSRMatrixI(matrix) )
hypre_CSRMatrixI(matrix) = hypre_CTAlloc(HYPRE_Int, num_rows + 1);
/* if ( ! hypre_CSRMatrixRownnz(matrix) )
hypre_CSRMatrixRownnz(matrix) = hypre_CTAlloc(HYPRE_Int, num_rownnz);*/
if ( ! hypre_CSRMatrixJ(matrix) && num_nonzeros )
hypre_CSRMatrixJ(matrix) = hypre_CTAlloc(HYPRE_Int, num_nonzeros);
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixSetDataOwner
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixSetDataOwner( hypre_CSRMatrix *matrix,
HYPRE_Int owns_data )
{
HYPRE_Int ierr=0;
hypre_CSRMatrixOwnsData(matrix) = owns_data;
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixSetRownnz
*
 * Sets the rownnz and num_rownnz substructures inside the CSRMatrix.
 * It needs the A_i substructure of the CSRMatrix to find the nonzero rows;
 * it runs after the CSR matrix is created and once A_i is known. It does not
 * check for the existence of A_i or of the CSR matrix.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixSetRownnz( hypre_CSRMatrix *matrix )
{
HYPRE_Int ierr=0;
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(matrix);
HYPRE_Int *A_i = hypre_CSRMatrixI(matrix);
HYPRE_Int *Arownnz;
HYPRE_Int i, adiag;
HYPRE_Int irownnz=0;
for (i=0; i < num_rows; i++)
{
adiag = (A_i[i+1] - A_i[i]);
if(adiag > 0) irownnz++;
}
hypre_CSRMatrixNumRownnz(matrix) = irownnz;
if ((irownnz == 0) || (irownnz == num_rows))
{
hypre_CSRMatrixRownnz(matrix) = NULL;
}
else
{
Arownnz = hypre_CTAlloc(HYPRE_Int, irownnz);
irownnz = 0;
for (i=0; i < num_rows; i++)
{
adiag = A_i[i+1]-A_i[i];
if(adiag > 0) Arownnz[irownnz++] = i;
}
hypre_CSRMatrixRownnz(matrix) = Arownnz;
}
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixRead
*--------------------------------------------------------------------------*/
hypre_CSRMatrix *
hypre_CSRMatrixRead( char *file_name )
{
hypre_CSRMatrix *matrix;
FILE *fp;
HYPRE_Complex *matrix_data;
HYPRE_Int *matrix_i;
HYPRE_Int *matrix_j;
HYPRE_Int num_rows;
HYPRE_Int num_nonzeros;
HYPRE_Int max_col = 0;
HYPRE_Int file_base = 1;
HYPRE_Int j;
/*----------------------------------------------------------
* Read in the data
*----------------------------------------------------------*/
fp = fopen(file_name, "r");
hypre_fscanf(fp, "%d", &num_rows);
matrix_i = hypre_CTAlloc(HYPRE_Int, num_rows + 1);
for (j = 0; j < num_rows+1; j++)
{
hypre_fscanf(fp, "%d", &matrix_i[j]);
matrix_i[j] -= file_base;
}
num_nonzeros = matrix_i[num_rows];
matrix = hypre_CSRMatrixCreate(num_rows, num_rows, matrix_i[num_rows]);
hypre_CSRMatrixI(matrix) = matrix_i;
hypre_CSRMatrixInitialize(matrix);
matrix_j = hypre_CSRMatrixJ(matrix);
for (j = 0; j < num_nonzeros; j++)
{
hypre_fscanf(fp, "%d", &matrix_j[j]);
matrix_j[j] -= file_base;
if (matrix_j[j] > max_col)
{
max_col = matrix_j[j];
}
}
matrix_data = hypre_CSRMatrixData(matrix);
for (j = 0; j < matrix_i[num_rows]; j++)
{
hypre_fscanf(fp, "%le", &matrix_data[j]);
}
fclose(fp);
hypre_CSRMatrixNumNonzeros(matrix) = num_nonzeros;
hypre_CSRMatrixNumCols(matrix) = ++max_col;
return matrix;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixPrint
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixPrint( hypre_CSRMatrix *matrix,
char *file_name )
{
FILE *fp;
HYPRE_Complex *matrix_data;
HYPRE_Int *matrix_i;
HYPRE_Int *matrix_j;
HYPRE_Int num_rows;
HYPRE_Int file_base = 1;
HYPRE_Int j;
HYPRE_Int ierr = 0;
/*----------------------------------------------------------
* Print the matrix data
*----------------------------------------------------------*/
matrix_data = hypre_CSRMatrixData(matrix);
matrix_i = hypre_CSRMatrixI(matrix);
matrix_j = hypre_CSRMatrixJ(matrix);
num_rows = hypre_CSRMatrixNumRows(matrix);
fp = fopen(file_name, "w");
hypre_fprintf(fp, "%d\n", num_rows);
for (j = 0; j <= num_rows; j++)
{
hypre_fprintf(fp, "%d\n", matrix_i[j] + file_base);
}
for (j = 0; j < matrix_i[num_rows]; j++)
{
hypre_fprintf(fp, "%d\n", matrix_j[j] + file_base);
}
if (matrix_data)
{
for (j = 0; j < matrix_i[num_rows]; j++)
{
#ifdef HYPRE_COMPLEX
hypre_fprintf(fp, "%.14e , %.14e\n",
hypre_creal(matrix_data[j]), hypre_cimag(matrix_data[j]));
#else
hypre_fprintf(fp, "%.14e\n", matrix_data[j]);
#endif
}
}
else
{
hypre_fprintf(fp, "Warning: No matrix data!\n");
}
fclose(fp);
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixPrintHB: print a CSRMatrix in Harwell-Boeing format
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixPrintHB( hypre_CSRMatrix *matrix_input,
char *file_name )
{
FILE *fp;
hypre_CSRMatrix *matrix;
HYPRE_Complex *matrix_data;
HYPRE_Int *matrix_i;
HYPRE_Int *matrix_j;
HYPRE_Int num_rows;
HYPRE_Int file_base = 1;
HYPRE_Int j, totcrd, ptrcrd, indcrd, valcrd, rhscrd;
HYPRE_Int ierr = 0;
/*----------------------------------------------------------
* Print the matrix data
*----------------------------------------------------------*/
/* First transpose the input matrix, since HB is in CSC format */
hypre_CSRMatrixTranspose(matrix_input, &matrix, 1);
matrix_data = hypre_CSRMatrixData(matrix);
matrix_i = hypre_CSRMatrixI(matrix);
matrix_j = hypre_CSRMatrixJ(matrix);
num_rows = hypre_CSRMatrixNumRows(matrix);
fp = fopen(file_name, "w");
hypre_fprintf(fp, "%-70s Key \n", "Title");
ptrcrd = num_rows;
indcrd = matrix_i[num_rows];
valcrd = matrix_i[num_rows];
rhscrd = 0;
totcrd = ptrcrd + indcrd + valcrd + rhscrd;
hypre_fprintf (fp, "%14d%14d%14d%14d%14d\n",
totcrd, ptrcrd, indcrd, valcrd, rhscrd);
hypre_fprintf (fp, "%-14s%14i%14i%14i%14i\n", "RUA",
num_rows, num_rows, valcrd, 0);
hypre_fprintf (fp, "%-16s%-16s%-16s%26s\n", "(1I8)", "(1I8)", "(1E16.8)", "");
for (j = 0; j <= num_rows; j++)
{
hypre_fprintf(fp, "%8d\n", matrix_i[j] + file_base);
}
for (j = 0; j < matrix_i[num_rows]; j++)
{
hypre_fprintf(fp, "%8d\n", matrix_j[j] + file_base);
}
if (matrix_data)
{
for (j = 0; j < matrix_i[num_rows]; j++)
{
#ifdef HYPRE_COMPLEX
hypre_fprintf(fp, "%16.8e , %16.8e\n",
hypre_creal(matrix_data[j]), hypre_cimag(matrix_data[j]));
#else
hypre_fprintf(fp, "%16.8e\n", matrix_data[j]);
#endif
}
}
else
{
hypre_fprintf(fp, "Warning: No matrix data!\n");
}
fclose(fp);
hypre_CSRMatrixDestroy(matrix);
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixCopy:
 * copies A to B,
* if copy_data = 0 only the structure of A is copied to B.
* the routine does not check if the dimensions of A and B match !!!
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_CSRMatrixCopy( hypre_CSRMatrix *A, hypre_CSRMatrix *B, HYPRE_Int copy_data )
{
HYPRE_Int ierr=0;
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Int *A_j = hypre_CSRMatrixJ(A);
HYPRE_Complex *A_data;
HYPRE_Int *B_i = hypre_CSRMatrixI(B);
HYPRE_Int *B_j = hypre_CSRMatrixJ(B);
HYPRE_Complex *B_data;
HYPRE_Int num_nonzeros = hypre_CSRMatrixNumNonzeros(A);
HYPRE_Int i, j;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (i=0; i <= num_rows; i++)
{
B_i[i] = A_i[i];
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_nonzeros; ++j)
{
B_j[j] = A_j[j];
}
if (copy_data)
{
A_data = hypre_CSRMatrixData(A);
B_data = hypre_CSRMatrixData(B);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (j=0; j < num_nonzeros; j++)
{
B_data[j] = A_data[j];
}
}
return ierr;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixClone
* Creates and returns a new copy of the argument, A.
* Data is not copied, only structural information is reproduced.
* Copying is a deep copy in that no pointers are copied; new arrays are
* created where necessary.
*--------------------------------------------------------------------------*/
hypre_CSRMatrix * hypre_CSRMatrixClone( hypre_CSRMatrix * A )
{
HYPRE_Int num_rows = hypre_CSRMatrixNumRows( A );
HYPRE_Int num_cols = hypre_CSRMatrixNumCols( A );
HYPRE_Int num_nonzeros = hypre_CSRMatrixNumNonzeros( A );
hypre_CSRMatrix * B = hypre_CSRMatrixCreate( num_rows, num_cols, num_nonzeros );
HYPRE_Int * A_i;
HYPRE_Int * A_j;
HYPRE_Int * B_i;
HYPRE_Int * B_j;
HYPRE_Int i, j;
hypre_CSRMatrixInitialize( B );
A_i = hypre_CSRMatrixI(A);
A_j = hypre_CSRMatrixJ(A);
B_i = hypre_CSRMatrixI(B);
B_j = hypre_CSRMatrixJ(B);
for ( i=0; i<num_rows+1; ++i ) B_i[i] = A_i[i];
for ( j=0; j<num_nonzeros; ++j ) B_j[j] = A_j[j];
hypre_CSRMatrixNumRownnz(B) = hypre_CSRMatrixNumRownnz(A);
if ( hypre_CSRMatrixRownnz(A) ) hypre_CSRMatrixSetRownnz( B );
return B;
}
/*--------------------------------------------------------------------------
* hypre_CSRMatrixUnion
* Creates and returns a matrix whose elements are the union of those of A and B.
* Data is not computed, only structural information is created.
* A and B must have the same numbers of rows.
* Nothing is done about Rownnz.
*
* If col_map_offd_A and col_map_offd_B are zero, A and B are expected to have
* the same column indexing. Otherwise, col_map_offd_A, col_map_offd_B should
* be the arrays of that name from two ParCSRMatrices of which A and B are the
* offd blocks.
*
* The algorithm can be expected to have reasonable efficiency only for very
* sparse matrices (many rows, few nonzeros per row).
* The nonzeros of a computed row are NOT necessarily in any particular order.
*--------------------------------------------------------------------------*/
hypre_CSRMatrix * hypre_CSRMatrixUnion(
hypre_CSRMatrix * A, hypre_CSRMatrix * B,
HYPRE_Int * col_map_offd_A, HYPRE_Int * col_map_offd_B, HYPRE_Int ** col_map_offd_C )
{
HYPRE_Int num_rows = hypre_CSRMatrixNumRows( A );
HYPRE_Int num_cols_A = hypre_CSRMatrixNumCols( A );
HYPRE_Int num_cols_B = hypre_CSRMatrixNumCols( B );
HYPRE_Int num_cols;
HYPRE_Int num_nonzeros;
HYPRE_Int * A_i = hypre_CSRMatrixI(A);
HYPRE_Int * A_j = hypre_CSRMatrixJ(A);
HYPRE_Int * B_i = hypre_CSRMatrixI(B);
HYPRE_Int * B_j = hypre_CSRMatrixJ(B);
HYPRE_Int * C_i;
HYPRE_Int * C_j;
HYPRE_Int * jC = NULL;
HYPRE_Int i, jA, jB, jBg;
HYPRE_Int ma, mb, mc, ma_min, ma_max, match;
hypre_CSRMatrix * C;
hypre_assert( num_rows == hypre_CSRMatrixNumRows(B) );
if ( col_map_offd_B ) hypre_assert( col_map_offd_A );
if ( col_map_offd_A ) hypre_assert( col_map_offd_B );
/* ==== First, go through the columns of A and B to count the columns of C. */
if ( col_map_offd_A==0 )
{ /* The matrices are diagonal blocks.
Normally num_cols_A==num_cols_B, col_starts is the same, etc.
*/
num_cols = hypre_max( num_cols_A, num_cols_B );
}
else
{ /* The matrices are offdiagonal blocks. */
jC = hypre_CTAlloc( HYPRE_Int, num_cols_B );
num_cols = num_cols_A; /* initialization; we'll compute the actual value */
for ( jB=0; jB<num_cols_B; ++jB )
{
match = 0;
jBg = col_map_offd_B[jB];
for ( ma=0; ma<num_cols_A; ++ma )
{
if ( col_map_offd_A[ma]==jBg )
match = 1;
}
if ( match==0 )
{
jC[jB] = num_cols;
++num_cols;
}
}
}
/* ==== If we're working on a ParCSRMatrix's offd block,
make and load col_map_offd_C */
if ( col_map_offd_A )
{
*col_map_offd_C = hypre_CTAlloc( HYPRE_Int, num_cols );
for ( jA=0; jA<num_cols_A; ++jA )
(*col_map_offd_C)[jA] = col_map_offd_A[jA];
for ( jB=0; jB<num_cols_B; ++jB )
{
match = 0;
jBg = col_map_offd_B[jB];
for ( ma=0; ma<num_cols_A; ++ma )
{
if ( col_map_offd_A[ma]==jBg )
match = 1;
}
if ( match==0 )
(*col_map_offd_C)[ jC[jB] ] = jBg;
}
}
/* ==== The first run through A and B is to count the number of nonzero elements,
      without double-counting duplicates.  Then we can create C. */
num_nonzeros = hypre_CSRMatrixNumNonzeros(A);
for ( i=0; i<num_rows; ++i )
{
ma_min = A_i[i]; ma_max = A_i[i+1];
for ( mb=B_i[i]; mb<B_i[i+1]; ++mb )
{
jB = B_j[mb];
if ( col_map_offd_B ) jB = col_map_offd_B[jB];
match = 0;
for ( ma=ma_min; ma<ma_max; ++ma )
{
jA = A_j[ma];
if ( col_map_offd_A ) jA = col_map_offd_A[jA];
if ( jB == jA )
{
match = 1;
if( ma==ma_min ) ++ma_min;
break;
}
}
if ( match==0 )
++num_nonzeros;
}
}
C = hypre_CSRMatrixCreate( num_rows, num_cols, num_nonzeros );
hypre_CSRMatrixInitialize( C );
/* ==== The second run through A and B is to pick out the column numbers
for each row, and put them in C. */
C_i = hypre_CSRMatrixI(C);
C_i[0] = 0;
C_j = hypre_CSRMatrixJ(C);
mc = 0;
for ( i=0; i<num_rows; ++i )
{
ma_min = A_i[i]; ma_max = A_i[i+1];
for ( ma=ma_min; ma<ma_max; ++ma )
{
C_j[mc] = A_j[ma];
++mc;
}
for ( mb=B_i[i]; mb<B_i[i+1]; ++mb )
{
jB = B_j[mb];
if ( col_map_offd_B ) jB = col_map_offd_B[jB];
match = 0;
for ( ma=ma_min; ma<ma_max; ++ma )
{
jA = A_j[ma];
if ( col_map_offd_A ) jA = col_map_offd_A[jA];
if ( jB == jA )
{
match = 1;
if( ma==ma_min ) ++ma_min;
break;
}
}
if ( match==0 )
{
if ( col_map_offd_A )
C_j[mc] = jC[ B_j[mb] ];
else
C_j[mc] = B_j[mb];
/* ... I don't know whether column indices are required to be in any
particular order. If so, we'll need to sort. */
++mc;
}
}
C_i[i+1] = mc;
}
hypre_assert( mc == num_nonzeros );
if (jC) hypre_TFree( jC );
return C;
}
static HYPRE_Int hypre_CSRMatrixGetLoadBalancedPartitionBoundary(hypre_CSRMatrix *A, HYPRE_Int idx)
{
HYPRE_Int num_nonzerosA = hypre_CSRMatrixNumNonzeros(A);
HYPRE_Int num_rowsA = hypre_CSRMatrixNumRows(A);
HYPRE_Int *A_i = hypre_CSRMatrixI(A);
HYPRE_Int num_threads = hypre_NumActiveThreads();
HYPRE_Int nonzeros_per_thread = (num_nonzerosA + num_threads - 1)/num_threads;
if (idx <= 0)
{
return 0;
}
else if (idx >= num_threads)
{
return num_rowsA;
}
else
{
return (HYPRE_Int)(hypre_LowerBound(A_i, A_i + num_rowsA, nonzeros_per_thread*idx) - A_i);
}
}
HYPRE_Int hypre_CSRMatrixGetLoadBalancedPartitionBegin(hypre_CSRMatrix *A)
{
return hypre_CSRMatrixGetLoadBalancedPartitionBoundary(A, hypre_GetThreadNum());
}
HYPRE_Int hypre_CSRMatrixGetLoadBalancedPartitionEnd(hypre_CSRMatrix *A)
{
return hypre_CSRMatrixGetLoadBalancedPartitionBoundary(A, hypre_GetThreadNum() + 1);
}
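/*--------------------------------------------------------------------------
 * Usage sketch (illustrative only; the function name below is not part of
 * the hypre API): each thread in an OpenMP parallel region asks for its own
 * row range, so every thread handles roughly the same number of nonzeros
 * rather than the same number of rows.
 *--------------------------------------------------------------------------*/
void hypre_CSRMatrixLoadBalancedRowLoopExample( hypre_CSRMatrix *A )
{
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
   {
      HYPRE_Int ibegin = hypre_CSRMatrixGetLoadBalancedPartitionBegin(A);
      HYPRE_Int iend   = hypre_CSRMatrixGetLoadBalancedPartitionEnd(A);
      HYPRE_Int i;
      for (i = ibegin; i < iend; i++)
      {
         /* process row i of A here */
      }
   }
}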
|
gradient.c | /********************************************************************[libaroma]*
* Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*______________________________________________________________________________
*
* Filename : gradient.c
* Description : gradient drawing
*
* + This is part of libaroma, an embedded ui toolkit.
* + 06/04/15 - Author(s): Ahmad Amarullah
*
*/
#ifndef __libaroma_gradient_c__
#define __libaroma_gradient_c__
#include <aroma_internal.h>
#ifdef __cplusplus
extern "C" {
#endif
bytep _libaroma_gradient_corner(
int r) {
if (r < 1) {
ALOGW("_libaroma_gradient_corner radius<1");
return NULL;
}
/* Allocating Memory */
int i, n;
int sz = r * r;
bytep out = calloc(sz,1);
/* Pythagoras Based */
for (i = 1; i <= r; i++) {
float w = sqrt(sz - i * i);
int fw = (int) floor(w);
byte t = (byte) MIN(round((w - ((float) fw)) * 0xff), 0xff);
int idx = ((r - i) * r) + (r - fw - 1);
out[idx] = (byte) MAX(t, out[idx]);
idx = ((r - fw - 1) * r) + (r - i);
out[idx] = (byte) MAX(t, out[idx]);
/* Set opaque for leftover pixels */
for (n = 1; n <= fw; n++) {
idx = ((r - i) * r) + (r - n);
out[idx] = 0xff;
}
}
return out;
}
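/* How the mask above works: for each vertical offset i (1..r) the quarter
 * circle crosses the row at horizontal distance w = sqrt(r*r - i*i) from the
 * corner center. floor(w) pixels on that row are fully covered (0xff), the
 * next pixel receives the fractional coverage (w - floor(w)) scaled to
 * 0..0xff, and the same value is mirrored across the diagonal so only one
 * octant needs to be computed explicitly.
 */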
static inline void _libaroma_gradient_draw_rounded(
wordp __restrict line_mem, bytep __restrict line_alpha,
wordp __restrict roundTmp,
bytep __restrict roundData, int roundSize, byte isRight, int y) {
int i;
if (line_alpha != NULL) {
if (isRight) {
for (i = 0; i < roundSize; i++) {
line_alpha[i] = MAX(line_alpha[i] +
roundData[y * roundSize + (roundSize - i) - 1] - 0xff, 0);
}
}
else {
for (i = 0; i < roundSize; i++) {
line_alpha[i] = MAX(line_alpha[i] +
roundData[y * roundSize + i] - 0xff, 0);
}
}
}
else {
if (isRight) {
for (i = 0; i < roundSize; i++) {
line_mem[i] = libaroma_alpha(roundTmp[i],
line_mem[i], roundData[y * roundSize + (roundSize - i) - 1]);
}
}
else {
libaroma_alpha_px(
roundSize,
line_mem,
roundTmp,
line_mem,
roundData+y*roundSize
);/*
for (i = 0; i < roundSize; i++) {
line_mem[i] = libaroma_alpha(roundTmp[i],
line_mem[i], roundData[y * roundSize + i]);
}*/
}
}
}
/*
* Function : libaroma_gradient_ex1
* Return Value: byte
* Descriptions: draw gradient rectangle
*/
byte libaroma_gradient_ex1(
LIBAROMA_CANVASP dst,
int x, int y, int w, int h,
word startColor, word endColor,
int roundSize, word roundFlag,
byte startAlpha, byte endAlpha,
byte flags) {
if (dst == NULL) {
dst = libaroma_fb()->canvas;
}
byte noDither = (flags&LIBAROMA_DRAW_NODITHER)?1:0;
/* fix position */
int x2 = x + w;
int y2 = y + h;
if (x2 > dst->w) {
x2 = dst->w;
}
if (y2 > dst->h) {
y2 = dst->h;
}
if (x < 0) {
x = 0;
}
if (y < 0) {
y = 0;
}
byte samecolor = (startColor==endColor)?1:0;
/* alpha handling */
byte useAlpha = 1;
byte useCanvasAlpha = 0;
byte ignoreAlpha = (roundSize < 0) ? 1 : 0;
if ((startAlpha == 0xff) && (endAlpha == 0xff)) {
useAlpha = 0;
}
if (!(flags&LIBAROMA_DRAW_NO_DST_ALPHA)){
if (dst->alpha != NULL) {
useCanvasAlpha = 1;
useAlpha = 0;
}
}
if (ignoreAlpha) {
roundSize = 0;
useAlpha = 0;
useCanvasAlpha = 0;
}
/* prepare */
w = x2 - x;
h = y2 - y;
if (roundSize > h / 2) {
roundSize = h / 2;
}
if (roundSize > w / 2) {
roundSize = w / 2;
}
byte roundCorners[4] = { 0, 0, 0, 0 };
bytep roundData = NULL;
if ((roundSize > 0) && (roundFlag != 0)) {
roundCorners[0] = ((roundFlag & 0x1000) == 0x1000) ? 1 : 0;
roundCorners[1] = ((roundFlag & 0x0100) == 0x0100) ? 1 : 0;
roundCorners[2] = ((roundFlag & 0x0010) == 0x0010) ? 1 : 0;
roundCorners[3] = ((roundFlag & 0x0001) == 0x0001) ? 1 : 0;
roundData = _libaroma_gradient_corner(roundSize);
}
/* draw */
int _Y;
#ifdef LIBAROMA_CONFIG_OPENMP
#pragma omp parallel for
#endif
for (_Y = 0; _Y < h; _Y++) {
bytep line_alpha = NULL;
wordp roundTmp = NULL;
if (roundData!=NULL){
roundTmp = (wordp) malloc(roundSize * 2 * 2);
}
int ypos = y + _Y;
byte cR=0,cG=0,cB=0;
#ifdef LIBAROMA_CONFIG_GRADIENT_FLOAT
float intensity = ((float) _Y) / ((float) h);
float r_intensity = 1.0 - intensity;
if (!samecolor){
cR = ((byte) MIN(((libaroma_color_r(startColor) * r_intensity) +
(libaroma_color_r(endColor) * intensity)), 0xff) );
cG = ((byte) MIN(((libaroma_color_g(startColor) * r_intensity) +
(libaroma_color_g(endColor) * intensity)), 0xff) );
cB = ((byte) MIN(((libaroma_color_b(startColor) * r_intensity) +
(libaroma_color_b(endColor) * intensity)), 0xff) );
}
#else
word intensity = (_Y * 0x100) / h;
word r_intensity = 0x100 - intensity;
if (!samecolor){
cR = ((byte) MIN((((libaroma_color_r(startColor) * r_intensity)>>8)+
((libaroma_color_r(endColor) * intensity) >> 8)), 0xff) );
cG = ((byte) MIN((((libaroma_color_g(startColor) * r_intensity)>>8)+
((libaroma_color_g(endColor) * intensity) >> 8)), 0xff) );
cB = ((byte) MIN((((libaroma_color_b(startColor) * r_intensity)>>8)+
((libaroma_color_b(endColor) * intensity) >> 8)), 0xff) );
}
#endif
int data_posxy = (ypos * dst->l) + x;
wordp line_mem = (wordp) dst->data + data_posxy;
if (useCanvasAlpha) {
line_alpha = dst->alpha+data_posxy;
}
/* Save Bg Data */
byte drawRound = 0;
if (roundData != NULL) {
if (_Y < roundSize) {
if (roundCorners[0]) {
memcpy(roundTmp, line_mem, roundSize * 2);
drawRound = 1;
}
if (roundCorners[1]) {
memcpy(roundTmp + roundSize,
line_mem + w - roundSize, roundSize * 2);
drawRound = 1;
}
}
else if (_Y >= h - roundSize) {
if (roundCorners[2]) {
memcpy(roundTmp, line_mem, roundSize * 2);
drawRound = 1;
}
if (roundCorners[3]) {
memcpy(roundTmp + roundSize,
line_mem + w - roundSize, roundSize * 2);
drawRound = 1;
}
}
}
/* Draw Now */
if (useAlpha) {
byte cA = startAlpha;
if (startAlpha!=endAlpha){
#ifdef LIBAROMA_CONFIG_GRADIENT_FLOAT
cA = ((byte) MIN(
((startAlpha * r_intensity) + (endAlpha * intensity)), 0xff));
#else
cA = ((byte) MIN(
(((startAlpha * r_intensity) >> 8) +
((endAlpha * intensity) >> 8)), 0xff));
#endif
}
if (!samecolor){
if (noDither){
/*
libaroma_color_set(alphaTmpLine, libaroma_rgb(cR, cG, cB), w);
libaroma_alpha_const(w,
line_mem, line_mem, alphaTmpLine, cA);
*/
libaroma_alpha_rgba_fill(w,
line_mem,
line_mem,
libaroma_rgb(cR, cG, cB),
cA
);
}
else{
libaroma_alpha_rgba_fill_line(_Y,w,
line_mem,
line_mem,
libaroma_rgb(cR, cG, cB),
cA
);
/*
libaroma_dither_line_const(_Y, w,
alphaTmpLine, libaroma_rgb32(cR, cG, cB));
libaroma_alpha_const_line(_Y, w,
line_mem, line_mem, alphaTmpLine, cA);*/
}
}
else{
libaroma_alpha_rgba_fill(w,line_mem,line_mem,startColor,cA);
}
}
else {
if (!samecolor){
if (noDither){
libaroma_color_set(line_mem, libaroma_rgb(cR, cG, cB), w);
}
else{
libaroma_dither_line_const(_Y, w,
line_mem, libaroma_rgb32(cR, cG, cB));
}
}
else{
libaroma_color_set(line_mem, startColor, w);
}
if (useCanvasAlpha) {
byte cA = startAlpha;
if (startAlpha!=endAlpha){
#ifdef LIBAROMA_CONFIG_GRADIENT_FLOAT
cA = ((byte) MIN(((startAlpha * r_intensity) +
(endAlpha * intensity)), 0xff));
#else
cA = ((byte) MIN((((startAlpha * r_intensity) >> 8) +
((endAlpha * intensity) >> 8)), 0xff));
#endif
}
memset(line_alpha, cA, w);
}
}
/* corners */
if (drawRound) {
if (_Y < roundSize) {
if (roundCorners[0]) {
_libaroma_gradient_draw_rounded(line_mem, line_alpha, roundTmp,
roundData, roundSize, 0, _Y);
}
if (roundCorners[1]) {
if (line_alpha != NULL) {
line_alpha = line_alpha + w - (roundSize);
}
_libaroma_gradient_draw_rounded(line_mem + w - roundSize,
line_alpha, roundTmp + roundSize, roundData, roundSize, 1, _Y);
}
}
else if (_Y >= h - roundSize) {
if (roundCorners[2]) {
_libaroma_gradient_draw_rounded(line_mem, line_alpha, roundTmp,
roundData, roundSize, 0, h - _Y - 1);
}
if (roundCorners[3]) {
if (line_alpha != NULL) {
line_alpha = line_alpha + w - (roundSize);
}
_libaroma_gradient_draw_rounded(line_mem + w - roundSize,
line_alpha, roundTmp + roundSize,
roundData, roundSize, 1, h - _Y - 1);
}
}
}
/*
if (useAlpha) {
free(alphaTmpLine);
}*/
if (roundData != NULL) {
free(roundTmp);
}
}
if (roundData != NULL) {
free(roundData);
//free(roundTmp);
}
return 1;
} /* End of libaroma_gradient_ex1 */
#ifdef __cplusplus
}
#endif
#endif /* __libaroma_gradient_c__ */
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, test;
int Nx, Ny, Nz, Nt;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2; /* allocated below: one double** pointer per z-plane */
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
roc2 = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
roc2[i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 8;
tile_size[3] = 128;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
roc2[i][j][k] = 2.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
const double coef0 = -0.28472;
const double coef1 = 0.16000;
const double coef2 = -0.02000;
const double coef3 = 0.00254;
const double coef4 = -0.00018;
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
coef0* A[t%2][i ][j ][k ] +
coef1*(A[t%2][i-1][j ][k ] + A[t%2][i+1][j ][k ] +
A[t%2][i ][j-1][k ] + A[t%2][i ][j+1][k ] +
A[t%2][i ][j ][k-1] + A[t%2][i ][j ][k+1]) +
coef2*(A[t%2][i-2][j ][k ] + A[t%2][i+2][j ][k ] +
A[t%2][i ][j-2][k ] + A[t%2][i ][j+2][k ] +
A[t%2][i ][j ][k-2] + A[t%2][i ][j ][k+2]) +
coef3*(A[t%2][i-3][j ][k ] + A[t%2][i+3][j ][k ] +
A[t%2][i ][j-3][k ] + A[t%2][i ][j+3][k ] +
A[t%2][i ][j ][k-3] + A[t%2][i ][j ][k+3]) +
coef4*(A[t%2][i-4][j ][k ] + A[t%2][i+4][j ][k ] +
A[t%2][i ][j-4][k ] + A[t%2][i ][j+4][k ] +
A[t%2][i ][j ][k-4] + A[t%2][i ][j ][k+4]) );
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
free(roc2[i][j]);
}
free(A[0][i]);
free(A[1][i]);
free(roc2[i]);
}
free(A[0]);
free(A[1]);
free(roc2);
return 0;
}
|
is.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
--------------------------------------------------------------------
NAS Parallel Benchmarks 2.3 OpenMP C versions - IS
This benchmark is an OpenMP C version of the NPB IS code.
The OpenMP C versions are developed by RWCP and derived from the serial
Fortran versions in "NPB 2.3-serial" developed by NAS.
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted.
This software is provided "as is" without express or implied warranty.
Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp
Information on OpenMP activities at RWCP is available at:
http://pdplab.trc.rwcp.or.jp/pdperf/Omni/
Information on NAS Parallel Benchmarks 2.3 is available at:
http://www.nas.nasa.gov/NAS/NPB/
--------------------------------------------------------------------
*/
/*
--------------------------------------------------------------------
Author: M. Yarrow
OpenMP C version: S. Satoh
--------------------------------------------------------------------
*/
#include "npbparams.h"
#include <stdlib.h>
#include <stdio.h>
/* */
/* For serial IS, buckets are not really req'd to solve NPB1 IS */
/* spec, but their use on some machines improves performance, on */
/* other machines the use of buckets compromises performance, */
/* probably because it is extra computation which is not req'd. */
/* (Note: Mechanism not understood, probably cache related) */
/* Example: SP2-66MhzWN: 50% speedup with buckets */
/* Example: SGI Indy5000: 50% slowdown with buckets */
/* Example: SGI O2000: 400% slowdown with buckets (Wow!) */
/* */
/* #define USE_BUCKETS */
/* buckets are not used in the OpenMP C version */
/* */
/* default values */
/* */
/* */
/* CLASS S */
/* */
/* */
/* CLASS W */
/* */
/* */
/* CLASS A */
/* */
/* */
/* CLASS B */
/* */
/* */
/* CLASS C */
/* */
/* */
/* Typedef: if necessary, change the */
/* size of int here by changing the */
/* int type to, say, long */
/* */
typedef int INT_TYPE;
/* */
/* Some global info */
/* */
INT_TYPE * key_buff_ptr_global;
/* used by full_verify to get */
/* copies of rank info */
int passed_verification;
/* */
/* These are the three main arrays. */
/* See SIZE_OF_BUFFERS def above */
/* */
INT_TYPE key_array[(1<<23)], key_buff1[(1<<23)], key_buff2[(1<<23)], partial_verify_vals[5];
/* */
/* Partial verif info */
/* */
INT_TYPE test_index_array[5], test_rank_array[5],
    S_test_index_array[5] = {48427, 17148, 23627, 62548, 4431},
    S_test_rank_array[5] = {0, 18, 346, 64917, 65463},
    W_test_index_array[5] = {357773, 934767, 875723, 898999, 404505},
    W_test_rank_array[5] = {1249, 11698, 1039987, 1043896, 1048018},
    A_test_index_array[5] = {2112377, 662041, 5336171, 3642833, 4250760},
    A_test_rank_array[5] = {104, 17523, 123928, 8288932, 8388264},
    B_test_index_array[5] = {41869, 812306, 5102857, 18232239, 26860214},
    B_test_rank_array[5] = {33422937, 10244, 59149, 33135281, 99},
    C_test_index_array[5] = {44172927, 72999161, 74326391, 129606274, 21736814},
    C_test_rank_array[5] = {61147, 882988, 266290, 133997595, 133525895};
/* */
/* function prototypes */
/* */
double randlc(double * X, double * A);
void full_verify(void );
/*
FUNCTION RANDLC (X, A)
*
* This routine returns a uniform pseudorandom double precision number in the
* range (0, 1) by using the linear congruential generator
*
* x_{k+1} = a x_k (mod 2^46)
*
* where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
* before repeating. The argument A is the same as 'a' in the above formula,
* and X is the same as x_0. A and X must be odd double precision integers
* in the range (1, 2^46). The returned value RANDLC is normalized to be
* between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain
* the new seed x_1, so that subsequent calls to RANDLC using the same
* arguments will generate a continuous sequence.
*
* This routine should produce the same results on any computer with at least
* 48 mantissa bits in double precision floating point data. On Cray systems,
* double precision should be disabled.
*
* David H. Bailey October 26, 1990
*
* IMPLICIT DOUBLE PRECISION (A-H, O-Z)
* SAVE KS, R23, R46, T23, T46
* DATA KS/0/
*
* If this is the first call to RANDLC, compute R23 = 2 ^ -23, R46 = 2 ^ -46,
* T23 = 2 ^ 23, and T46 = 2 ^ 46. These are computed in loops, rather than
* by merely using the ** operator, in order to insure that the results are
* exact on all systems. This code assumes that 0.5D0 is represented exactly.
*/
/* */
/* R A N D L C */
/* */
/* portable random number generator */
/* */
double randlc(double * X, double * A)
{
static int KS = 0;
static double R23, R46, T23, T46;
double T1, T2, T3, T4;
double A1;
double A2;
double X1;
double X2;
double Z;
int i, j;
double _ret_val_0;
if (KS==0)
{
R23=1.0;
R46=1.0;
T23=1.0;
T46=1.0;
#pragma loop name randlc#0
#pragma cetus reduction(*: R23, T23)
#pragma cetus parallel
#pragma omp parallel for reduction(*: R23, T23)
for (i=1; i<=23; i ++ )
{
R23=(0.5*R23);
T23=(2.0*T23);
}
#pragma loop name randlc#1
#pragma cetus reduction(*: R46, T46)
#pragma cetus parallel
#pragma omp parallel for reduction(*: R46, T46)
for (i=1; i<=46; i ++ )
{
R46=(0.5*R46);
T46=(2.0*T46);
}
KS=1;
}
/* Break A into two parts such that A = 2^23 A1 + A2 and set X = N. */
T1=(R23*( * A));
j=T1;
A1=j;
A2=(( * A)-(T23*A1));
/*
Break X into two parts such that X = 2^23 X1 + X2, compute
Z = A1 * X2 + A2 * X1 (mod 2^23), and then
X = 2^23 * Z + A2 * X2 (mod 2^46).
*/
T1=(R23*( * X));
j=T1;
X1=j;
X2=(( * X)-(T23*X1));
T1=((A1*X2)+(A2*X1));
j=(R23*T1);
T2=j;
Z=(T1-(T23*T2));
T3=((T23*Z)+(A2*X2));
j=(R46*T3);
T4=j;
( * X)=(T3-(T46*T4));
_ret_val_0=(R46*( * X));
return _ret_val_0;
}
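/*
   Illustrative cross-check (ours, not part of the benchmark): the recurrence
   x_{k+1} = a * x_k (mod 2^46) can also be evaluated with 64-bit integer
   arithmetic.  randlc() above should reproduce these values exactly on any
   machine whose double precision carries at least 48 mantissa bits.
*/
static double randlc_check_u64(double * X, double A)
{
    unsigned long long x = (unsigned long long)(*X);
    unsigned long long a = (unsigned long long)A;
    unsigned long long mask = ((unsigned long long)1 << 46) - 1ULL;
    /* a and x are both < 2^46, so the low 46 bits of the (wrapping) 64-bit */
    /* product equal a*x mod 2^46                                           */
    x = (a * x) & mask;
    *X = (double)x;
    return (double)x / (double)(mask + 1ULL);
}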
/* */
/* C R E A T E _ S E Q */
/* */
void create_seq(double seed, double a)
{
double x;
int i, j, k;
k=((1<<19)/4);
#pragma loop name create_seq#0
for (i=0; i<(1<<23); i ++ )
{
x=randlc( & seed, & a);
x+=randlc( & seed, & a);
x+=randlc( & seed, & a);
x+=randlc( & seed, & a);
key_array[i]=(k*x);
}
return ;
}
/* */
/* F U L L _ V E R I F Y */
/* */
void full_verify()
{
INT_TYPE i, j;
INT_TYPE k;
INT_TYPE m, unique_keys;
/* Now, finally, sort the keys: */
#pragma loop name full_verify#0
for (i=0; i<(1<<23); i ++ )
{
key_array[ -- key_buff_ptr_global[key_buff2[i]]]=key_buff2[i];
}
/* Confirm keys correctly sorted: count incorrectly sorted keys, if any */
j=0;
#pragma loop name full_verify#1
#pragma cetus reduction(+: j)
#pragma cetus parallel
#pragma omp parallel for reduction(+: j)
for (i=1; i<(1<<23); i ++ )
{
if (key_array[i-1]>key_array[i])
{
j ++ ;
}
}
if (j!=0)
{
printf("Full_verify: number of keys out of sort: %d\n", j);
}
else
{
passed_verification ++ ;
}
return ;
}
/* */
/* R A N K */
/* */
void rank(int iteration)
{
INT_TYPE i, j, k;
INT_TYPE l, m;
INT_TYPE shift = 19-10;
INT_TYPE key;
INT_TYPE min_key_val, max_key_val;
INT_TYPE prv_buff1[(1<<19)];
key_array[iteration]=iteration;
key_array[iteration+10]=((1<<19)-iteration);
/* Determine where the partial verify test keys are, load into */
/* top of array bucket_size */
#pragma loop name rank#0
for (i=0; i<5; i ++ )
{
partial_verify_vals[i]=key_array[test_index_array[i]];
}
/* Clear the work array */
#pragma loop name rank#1
#pragma cetus parallel
#pragma omp parallel for
for (i=0; i<(1<<19); i ++ )
{
key_buff1[i]=0;
}
#pragma loop name rank#2
#pragma cetus parallel
#pragma omp parallel for
for (i=0; i<(1<<19); i ++ )
{
prv_buff1[i]=0;
}
/* Copy keys into work array; keys in key_array will be reused each iter. */
#pragma loop name rank#3
#pragma cetus reduction(+: prv_buff1[key_buff2[i]])
for (i=0; i<(1<<23); i ++ )
{
key_buff2[i]=key_array[i];
/* Ranking of all keys occurs in this section: */
/*
In this section, the keys themselves are used as their
own indexes to determine how many of each there are: their
individual population
*/
prv_buff1[key_buff2[i]] ++ ;
/* Now they have individual key */
}
/* population */
#pragma loop name rank#4
for (i=0; i<((1<<19)-1); i ++ )
{
prv_buff1[i+1]+=prv_buff1[i];
}
#pragma loop name rank#5
for (i=0; i<(1<<19); i ++ )
{
key_buff1[i]+=prv_buff1[i];
}
/*
To obtain ranks of each key, successively add the individual key
population, not forgetting to add m, the total of lesser keys,
to the first key population
*/
/* This is the partial verify test section */
/* Observe that test_rank_array vals are */
/* shifted differently for different cases */
#pragma loop name rank#6
#pragma cetus reduction(+: passed_verification)
for (i=0; i<5; i ++ )
{
k=partial_verify_vals[i];
/* test vals were put here */
if ((0<=k)&&(k<=((1<<23)-1)))
{
switch ('A')
{
case 'S':
if (i<=2)
{
if (key_buff1[k-1]!=(test_rank_array[i]+iteration))
{
printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i);
}
else
{
passed_verification ++ ;
}
}
else
{
if (key_buff1[k-1]!=(test_rank_array[i]-iteration))
{
printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i);
}
else
{
passed_verification ++ ;
}
}
break;
case 'W':
if (i<2)
{
if (key_buff1[k-1]!=(test_rank_array[i]+(iteration-2)))
{
printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i);
}
else
{
passed_verification ++ ;
}
}
else
{
if (key_buff1[k-1]!=(test_rank_array[i]-iteration))
{
printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i);
}
else
{
passed_verification ++ ;
}
}
break;
case 'A':
if (i<=2)
{
if (key_buff1[k-1]!=(test_rank_array[i]+(iteration-1)))
{
printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i);
}
else
{
passed_verification ++ ;
}
}
else
{
if (key_buff1[k-1]!=(test_rank_array[i]-(iteration-1)))
{
printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i);
}
else
{
passed_verification ++ ;
}
}
break;
case 'B':
if (((i==1)||(i==2))||(i==4))
{
if (key_buff1[k-1]!=(test_rank_array[i]+iteration))
{
printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i);
}
else
{
passed_verification ++ ;
}
}
else
{
if (key_buff1[k-1]!=(test_rank_array[i]-iteration))
{
printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i);
}
else
{
passed_verification ++ ;
}
}
break;
case 'C':
if (i<=2)
{
if (key_buff1[k-1]!=(test_rank_array[i]+iteration))
{
printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i);
}
else
{
passed_verification ++ ;
}
}
else
{
if (key_buff1[k-1]!=(test_rank_array[i]-iteration))
{
printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i);
}
else
{
passed_verification ++ ;
}
}
break;
}
}
}
/*
Make copies of rank info for use by full_verify: these variables
in rank are local; making them global slows down the code, probably
since they cannot be made register by compiler
*/
if (iteration==10)
{
key_buff_ptr_global=key_buff1;
}
/* end master */
return ;
}
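/*
   Minimal sketch (illustrative only, mirrors loops rank#1..rank#5 above):
   ranking by counting sort.  After the prefix sum, count[k] is the number of
   keys whose value is <= k, i.e. the rank of key value k.
*/
static void rank_sketch(const INT_TYPE * keys, INT_TYPE nkeys,
                        INT_TYPE * count, INT_TYPE max_key)
{
    INT_TYPE i;
    for (i=0; i<max_key; i ++ )
    {
        count[i]=0;
    }
    for (i=0; i<nkeys; i ++ )
    {
        count[keys[i]] ++ ;     /* individual key population */
    }
    for (i=0; i<(max_key-1); i ++ )
    {
        count[i+1]+=count[i];   /* cumulative counts == ranks */
    }
}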
/* */
/* M A I N */
/* */
int main(int argc, char * * argv)
{
int i, iteration, itemp;
int nthreads = 1;
double timecounter, maxtime;
/* Initialize the verification arrays if a valid class */
int _ret_val_0 = 0;
#pragma loop name main#0
for (i=0; i<5; i ++ )
{
switch ('A')
{
case 'S':
test_index_array[i]=S_test_index_array[i];
test_rank_array[i]=S_test_rank_array[i];
break;
case 'A':
test_index_array[i]=A_test_index_array[i];
test_rank_array[i]=A_test_rank_array[i];
break;
case 'W':
test_index_array[i]=W_test_index_array[i];
test_rank_array[i]=W_test_rank_array[i];
break;
case 'B':
test_index_array[i]=B_test_index_array[i];
test_rank_array[i]=B_test_rank_array[i];
break;
case 'C':
test_index_array[i]=C_test_index_array[i];
test_rank_array[i]=C_test_rank_array[i];
break;
}
}
;
/* Printout initial NPB info */
printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"" - IS Benchmark\n\n");
printf(" Size: %d (class %c)\n", 1<<23, 'A');
printf(" Iterations: %d\n", 10);
/* Initialize timer */
timer_clear(0);
/* Generate random number sequence and subsequent keys on all procs */
/* Random number gen seed */
create_seq(3.14159265E8, 1.220703125E9);
/* Random number gen mult */
/*
Do one iteration for free (i.e., untimed) to guarantee initialization of
all data and code pages and respective tables
*/
rank(1);
/* Start verification counter */
passed_verification=0;
printf("\n iteration\n");
/* Start timer */
timer_start(0);
/* This is the main iteration */
#pragma loop name main#1
for (iteration=1; iteration<=10; iteration ++ )
{
printf(" %d\n", iteration);
rank(iteration);
}
/* End of timing, obtain maximum time of all processors */
timer_stop(0);
timecounter=timer_read(0);
/*
This tests that keys are in sequence: sorting of last ranked key seq
occurs here, but is an untimed operation
*/
full_verify();
/* The final printout */
if (passed_verification!=((5*10)+1))
{
passed_verification=0;
}
c_print_results("IS", 'A', 1<<23, 0, 0, 10, nthreads, timecounter, (((double)(10*(1<<23)))/timecounter)/1000000.0, "keys ranked", passed_verification, "3.0 structured", "01 Dec 2019", "(none)", "(none)", "-lm", "(none)", "(none)", "(none)", "randlc");
/* */
return _ret_val_0;
}
|
macc.h | #ifndef _MACC_H
#define _MACC_H 1
#include <omp.h>
#include <openacc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
// Including stdbool.h may cause conflicts, so define bool manually.
#define bool int
#define true 1
#define false 0
#define MIN(a,b) (((a)<(b))?(a):(b))
#define MAX(a,b) (((a)>(b))?(a):(b))
#define TOPADDR(ADDR, LB, TYPE_SIZE) (ADDR + LB * (size_t)TYPE_SIZE)
#define LENGTH_BYTE(LB, UB, TYPE_SIZE) ((UB - LB + 1) * (size_t)TYPE_SIZE)
#define ARE_OVERLAPPING(a_lb, a_ub, b_lb, b_ub) (!(a_lb > b_ub || a_ub < b_lb))
#define ARE_OVERLAPPING_WHOLE(a_lb, a_ub, b_lb, b_ub) (a_lb <= b_lb && b_ub <= a_ub)
#define __MACC_DEVICE_TYPE acc_device_nvidia
#define __MACC_MAX_NUMGPUS 10
int __MACC_NUMGPUS = -1;
int __macc_get_num_gpus()
{
return acc_get_num_devices(__MACC_DEVICE_TYPE);
}
int * __MACC_TOPOLOGY;
int __macc_set_gpu_num(int i)
{
int gpu_id = __MACC_TOPOLOGY[i];
acc_set_device_num(gpu_id, __MACC_DEVICE_TYPE);
return gpu_id;
}
#define MACC_DATA_TABLE_SIZE 256
#define TABLE_INDEX(ptr) (((long)ptr / 16) % MACC_DATA_TABLE_SIZE)
struct __MaccDataTableEntry {
void *addr;
void *addr_ub;
int type_size;
int entire_lb;
int entire_ub;
bool dirty;
int dirty_lb;
int dirty_ub;
int offset;
struct __MaccDataTableEntry *next;
};
struct __MaccDataTable {
struct __MaccDataTableEntry *entries[MACC_DATA_TABLE_SIZE];
};
struct __MaccDataTable * __MACC_DATA_TABLE_SET;
#define MACC_DATA_WRAP_CACHE_LEN 16
#define MACC_DATA_WRAP_CACHE_SIZE 16
#define CACHE_LANE(ptr) (((long)ptr / 16) % MACC_DATA_WRAP_CACHE_LEN)
struct __MaccDataWrapCache {
void * addr[MACC_DATA_WRAP_CACHE_LEN * MACC_DATA_WRAP_CACHE_SIZE];
struct __MaccDataTableEntry * entry[MACC_DATA_WRAP_CACHE_LEN * MACC_DATA_WRAP_CACHE_SIZE];
int offset[MACC_DATA_WRAP_CACHE_LEN * MACC_DATA_WRAP_CACHE_SIZE];
int cachenum[MACC_DATA_WRAP_CACHE_LEN];
};
struct __MaccDataWrapCache * __MACC_DATA_WRAP_CACHE_SET;
void __macc_data_table_insert(
int gpu_num, void *ptr, int type_size, int entire_lb, int entire_ub)
{
int index = TABLE_INDEX(ptr);
struct __MaccDataTableEntry *new_entry = malloc(sizeof(struct __MaccDataTableEntry));
new_entry->addr = ptr;
new_entry->addr_ub = ptr + entire_ub * (size_t)type_size;
new_entry->type_size = type_size;
new_entry->entire_lb = entire_lb;
new_entry->entire_ub = entire_ub;
new_entry->dirty = false;
new_entry->dirty_lb = -1;
new_entry->dirty_ub = -1;
new_entry->next = __MACC_DATA_TABLE_SET[gpu_num].entries[index];
__MACC_DATA_TABLE_SET[gpu_num].entries[index] = new_entry;
}
struct __MaccDataTableEntry *__macc_data_table_find(int gpu_num, void *ptr)
{
int index = TABLE_INDEX(ptr);
struct __MaccDataTableEntry *entry = __MACC_DATA_TABLE_SET[gpu_num].entries[index];
while (entry != NULL) {
if (entry->addr == ptr) {
entry->offset = 0;
return entry;
}
entry = entry->next;
}
    /* Use a pointer to the per-GPU wrap cache: the original by-value copy meant
       that insertions below never persisted, so the cache could never hit. */
    struct __MaccDataWrapCache *wrap_cache = &__MACC_DATA_WRAP_CACHE_SET[gpu_num];
    int lane = CACHE_LANE(ptr);
    for (int i = 0; i < wrap_cache->cachenum[lane]; i++) {
        if (ptr == wrap_cache->addr[lane * MACC_DATA_WRAP_CACHE_SIZE + i]) {
            entry = wrap_cache->entry[lane * MACC_DATA_WRAP_CACHE_SIZE + i];
            entry->offset = wrap_cache->offset[lane * MACC_DATA_WRAP_CACHE_SIZE + i];
            return entry;
        }
    }
    for (int i = 0; i < MACC_DATA_TABLE_SIZE; i++) {
        entry = __MACC_DATA_TABLE_SET[gpu_num].entries[i];
        while (entry != NULL) {
            if (entry->addr <= ptr && ptr <= entry->addr_ub) {
                int offset = (ptr - entry->addr) / entry->type_size;
                int cachenum = wrap_cache->cachenum[lane];
                if (cachenum == MACC_DATA_WRAP_CACHE_SIZE) {
                    cachenum = 0;
                }
                /* Key the cache by the looked-up (interior) pointer so later
                   lookups of the same pointer hit the fast path above. */
                wrap_cache->addr[lane * MACC_DATA_WRAP_CACHE_SIZE + cachenum] = ptr;
                wrap_cache->entry[lane * MACC_DATA_WRAP_CACHE_SIZE + cachenum] = entry;
                wrap_cache->offset[lane * MACC_DATA_WRAP_CACHE_SIZE + cachenum] = offset;
                wrap_cache->cachenum[lane] = cachenum + 1;
                entry->offset = offset;
                return entry;
            }
            entry = entry->next;
        }
    }
fprintf(stderr, "Error on __macc_data_table_find: Not found the item %p\n", ptr);
exit(-1);
return NULL;
}
void __macc_data_table_delete(int gpu_num, void *ptr)
{
int index = TABLE_INDEX(ptr);
struct __MaccDataTableEntry *entry = __MACC_DATA_TABLE_SET[gpu_num].entries[index];
struct __MaccDataTableEntry *pre = NULL;
memset(__MACC_DATA_WRAP_CACHE_SET[gpu_num].cachenum, 0, MACC_DATA_WRAP_CACHE_LEN * sizeof(int));
if (entry != NULL) {
if(entry->addr == ptr) {
__MACC_DATA_TABLE_SET[gpu_num].entries[index] = entry->next;
free(entry);
return;
}
pre = entry;
entry = entry->next;
}
while (pre != NULL && entry != NULL) {
if (entry->addr == ptr) {
pre->next = entry->next;
free(entry);
return;
}
pre = entry;
entry = entry->next;
}
fprintf(stderr, "Error on __macc_data_table_delete: Not found the item %p\n", ptr);
exit(-1);
}
void __macc_delete(int gpu_num, void *ptr, int type_size, int lb, int length)
{
acc_delete_async(TOPADDR(ptr, lb, type_size), length * (size_t)type_size, gpu_num);
__macc_data_table_delete(gpu_num, ptr);
acc_wait(gpu_num);
}
void __macc_copyout(int gpu_num, void *ptr, int type_size, int lb, int length)
{
struct __MaccDataTableEntry *entry = __macc_data_table_find(gpu_num, ptr);
if (entry->dirty)
acc_update_self_async(TOPADDR(entry->addr, entry->dirty_lb, entry->type_size),
LENGTH_BYTE(entry->dirty_lb, entry->dirty_ub, entry->type_size),
gpu_num);
__macc_delete(gpu_num, ptr, type_size, lb, length);
}
void __macc_copyin(int gpu_num, void *ptr, int type_size, int lb, int length)
{
acc_copyin_async(TOPADDR(ptr, lb, type_size), length * (size_t)type_size, gpu_num);
__macc_data_table_insert(gpu_num, ptr, type_size, lb, lb + length - 1);
acc_wait(gpu_num);
}
void __macc_create(int gpu_num, void *ptr, int type_size, int lb, int length)
{
acc_create_async(TOPADDR(ptr, lb, type_size), length * (size_t)type_size, gpu_num);
__macc_data_table_insert(gpu_num, ptr, type_size, lb, lb + length - 1);
acc_wait(gpu_num);
}
void *__macc_malloc(unsigned long size)
{
void *ret = malloc(size);
#pragma omp parallel num_threads(__MACC_NUMGPUS)
{
__macc_create(omp_get_thread_num(), ret, 1, 0, size);
}
return ret;
}
void __macc_free(void *ptr)
{
#pragma omp parallel num_threads(__MACC_NUMGPUS)
{
int gpu_num = omp_get_thread_num();
struct __MaccDataTableEntry *entry =
__macc_data_table_find(gpu_num, ptr);
__macc_delete(gpu_num, ptr, 1, 0, entry->entire_ub + 1);
}
free(ptr);
}
void __macc_update_self(int gpu_num, void *ptr, int type_size, int lb, int length)
{
struct __MaccDataTableEntry *entry = __macc_data_table_find(gpu_num, ptr);
ptr = entry->addr;
lb += entry->offset;
int ub = lb + length - 1;
if (entry->dirty && ARE_OVERLAPPING(entry->dirty_lb, entry->dirty_ub, lb, ub)) {
int new_lb = MAX(entry->dirty_lb, lb);
int new_ub = MIN(entry->dirty_ub, ub);
acc_update_self(TOPADDR(ptr, new_lb, type_size), LENGTH_BYTE(new_lb, new_ub, type_size));
}
}
void __macc_update_device(int gpu_num, void *ptr, int type_size, int lb, int length)
{
acc_update_device(TOPADDR(ptr, lb, type_size), length * (size_t)type_size);
}
void __macc_init_access_region(int gpu_num, int *lb_set, int *ub_set)
{
lb_set[gpu_num] = INT_MAX;
ub_set[gpu_num] = -1;
}
void __macc_update_access_region(int gpu_num, int *lb_set, int *ub_set, int val)
{
lb_set[gpu_num] = MIN(lb_set[gpu_num], val);
ub_set[gpu_num] = MAX(ub_set[gpu_num], val);
}
bool __macc_region_is_overlapping(int *lb_set, int *ub_set)
{
for (int i = 0; i < __MACC_NUMGPUS - 1; i++)
for (int j = i + 1; j < __MACC_NUMGPUS; j++)
if (ARE_OVERLAPPING(lb_set[i], ub_set[i], lb_set[j], ub_set[j]))
return true;
return false;
}
// <= or < only
void __macc_calc_loop_region
(int *loop_lb_set, int *loop_ub_set,
int entire_start, int entire_end, int step, int until_equal)
{
int tmp = entire_start + step * ((entire_end - entire_start) / step);
entire_end = tmp - ((until_equal || entire_end != tmp) ? 0 : step);
int len = entire_end - entire_start + step;
int width = (int)((float)len / __MACC_NUMGPUS);
width -= width % step;
int rem = (len - width * __MACC_NUMGPUS) / step;
width -= step;
int pos = entire_start;
for (int i = 0; i < __MACC_NUMGPUS; i++) {
loop_lb_set[i] = pos;
pos = (width < 0) ? pos : MIN(pos + width + ((i < rem) ? step : 0), entire_end);
loop_ub_set[i] = pos;
pos = MIN(pos + step, entire_end);
}
}
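/* Illustrative check (ours, never called by the runtime): for the loop
   "for (i = 0; i < 10; i++)" split across 3 GPUs, the block distribution
   computed above is expected to be the inclusive ranges [0,3] [4,6] [7,9]. */
static void __macc_calc_loop_region_example(void)
{
    int lb[3], ub[3];
    int saved = __MACC_NUMGPUS;
    __MACC_NUMGPUS = 3;
    __macc_calc_loop_region(lb, ub, 0, 10, 1, 0);
    printf("[MACC example] [%d,%d] [%d,%d] [%d,%d]\n",
           lb[0], ub[0], lb[1], ub[1], lb[2], ub[2]);
    __MACC_NUMGPUS = saved;
}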
void __macc_adjust_data_region(void *ptr, int gpu_num, int *lb_set, int *ub_set)
{
struct __MaccDataTableEntry *entry = __macc_data_table_find(gpu_num, ptr);
lb_set[gpu_num] += entry->offset;
ub_set[gpu_num] += entry->offset;
}
void __macc_rewrite_loop_region_into_single(int *loop_lb_set, int *loop_ub_set)
{
loop_ub_set[0] = loop_ub_set[__MACC_NUMGPUS - 1];
for (int i = 1; i < __MACC_NUMGPUS; i++) {
loop_lb_set[i] = 1;
loop_ub_set[i] = 0;
}
}
void __macc_rewrite_data_region_into_single(int *lb_set, int *ub_set)
{
int gpu_ub = __MACC_NUMGPUS - 1;
lb_set[0] = MIN(lb_set[0], lb_set[gpu_ub]);
ub_set[0] = MAX(ub_set[0], ub_set[gpu_ub]);
}
extern void cudaMemcpyPeerAsync(void *, int, void *, int, size_t, void *);
extern void cudaDeviceEnablePeerAccess(int, int);
void __macc_p2p(int from, int to, void *ptr, size_t length_b)
{
void *from_ptr, *to_ptr;
void * s = acc_get_cuda_stream(from);
to = __macc_set_gpu_num(to);
to_ptr = acc_deviceptr(ptr);
from = __macc_set_gpu_num(from);
from_ptr = acc_deviceptr(ptr);
cudaMemcpyPeerAsync(to_ptr, to, from_ptr, from, length_b, s);
}
void __macc_sync_data(int gpu_num, void *ptr, int type_size, int lb, int ub, bool including_host)
{
void *update_addr = TOPADDR(ptr, lb, type_size);
size_t length_b = LENGTH_BYTE(lb, ub, type_size);
if (including_host)
acc_update_self_async(update_addr, length_b, gpu_num);
for (int i = 0; i < __MACC_NUMGPUS; i++) {
if (i != gpu_num)
__macc_p2p(gpu_num, i, update_addr, length_b);
}
}
// (use|def)_type: 0->non-affine, 1->nothing, 2->affine
void __macc_set_data_region(int gpu_num, void *ptr, int multi,
int use_type, int *use_lb_set, int *use_ub_set,
int def_type, int *def_lb_set, int *def_ub_set)
{
struct __MaccDataTableEntry *entry = __macc_data_table_find(gpu_num, ptr);
ptr = entry->addr;
//
// update: dirty /\ DEF_{*-i}, dirty /\ USE_{*-i}
//
if (entry->dirty && (multi || gpu_num != 0) && __MACC_NUMGPUS > 1) {
bool update_all = false;
bool including_host = false;
if (def_type == 0) {
update_all = true;
including_host = true;
}
else if (def_type == 2) {
for (int i = 0; i < __MACC_NUMGPUS; i++) {
if (i != gpu_num &&
ARE_OVERLAPPING(entry->dirty_lb, entry->dirty_ub,
def_lb_set[i], def_ub_set[i])) {
update_all = true;
including_host = true;
break;
}
}
}
if (use_type == 0) {
update_all = true;
}
// update all dirty
if (update_all) {
__macc_sync_data(gpu_num, ptr, entry->type_size, entry->dirty_lb, entry->dirty_ub,
including_host);
if (including_host)
entry->dirty = false;
}
// USE /\ dirty (don't change dirty region)
else if (entry->dirty && use_type == 2) {
int thread_num = multi ? __MACC_NUMGPUS : 1;
for (int i = 0; i < thread_num; i++) {
if (i != gpu_num && ARE_OVERLAPPING(entry->dirty_lb,
entry->dirty_ub,
use_lb_set[i],
use_ub_set[i])) {
int update_lb = MAX(entry->dirty_lb, use_lb_set[i]);
int update_ub = MIN(entry->dirty_ub, use_ub_set[i]);
void *update_addr = TOPADDR(ptr, update_lb, entry->type_size);
size_t length_b = LENGTH_BYTE(update_lb, update_ub, entry->type_size);
__macc_p2p(gpu_num, i, update_addr, length_b);
}
}
}
}
//
// DEF
//
// update: DEF_{i} (when dirty and DEF_{i} are separated)
//
if ((multi || gpu_num == 0) && def_type != 1) {
if (def_type == 0) {
entry->dirty = true;
entry->dirty_lb = entry->entire_lb;
entry->dirty_ub = entry->entire_ub;
}
else if (!(entry->dirty)) {
entry->dirty = true;
entry->dirty_lb = def_lb_set[gpu_num];
entry->dirty_ub = def_ub_set[gpu_num];
}
else if (
// overlapping
ARE_OVERLAPPING(entry->dirty_lb,
entry->dirty_ub,
def_lb_set[gpu_num],
def_ub_set[gpu_num]) ||
// adjacent
entry->dirty_lb == def_ub_set[gpu_num] + 1 ||
def_lb_set[gpu_num] == entry->dirty_ub + 1
) {
entry->dirty_lb = MIN(entry->dirty_lb, def_lb_set[gpu_num]);
entry->dirty_ub = MAX(entry->dirty_ub, def_ub_set[gpu_num]);
}
else {
__macc_sync_data(gpu_num, ptr, entry->type_size, entry->dirty_lb, entry->dirty_ub, true);
entry->dirty_lb = def_lb_set[gpu_num];
entry->dirty_ub = def_ub_set[gpu_num];
}
}
acc_wait(gpu_num);
}
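/* Simplified sketch (ours) of the dirty-interval bookkeeping performed above
   for a DEF region: overlapping or adjacent ranges are merged, while disjoint
   ranges force a flush of the old dirty range first (the real code also
   broadcasts flushed data to the host and the other GPUs via __macc_sync_data). */
static void __macc_dirty_def_sketch(bool *dirty, int *dirty_lb, int *dirty_ub,
                                    int def_lb, int def_ub)
{
    if (!*dirty) {
        *dirty = true;
        *dirty_lb = def_lb;
        *dirty_ub = def_ub;
    } else if (ARE_OVERLAPPING(*dirty_lb, *dirty_ub, def_lb, def_ub) ||
               *dirty_lb == def_ub + 1 || def_lb == *dirty_ub + 1) {
        *dirty_lb = MIN(*dirty_lb, def_lb);
        *dirty_ub = MAX(*dirty_ub, def_ub);
    } else {
        /* disjoint: the flush would happen here, then only the new range is tracked */
        *dirty_lb = def_lb;
        *dirty_ub = def_ub;
    }
}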
void __macc_init()
{
char *env_macc_numgpus = getenv("MACC_NUMGPUS");
if (env_macc_numgpus != NULL) {
__MACC_NUMGPUS = atoi(env_macc_numgpus);
}
else {
__MACC_NUMGPUS = __macc_get_num_gpus();
}
if (__MACC_NUMGPUS <= 0) {
fputs("[MACC ERROR] No GPU device found.\n", stderr);
exit(-1);
}
__MACC_TOPOLOGY = malloc(__MACC_NUMGPUS * sizeof(int));
char * topo = getenv("MACC_TOPOLOGY");
if (topo != NULL) {
int i = 0;
topo = strtok(topo, ",");
while (topo != NULL && i < __MACC_NUMGPUS) {
__MACC_TOPOLOGY[i] = atoi(topo);
topo = strtok(NULL, ",");
i++;
}
} else {
for (int i = 0; i < __MACC_NUMGPUS; i++)
__MACC_TOPOLOGY[i] = i;
}
for (int i = 0; i < __MACC_NUMGPUS; i++) {
acc_set_device_num(__MACC_TOPOLOGY[i], __MACC_DEVICE_TYPE);
for (int j = 0; j < __MACC_NUMGPUS; j++) {
if (i != j) {
cudaDeviceEnablePeerAccess(__MACC_TOPOLOGY[j], 0);
}
}
}
/* if (getenv("OMP_NESTED") == NULL || getenv("OMP_MAX_ACTIVE_LEVELS") == NULL) { */
/* fputs("[MACC ERROR] Improper setting.\n" */
/* "\n" */
/* "In order to make nested-parallel available,\n" */
/* "run the commands below before running the program.\n" */
/* "\n" */
/* "\t" "export OMP_NESTED=TRUE\n" */
/* "\t" "export OMP_MAX_ACTIVE_LEVELS=3\n" */
/* "\n", */
/* stderr); */
/* exit(-1); */
/* } */
__MACC_DATA_TABLE_SET = calloc(__MACC_NUMGPUS, sizeof(struct __MaccDataTable));
__MACC_DATA_WRAP_CACHE_SET = calloc(__MACC_NUMGPUS, sizeof(struct __MaccDataWrapCache));
// Wake up
for (int t = 0; t < 10; t++) {
printf("[MACC] Wake up (%d)\n", t);
int n = 256 * 1024 * 1024;
int * tmp = malloc(n * sizeof(int));
#pragma acc data copy(tmp[0:n])
{
#pragma acc parallel loop\
num_gangs(512) vector_length(1024) gang vector
for (int i = 1; i < n; i++)
tmp[i] = i;
#pragma acc parallel loop\
num_gangs(512) vector_length(1024) gang vector
for (int i = 1; i < n; i++)
tmp[n - i] += i;
}
free(tmp);
}
}
#undef bool
#undef true
#undef false
#undef MIN
#undef MAX
#undef TOPADDR
#undef LENGTH_BYTE
#undef ARE_OVERLAPPING
#undef ARE_OVERLAPPING_WHOLE
#endif
|
turbocompression.h |
#ifndef TURBOCOMPRESSION_H_
#define TURBOCOMPRESSION_H_
#include "turbopacking32.h"
#include "turbopacking64.h"
#include "util.h"
/**
* "turbo" FOR packing uses an optimization to bit packing due to
* TurboPFor: we use 64-bit words as much as possible.
*
*/
/**
* Compresses "length" values from "in" to "out" and return a pointer to the end
* of the compressed stream.
* The format is "number of values, minimal value, maximal value, followed by
* packed data".
*
 * Currently the implementation packs the input in blocks of 32 integers; any
 * remaining integers are stored uncompressed. Thus using this code on arrays
 * smaller than 32 is wasteful. (This limitation will be removed in the
 * future.)
*/
inline uint8_t *turbocompress(const uint32_t *in, uint32_t length,
uint8_t *out) {
memcpy(out, &length, sizeof(length));
out += sizeof(length);
if (length == 0)
return out;
uint32_t m = in[0];
uint32_t M = in[0];
for (uint32_t i = 1; i < length; ++i) {
if (in[i] > M)
M = in[i];
if (in[i] < m)
m = in[i];
}
int b = bits(static_cast<uint32_t>(M - m));
memcpy(out, &m, sizeof(m));
out += sizeof(m);
memcpy(out, &M, sizeof(M));
out += sizeof(M);
uint32_t k = 0;
for (; k + 32 <= length; k += 32) {
funcForPackArr[b](m, &in, &out);
}
// we could pack the rest, but we don't bother
memcpy(out, in, (length - k) * sizeof(uint32_t));
out += (length - k) * sizeof(uint32_t);
return out;
}
/*
* uncompress FOR data found in "in".
* The format is "number of values, minimal value, maximal value, followed by
* packed data".
* The 'nvalue' variable receives the number of decoded values (initial value is
* ignored)
* The values are stored in "out".
* We return a pointer to the end of the compressed input stream.
*/
inline const uint8_t *turbouncompress(const uint8_t *in, uint32_t *out,
uint32_t &nvalue) {
memcpy(&nvalue, in, sizeof(nvalue));
in += sizeof(nvalue);
if (nvalue == 0)
return in;
uint32_t m, M;
memcpy(&m, in, sizeof(m));
in += sizeof(m);
memcpy(&M, in, sizeof(M));
in += sizeof(M);
int b = bits(static_cast<uint32_t>(M - m));
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (uint32_t k = 0; k < nvalue / 32; ++k) {
// could code as funcForUnpackArr[b](m,&in,&out); but it hurts
// parallelization
const uint8_t *input = in + 32 * b * k / 8;
uint32_t *output = out + k * 32;
funcForUnpackArr[b](m, &input, &output);
}
in = in + (32 * b / 8) * (nvalue / 32);
out = out + 32 * (nvalue / 32);
// we could pack the rest, but we don't bother
uint32_t leftover = nvalue - nvalue / 32 * 32;
memcpy(out, in, leftover * sizeof(uint32_t));
in += leftover * sizeof(uint32_t);
return in;
}
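/*
 * Round-trip usage sketch (illustrative only; the buffer size below is a
 * generous bound rather than a tight one, and the function name is ours).
 */
inline bool turbo_roundtrip_example() {
  uint32_t data[256];
  for (uint32_t i = 0; i < 256; ++i)
    data[i] = 1000 + 3 * i; // narrow value range => few bits per packed value
  uint8_t buf[256 * sizeof(uint32_t) + 64]; // 12-byte header + packed data
  uint32_t back[256];
  const uint8_t *end_of_output = turbocompress(data, 256, buf);
  uint32_t n = 0;
  turbouncompress(buf, back, n);
  bool ok = (n == 256) && (end_of_output > buf);
  for (uint32_t i = 0; ok && i < 256; ++i)
    ok = (back[i] == data[i]);
  return ok;
}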
/**
* Compresses "length" values from "in" to "out" and return a pointer to the end
* of the compressed stream.
* The format is "number of values, minimal value, maximal value, followed by
* packed data".
*
 * Currently the implementation packs the input in blocks of 32 integers; any
 * remaining integers are stored uncompressed. Thus using this code on arrays
 * smaller than 32 is wasteful. (This limitation will be removed in the
 * future.)
*/
inline uint8_t *turbocompress64(const uint64_t *in, uint32_t length,
uint8_t *out) {
memcpy(out, &length, sizeof(length));
out += sizeof(length);
if (length == 0)
return out;
uint64_t m = in[0];
uint64_t M = in[0];
for (uint32_t i = 1; i < length; ++i) {
if (in[i] > M)
M = in[i];
if (in[i] < m)
m = in[i];
}
int b = bits64(static_cast<uint64_t>(M - m));
memcpy(out, &m, sizeof(m));
out += sizeof(m);
memcpy(out, &M, sizeof(M));
out += sizeof(M);
uint32_t k = 0;
for (; k + 32 <= length; k += 32) {
funcForPackArr64[b](m, &in, &out);
}
// we could pack the rest, but we don't bother
memcpy(out, in, (length - k) * sizeof(uint64_t));
out += (length - k) * sizeof(uint64_t);
return out;
}
/*
* uncompress FOR data found in "in".
* The format is "number of values, minimal value, maximal value, followed by
* packed data".
* The 'nvalue' variable receives the number of decoded values (initial value is
* ignored)
* The values are stored in "out".
* We return a pointer to the end of the compressed input stream.
*/
inline const uint8_t *turbouncompress64(const uint8_t *in, uint64_t *out,
uint32_t &nvalue) {
memcpy(&nvalue, in, sizeof(nvalue));
in += sizeof(nvalue);
if (nvalue == 0)
return in;
uint64_t m, M;
memcpy(&m, in, sizeof(m));
in += sizeof(m);
memcpy(&M, in, sizeof(M));
in += sizeof(M);
int b = bits64(static_cast<uint64_t>(M - m));
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (uint32_t k = 0; k < nvalue / 32; ++k) {
// could code as funcForUnpackArr[b](m,&in,&out); but it hurts
// parallelization
const uint8_t *input = in + 32 * b * k / 8;
uint64_t *output = out + k * 32;
funcForUnpackArr64[b](m, &input, &output);
}
in = in + (32 * b / 8) * (nvalue / 32);
out = out + 32 * (nvalue / 32);
// we could pack the rest, but we don't bother
uint32_t leftover = nvalue - nvalue / 32 * 32;
memcpy(out, in, leftover * sizeof(uint64_t));
in += leftover * sizeof(uint64_t);
return in;
}
#endif /* TURBOCOMPRESSION_H_ */
|
convolution_1x1_pack8_fp16s.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv1x1s1_sgemm_transform_kernel_pack8_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch)
{
// interleave
// src = inch-outch
// dst = 8b-8a-inch/8a-outch/8b
kernel_tm_pack8.create(1, inch / 8, outch / 8, (size_t)2u * 64, 64);
int q = 0;
for (; q + 7 < outch; q += 8)
{
const float* k0 = (const float*)kernel + (q + 0) * inch;
const float* k1 = (const float*)kernel + (q + 1) * inch;
const float* k2 = (const float*)kernel + (q + 2) * inch;
const float* k3 = (const float*)kernel + (q + 3) * inch;
const float* k4 = (const float*)kernel + (q + 4) * inch;
const float* k5 = (const float*)kernel + (q + 5) * inch;
const float* k6 = (const float*)kernel + (q + 6) * inch;
const float* k7 = (const float*)kernel + (q + 7) * inch;
__fp16* g0 = kernel_tm_pack8.channel(q / 8);
for (int p = 0; p + 7 < inch; p += 8)
{
for (int i = 0; i < 8; i++)
{
g0[0] = (__fp16)k0[i];
g0[1] = (__fp16)k1[i];
g0[2] = (__fp16)k2[i];
g0[3] = (__fp16)k3[i];
g0[4] = (__fp16)k4[i];
g0[5] = (__fp16)k5[i];
g0[6] = (__fp16)k6[i];
g0[7] = (__fp16)k7[i];
g0 += 8;
}
k0 += 8;
k1 += 8;
k2 += 8;
k3 += 8;
k4 += 8;
k5 += 8;
k6 += 8;
k7 += 8;
}
}
}
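// Scalar form of the interleave above (illustrative; the name and the flat
// output layout are ours and ignore ncnn's Mat channel stride). With
// a = input channel within a block of 8 and b = output channel within a block of 8:
//   packed[((q/8) * (inch/8) + p/8) * 64 + a * 8 + b] = kernel[(q + b) * inch + (p + a)]
static void conv1x1s1_pack8_interleave_reference(const float* kernel, __fp16* packed, int inch, int outch)
{
    for (int q = 0; q + 7 < outch; q += 8)
    {
        for (int p = 0; p + 7 < inch; p += 8)
        {
            for (int a = 0; a < 8; a++)
            {
                for (int b = 0; b < 8; b++)
                {
                    size_t dst = ((size_t)(q / 8) * (inch / 8) + p / 8) * 64 + a * 8 + b;
                    packed[dst] = (__fp16)kernel[(q + b) * inch + (p + a)];
                }
            }
        }
    }
}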
static void conv1x1s1_sgemm_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
const int size = w * h;
const __fp16* bias = _bias;
// interleave
Mat tmp;
if (size >= 12)
tmp.create(12, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, elemsize, elempack, opt.workspace_allocator);
else if (size >= 8)
tmp.create(8, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, elemsize, elempack, opt.workspace_allocator);
else if (size >= 4)
tmp.create(4, inch, size / 4 + (size % 4) / 2 + size % 2, elemsize, elempack, opt.workspace_allocator);
else if (size >= 2)
tmp.create(2, inch, size / 2 + size % 2, elemsize, elempack, opt.workspace_allocator);
else // if (size >= 1)
tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator);
{
int nn_size;
int remain_size_start;
nn_size = size / 12;
remain_size_start = nn_size * 12;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = ii * 12;
const __fp16* img0 = bottom_blob.channel(0);
img0 += i * 8;
__fp16* tmpptr = tmp.channel(i / 12);
for (int q = 0; q < inch; q++)
{
// transpose 12x8
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0], #64 \n"
"ld4 {v16.8h, v17.8h, v18.8h, v19.8h}, [%0] \n"
"sub %0, %0, #128 \n"
"uzp1 v20.8h, v0.8h, v4.8h \n"// 0
"uzp1 v21.8h, v16.8h, v1.8h \n"// 1
"uzp1 v22.8h, v5.8h, v17.8h \n"// 2
"uzp1 v23.8h, v2.8h, v6.8h \n"// 3
"uzp1 v24.8h, v18.8h, v3.8h \n"// 4
"uzp1 v25.8h, v7.8h, v19.8h \n"// 5
"uzp2 v26.8h, v0.8h, v4.8h \n"// 6
"uzp2 v27.8h, v16.8h, v1.8h \n"// 7
"uzp2 v28.8h, v5.8h, v17.8h \n"// 8
"uzp2 v29.8h, v2.8h, v6.8h \n"// 9
"uzp2 v30.8h, v18.8h, v3.8h \n"// 10
"uzp2 v31.8h, v7.8h, v19.8h \n"// 11
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
"st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n"
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
img0 += bottom_blob.cstep * 8;
}
}
nn_size = (size - remain_size_start) >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 8;
const __fp16* img0 = bottom_blob.channel(0);
img0 += i * 8;
__fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
for (int q = 0; q < inch; q++)
{
// transpose 8x8
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n"
"sub %0, %0, #64 \n"
"uzp1 v16.8h, v0.8h, v4.8h \n"
"uzp2 v20.8h, v0.8h, v4.8h \n"
"uzp1 v17.8h, v1.8h, v5.8h \n"
"uzp2 v21.8h, v1.8h, v5.8h \n"
"uzp1 v18.8h, v2.8h, v6.8h \n"
"uzp2 v22.8h, v2.8h, v6.8h \n"
"uzp1 v19.8h, v3.8h, v7.8h \n"
"uzp2 v23.8h, v3.8h, v7.8h \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 3;
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const __fp16* img0 = bottom_blob.channel(0);
img0 += i * 8;
__fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1", "v2", "v3");
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 2;
nn_size = (size - remain_size_start) >> 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii = 0; ii < nn_size; ii++)
{
int i = remain_size_start + ii * 2;
const __fp16* img0 = bottom_blob.channel(0);
img0 += i * 8;
__fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #256] \n"
"ld1 {v0.8h, v1.8h}, [%0] \n"
"st1 {v0.8h, v1.8h}, [%1], #32 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0", "v1");
img0 += bottom_blob.cstep * 8;
}
}
remain_size_start += nn_size << 1;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i = remain_size_start; i < size; i++)
{
const __fp16* img0 = bottom_blob.channel(0);
img0 += i * 8;
__fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
for (int q = 0; q < inch; q++)
{
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.8h}, [%0] \n"
"st1 {v0.8h}, [%1], #16 \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "v0");
img0 += bottom_blob.cstep * 8;
}
}
}
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
__fp16* outptr0 = top_blob.channel(p);
const __fp16 zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const __fp16* biasptr = bias ? bias + p * 8 : zeros;
int i = 0;
for (; i + 11 < size; i += 12)
{
__fp16* tmpptr = tmp.channel(i / 12);
const __fp16* kptr0 = kernel.channel(p);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v20.8h}, [%8] \n"
"mov v21.16b, v20.16b \n"
"mov v22.16b, v20.16b \n"
"mov v23.16b, v20.16b \n"
"mov v24.16b, v20.16b \n"
"mov v25.16b, v20.16b \n"
"mov v26.16b, v20.16b \n"
"mov v27.16b, v20.16b \n"
"mov v28.16b, v20.16b \n"
"mov v29.16b, v20.16b \n"
"mov v30.16b, v20.16b \n"
"mov v31.16b, v20.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w0123
"fmla v20.8h, v12.8h, v0.h[0] \n"
"fmla v21.8h, v12.8h, v0.h[1] \n"
"fmla v22.8h, v12.8h, v0.h[2] \n"
"fmla v23.8h, v12.8h, v0.h[3] \n"
"fmla v24.8h, v12.8h, v0.h[4] \n"
"fmla v25.8h, v12.8h, v0.h[5] \n"
"fmla v26.8h, v12.8h, v0.h[6] \n"
"fmla v27.8h, v12.8h, v0.h[7] \n"
"fmla v28.8h, v12.8h, v1.h[0] \n"
"fmla v29.8h, v12.8h, v1.h[1] \n"
"fmla v30.8h, v12.8h, v1.h[2] \n"
"fmla v31.8h, v12.8h, v1.h[3] \n"
"fmla v20.8h, v13.8h, v1.h[4] \n"
"fmla v21.8h, v13.8h, v1.h[5] \n"
"fmla v22.8h, v13.8h, v1.h[6] \n"
"fmla v23.8h, v13.8h, v1.h[7] \n"
"fmla v24.8h, v13.8h, v2.h[0] \n"
"fmla v25.8h, v13.8h, v2.h[1] \n"
"fmla v26.8h, v13.8h, v2.h[2] \n"
"fmla v27.8h, v13.8h, v2.h[3] \n"
"fmla v28.8h, v13.8h, v2.h[4] \n"
"fmla v29.8h, v13.8h, v2.h[5] \n"
"fmla v30.8h, v13.8h, v2.h[6] \n"
"fmla v31.8h, v13.8h, v2.h[7] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567
"fmla v20.8h, v14.8h, v3.h[0] \n"
"fmla v21.8h, v14.8h, v3.h[1] \n"
"fmla v22.8h, v14.8h, v3.h[2] \n"
"fmla v23.8h, v14.8h, v3.h[3] \n"
"fmla v24.8h, v14.8h, v3.h[4] \n"
"fmla v25.8h, v14.8h, v3.h[5] \n"
"fmla v26.8h, v14.8h, v3.h[6] \n"
"fmla v27.8h, v14.8h, v3.h[7] \n"
"fmla v28.8h, v14.8h, v4.h[0] \n"
"fmla v29.8h, v14.8h, v4.h[1] \n"
"fmla v30.8h, v14.8h, v4.h[2] \n"
"fmla v31.8h, v14.8h, v4.h[3] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%3], #64 \n" // w4567
"fmla v20.8h, v15.8h, v4.h[4] \n"
"fmla v21.8h, v15.8h, v4.h[5] \n"
"fmla v22.8h, v15.8h, v4.h[6] \n"
"fmla v23.8h, v15.8h, v4.h[7] \n"
"fmla v24.8h, v15.8h, v5.h[0] \n"
"fmla v25.8h, v15.8h, v5.h[1] \n"
"fmla v26.8h, v15.8h, v5.h[2] \n"
"fmla v27.8h, v15.8h, v5.h[3] \n"
"fmla v28.8h, v15.8h, v5.h[4] \n"
"fmla v29.8h, v15.8h, v5.h[5] \n"
"fmla v30.8h, v15.8h, v5.h[6] \n"
"fmla v31.8h, v15.8h, v5.h[7] \n"
"fmla v20.8h, v16.8h, v6.h[0] \n"
"fmla v21.8h, v16.8h, v6.h[1] \n"
"fmla v22.8h, v16.8h, v6.h[2] \n"
"fmla v23.8h, v16.8h, v6.h[3] \n"
"fmla v24.8h, v16.8h, v6.h[4] \n"
"fmla v25.8h, v16.8h, v6.h[5] \n"
"fmla v26.8h, v16.8h, v6.h[6] \n"
"fmla v27.8h, v16.8h, v6.h[7] \n"
"fmla v28.8h, v16.8h, v7.h[0] \n"
"fmla v29.8h, v16.8h, v7.h[1] \n"
"fmla v30.8h, v16.8h, v7.h[2] \n"
"fmla v31.8h, v16.8h, v7.h[3] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r891011
"fmla v20.8h, v17.8h, v7.h[4] \n"
"fmla v21.8h, v17.8h, v7.h[5] \n"
"fmla v22.8h, v17.8h, v7.h[6] \n"
"fmla v23.8h, v17.8h, v7.h[7] \n"
"fmla v24.8h, v17.8h, v8.h[0] \n"
"fmla v25.8h, v17.8h, v8.h[1] \n"
"fmla v26.8h, v17.8h, v8.h[2] \n"
"fmla v27.8h, v17.8h, v8.h[3] \n"
"fmla v28.8h, v17.8h, v8.h[4] \n"
"fmla v29.8h, v17.8h, v8.h[5] \n"
"fmla v30.8h, v17.8h, v8.h[6] \n"
"fmla v31.8h, v17.8h, v8.h[7] \n"
"fmla v20.8h, v18.8h, v9.h[0] \n"
"fmla v21.8h, v18.8h, v9.h[1] \n"
"fmla v22.8h, v18.8h, v9.h[2] \n"
"fmla v23.8h, v18.8h, v9.h[3] \n"
"fmla v24.8h, v18.8h, v9.h[4] \n"
"fmla v25.8h, v18.8h, v9.h[5] \n"
"fmla v26.8h, v18.8h, v9.h[6] \n"
"fmla v27.8h, v18.8h, v9.h[7] \n"
"fmla v28.8h, v18.8h, v10.h[0] \n"
"fmla v29.8h, v18.8h, v10.h[1] \n"
"fmla v30.8h, v18.8h, v10.h[2] \n"
"fmla v31.8h, v18.8h, v10.h[3] \n"
"subs %w0, %w0, #1 \n"
"fmla v20.8h, v19.8h, v10.h[4] \n"
"fmla v21.8h, v19.8h, v10.h[5] \n"
"fmla v22.8h, v19.8h, v10.h[6] \n"
"fmla v23.8h, v19.8h, v10.h[7] \n"
"fmla v24.8h, v19.8h, v11.h[0] \n"
"fmla v25.8h, v19.8h, v11.h[1] \n"
"fmla v26.8h, v19.8h, v11.h[2] \n"
"fmla v27.8h, v19.8h, v11.h[3] \n"
"fmla v28.8h, v19.8h, v11.h[4] \n"
"fmla v29.8h, v19.8h, v11.h[5] \n"
"fmla v30.8h, v19.8h, v11.h[6] \n"
"fmla v31.8h, v19.8h, v11.h[7] \n"
"bne 0b \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
"st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n"
"st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
for (; i + 7 < size; i += 8)
{
__fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8);
const __fp16* kptr0 = kernel.channel(p);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v16.8h}, [%8] \n"
"mov v17.16b, v16.16b \n"
"mov v18.16b, v16.16b \n"
"mov v19.16b, v16.16b \n"
"mov v20.16b, v16.16b \n"
"mov v21.16b, v16.16b \n"
"mov v22.16b, v16.16b \n"
"mov v23.16b, v16.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla v16.8h, v8.8h, v0.h[0] \n"
"fmla v17.8h, v8.8h, v0.h[1] \n"
"fmla v18.8h, v8.8h, v0.h[2] \n"
"fmla v19.8h, v8.8h, v0.h[3] \n"
"fmla v20.8h, v8.8h, v0.h[4] \n"
"fmla v21.8h, v8.8h, v0.h[5] \n"
"fmla v22.8h, v8.8h, v0.h[6] \n"
"fmla v23.8h, v8.8h, v0.h[7] \n"
"fmla v16.8h, v9.8h, v1.h[0] \n"
"fmla v17.8h, v9.8h, v1.h[1] \n"
"fmla v18.8h, v9.8h, v1.h[2] \n"
"fmla v19.8h, v9.8h, v1.h[3] \n"
"fmla v20.8h, v9.8h, v1.h[4] \n"
"fmla v21.8h, v9.8h, v1.h[5] \n"
"fmla v22.8h, v9.8h, v1.h[6] \n"
"fmla v23.8h, v9.8h, v1.h[7] \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567
"fmla v16.8h, v10.8h, v2.h[0] \n"
"fmla v17.8h, v10.8h, v2.h[1] \n"
"fmla v18.8h, v10.8h, v2.h[2] \n"
"fmla v19.8h, v10.8h, v2.h[3] \n"
"fmla v20.8h, v10.8h, v2.h[4] \n"
"fmla v21.8h, v10.8h, v2.h[5] \n"
"fmla v22.8h, v10.8h, v2.h[6] \n"
"fmla v23.8h, v10.8h, v2.h[7] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla v16.8h, v11.8h, v3.h[0] \n"
"fmla v17.8h, v11.8h, v3.h[1] \n"
"fmla v18.8h, v11.8h, v3.h[2] \n"
"fmla v19.8h, v11.8h, v3.h[3] \n"
"fmla v20.8h, v11.8h, v3.h[4] \n"
"fmla v21.8h, v11.8h, v3.h[5] \n"
"fmla v22.8h, v11.8h, v3.h[6] \n"
"fmla v23.8h, v11.8h, v3.h[7] \n"
"fmla v16.8h, v12.8h, v4.h[0] \n"
"fmla v17.8h, v12.8h, v4.h[1] \n"
"fmla v18.8h, v12.8h, v4.h[2] \n"
"fmla v19.8h, v12.8h, v4.h[3] \n"
"fmla v20.8h, v12.8h, v4.h[4] \n"
"fmla v21.8h, v12.8h, v4.h[5] \n"
"fmla v22.8h, v12.8h, v4.h[6] \n"
"fmla v23.8h, v12.8h, v4.h[7] \n"
"fmla v16.8h, v13.8h, v5.h[0] \n"
"fmla v17.8h, v13.8h, v5.h[1] \n"
"fmla v18.8h, v13.8h, v5.h[2] \n"
"fmla v19.8h, v13.8h, v5.h[3] \n"
"fmla v20.8h, v13.8h, v5.h[4] \n"
"fmla v21.8h, v13.8h, v5.h[5] \n"
"fmla v22.8h, v13.8h, v5.h[6] \n"
"fmla v23.8h, v13.8h, v5.h[7] \n"
"fmla v16.8h, v14.8h, v6.h[0] \n"
"fmla v17.8h, v14.8h, v6.h[1] \n"
"fmla v18.8h, v14.8h, v6.h[2] \n"
"fmla v19.8h, v14.8h, v6.h[3] \n"
"fmla v20.8h, v14.8h, v6.h[4] \n"
"fmla v21.8h, v14.8h, v6.h[5] \n"
"fmla v22.8h, v14.8h, v6.h[6] \n"
"fmla v23.8h, v14.8h, v6.h[7] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.8h, v15.8h, v7.h[0] \n"
"fmla v17.8h, v15.8h, v7.h[1] \n"
"fmla v18.8h, v15.8h, v7.h[2] \n"
"fmla v19.8h, v15.8h, v7.h[3] \n"
"fmla v20.8h, v15.8h, v7.h[4] \n"
"fmla v21.8h, v15.8h, v7.h[5] \n"
"fmla v22.8h, v15.8h, v7.h[6] \n"
"fmla v23.8h, v15.8h, v7.h[7] \n"
"bne 0b \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
}
for (; i + 3 < size; i += 4)
{
__fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const __fp16* kptr0 = kernel.channel(p);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v16.8h}, [%8] \n"
"mov v17.16b, v16.16b \n"
"mov v18.16b, v16.16b \n"
"mov v19.16b, v16.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla v16.8h, v8.8h, v0.h[0] \n"
"fmla v17.8h, v8.8h, v1.h[0] \n"
"fmla v18.8h, v8.8h, v2.h[0] \n"
"fmla v19.8h, v8.8h, v3.h[0] \n"
"fmla v16.8h, v9.8h, v0.h[1] \n"
"fmla v17.8h, v9.8h, v1.h[1] \n"
"fmla v18.8h, v9.8h, v2.h[1] \n"
"fmla v19.8h, v9.8h, v3.h[1] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla v16.8h, v10.8h, v0.h[2] \n"
"fmla v17.8h, v10.8h, v1.h[2] \n"
"fmla v18.8h, v10.8h, v2.h[2] \n"
"fmla v19.8h, v10.8h, v3.h[2] \n"
"fmla v16.8h, v11.8h, v0.h[3] \n"
"fmla v17.8h, v11.8h, v1.h[3] \n"
"fmla v18.8h, v11.8h, v2.h[3] \n"
"fmla v19.8h, v11.8h, v3.h[3] \n"
"fmla v16.8h, v12.8h, v0.h[4] \n"
"fmla v17.8h, v12.8h, v1.h[4] \n"
"fmla v18.8h, v12.8h, v2.h[4] \n"
"fmla v19.8h, v12.8h, v3.h[4] \n"
"fmla v16.8h, v13.8h, v0.h[5] \n"
"fmla v17.8h, v13.8h, v1.h[5] \n"
"fmla v18.8h, v13.8h, v2.h[5] \n"
"fmla v19.8h, v13.8h, v3.h[5] \n"
"fmla v16.8h, v14.8h, v0.h[6] \n"
"fmla v17.8h, v14.8h, v1.h[6] \n"
"fmla v18.8h, v14.8h, v2.h[6] \n"
"fmla v19.8h, v14.8h, v3.h[6] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.8h, v15.8h, v0.h[7] \n"
"fmla v17.8h, v15.8h, v1.h[7] \n"
"fmla v18.8h, v15.8h, v2.h[7] \n"
"fmla v19.8h, v15.8h, v3.h[7] \n"
"bne 0b \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19");
}
for (; i + 1 < size; i += 2)
{
__fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const __fp16* kptr0 = kernel.channel(p);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v16.8h}, [%8] \n"
"mov v17.16b, v16.16b \n"
"0: \n"
"prfm pldl1keep, [%2, #256] \n"
"ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r01
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla v16.8h, v8.8h, v0.h[0] \n"
"fmla v17.8h, v8.8h, v1.h[0] \n"
"fmla v16.8h, v9.8h, v0.h[1] \n"
"fmla v17.8h, v9.8h, v1.h[1] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla v16.8h, v10.8h, v0.h[2] \n"
"fmla v17.8h, v10.8h, v1.h[2] \n"
"fmla v16.8h, v11.8h, v0.h[3] \n"
"fmla v17.8h, v11.8h, v1.h[3] \n"
"fmla v16.8h, v12.8h, v0.h[4] \n"
"fmla v17.8h, v12.8h, v1.h[4] \n"
"fmla v16.8h, v13.8h, v0.h[5] \n"
"fmla v17.8h, v13.8h, v1.h[5] \n"
"fmla v16.8h, v14.8h, v0.h[6] \n"
"fmla v17.8h, v14.8h, v1.h[6] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.8h, v15.8h, v0.h[7] \n"
"fmla v17.8h, v15.8h, v1.h[7] \n"
"bne 0b \n"
"st1 {v16.8h, v17.8h}, [%1], #32 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17");
}
for (; i < size; i++)
{
__fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const __fp16* kptr0 = kernel.channel(p);
int nn = inch; // inch always > 0
asm volatile(
"ld1 {v16.8h}, [%8] \n"
"0: \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.8h}, [%2], #16 \n" // r0
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123
"fmla v16.8h, v8.8h, v0.h[0] \n"
"fmla v16.8h, v9.8h, v0.h[1] \n"
"prfm pldl1keep, [%3, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567
"fmla v16.8h, v10.8h, v0.h[2] \n"
"fmla v16.8h, v11.8h, v0.h[3] \n"
"fmla v16.8h, v12.8h, v0.h[4] \n"
"fmla v16.8h, v13.8h, v0.h[5] \n"
"subs %w0, %w0, #1 \n"
"fmla v16.8h, v14.8h, v0.h[6] \n"
"fmla v16.8h, v15.8h, v0.h[7] \n"
"bne 0b \n"
"st1 {v16.8h}, [%1], #16 \n"
: "=r"(nn), // %0
"=r"(outptr0), // %1
"=r"(tmpptr), // %2
"=r"(kptr0) // %3
: "0"(nn),
"1"(outptr0),
"2"(tmpptr),
"3"(kptr0),
"r"(biasptr) // %8
: "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16");
}
}
// // NOTE sgemm
// for (; p<outch; p++)
// {
// Mat out0 = top_blob.channel(p);
//
// const __fp16 bias0 = bias ? bias[p] : 0.f;
//
// __fp16* outptr0 = out0;
//
// for (int i=0; i<size; i++)
// {
// __fp16 sum = bias0;
//
// const __fp16* kptr = _kernel.channel(p);
//
// for (int q=0; q<inch; q++)
// {
// const __fp16* img0 = bottom_blob.channel(q);
//
// sum += img0[i] * kptr[0];
// kptr ++;
// }
//
// outptr0[i] = sum;
// }
// }
}
static void conv1x1s2_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
{
int w = bottom_blob.w;
int channels = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
const int tailstep = (w - 2 * outw + w) * 8;
Mat bottom_blob_shrinked;
bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < channels; p++)
{
const __fp16* r0 = bottom_blob.channel(p);
__fp16* outptr = bottom_blob_shrinked.channel(p);
for (int i = 0; i < outh; i++)
{
int j = 0;
for (; j + 3 < outw; j += 4)
{
float16x8_t _v0 = vld1q_f16(r0);
float16x8_t _v1 = vld1q_f16(r0 + 16);
float16x8_t _v2 = vld1q_f16(r0 + 32);
float16x8_t _v3 = vld1q_f16(r0 + 48);
vst1q_f16(outptr, _v0);
vst1q_f16(outptr + 8, _v1);
vst1q_f16(outptr + 16, _v2);
vst1q_f16(outptr + 24, _v3);
r0 += 64;
outptr += 32;
}
for (; j + 1 < outw; j += 2)
{
float16x8_t _v0 = vld1q_f16(r0);
float16x8_t _v1 = vld1q_f16(r0 + 16);
vst1q_f16(outptr, _v0);
vst1q_f16(outptr + 8, _v1);
r0 += 32;
outptr += 16;
}
for (; j < outw; j++)
{
float16x8_t _v = vld1q_f16(r0);
vst1q_f16(outptr, _v);
r0 += 16;
outptr += 8;
}
r0 += tailstep;
}
}
conv1x1s1_sgemm_pack8_fp16sa_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt);
}
|
opencl_sxc_fmt_plug.c | /*
* Modified by Dhiru Kholia <dhiru at openwall.com> for Keychain format.
*
* This software is Copyright (c) 2012 Lukas Odzioba <ukasz@openwall.net>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted. */
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_sxc;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_sxc);
#else
#include <string.h>
#include "sha.h"
#include <openssl/blowfish.h>
#include "aes.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "formats.h"
#include "common.h"
#include "stdint.h"
#include "misc.h"
#include "options.h"
#include "common-opencl.h"
#define FORMAT_LABEL "sxc-opencl"
#define FORMAT_NAME "StarOffice .sxc"
#define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL Blowfish"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define BINARY_SIZE 20
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE sizeof(sxc_cpu_salt)
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_ALIGN 4
typedef struct {
uint32_t length;
uint8_t v[20]; // hash of password
} sxc_password;
typedef struct {
uint32_t v[16/4];
} sxc_hash;
typedef struct {
uint32_t iterations;
uint32_t outlen;
uint8_t length;
uint8_t salt[32];
} sxc_salt;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[32 / sizeof(ARCH_WORD_32)];
typedef struct {
int cipher_type;
int checksum_type;
int iterations;
int key_size;
int iv_length;
int salt_length;
int original_length;
int length;
unsigned char iv[16];
unsigned char salt[32];
unsigned char content[1024];
} sxc_cpu_salt;
static sxc_cpu_salt *cur_salt;
static struct fmt_tests sxc_tests[] = {
{"$sxc$*0*0*1024*16*4448359828281a1e6842c31453473abfeae584fb*8*dc0248bea0c7508c*16*1d53770002fe9d8016064e5ef9423174*860*864*f00399ab17b9899cd517758ecf918d4da78099ccd3557aef5e22e137fd5b81f732fc7c167c4de0cf263b4f82b50e3d6abc65da613a36b0025d89e1a09adeb4106da28040d1019bb4b36630fc8bc94fe5b515504bf8a92ea630bb95ace074868e7c10743ec970c89895f44b975a30b6ca032354f3e73ec86b2cc7a4f7a185884026d971b37b1e0e650376a2552e27ba955c700f8903a82a6df11f6cc2ecf63290f02ffdd278f890d1db75f9e8bd0f437c4ec613d3c6dcb421bbd1067be633593ba9bd58f77ef08e0cca64c732f892567d20de8d4c444fa9c1c1adc5e4657ef9740cb69ce55c8f9e6b1cfed0739ef002f1e1c1e54a5df50a759d92354f78eb90a9d9378f36df7d1edd8002ea0d637604fcd2408494c2d42b1771e2a2a20b55044836f76db4ed71e8a53f55a04f9437946603e7246c2d2d70caf6be0de82e8977fab4de84ca3783baedac041195d8b51166b502ff80c17db78f63d3632df1d5ef5b14d8d5553fc40b072030f9e3374c93e929a490c6cfb170f04433fc46f43b9c7d27f3f8c4ed759d4a20c2e53a0701b7c3d9201390a9b5597ce8ba35bd765b662e2242b9821bbb63b6be502d2150fff37e4b7f2a6b592fd0e319a7349df320e7fe7da600a2a05628dc00e04d480c085417f676bd0518bc39d9a9be34fc0cb192d5fa5e0c657cdf7c1ad265a2e81b90ac8b28d326f98b8f33c123df83edc964d2c17a904d0df8bd9ecbf629929d6e48cadc97f49a8941ada3d219e8c0f04f37cecc9a50cc5307fd2a488c34829b05cd1615ae0d1ef0ce450529aa755f9ae38332187ffe4144990de3265afaacb9f0f0fb9c67f6210369f7a0cc5bb346412db08e0f4732f91aa8d4b32fe6eece4fba118f118f6df2fb6c53fa9bc164c9ab7a9d414d33281eb0c3cd02abe0a4dd1c170e41c1c960a8f12a48a7b5e1f748c08e1b150a4e389c110ea3368bc6c6ef2bee98dc92c6825cbf6aee20e690e116c0e6cf48d49b38035f6a9b0cd6053b9f5b9f8360024c9c608cbba3fe5e7966b656fa08dec3e3ce3178a0c0007b7d177c7c44e6a68f4c7325cb98264b1e0f391c75a6a8fd3691581fb68ef459458830f2138d0fd743631efd92b742dfeb62c5ea8502515eb65af414bf805992f9272a7b1b745970fd54e128751f8f6c0a4d5bc7872bc09c04037e1e91dc7192d68f780cdb0f7ef6b282ea883be462ffeffb7b396e30303030", "openwall"},
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*259cafe530bd09f8*16*8f53ea878d0795cfe05dcc65fb272c20*1024*1024*ffb0f736b69d8433f958e8f475f609948ad7c9dd052f2b92c14cb1b395ffcac043a3def76d58442e131701b3b53d56ea570633bb20c83068542420160f5db3cee5eece05b67b54d0d4cdf3fbfd928d94852924e391aa3f70cad598b48b946399b0cd1e9e7e7d081a888933f8a1973e83166799396e8290771463c623620b51fb5310b9b0e0de3e5597b66a091301ada3ba6a5d7515d1fcf0eff65e543b50f8fe2222619600054eaf69c7aa680c96bc403f115cab32d6d8e8bc0db3904a71ce3eb1283ca73fd6f75845d9b7d0275e4727a0f56bfbf962a9079f51849de2c9dee7f1dadbbae944f442169281004773309b0f8d68f2cf83076fd8b19afbccf5ab7dc58fb9554fee82e2c491d6434a4cef6f3209775343c840bc4bdfab6705000e67412ac74d00f5b6aba1fd21bca5213234a5a1100a9d93daa141a4936723ea77d008a35c9078167a3529706432b36b9ec012c060d093535c85ca6feb75165d620d7d663c3e76b9bf3af614556ed8560b446a8a73649cb935383a30b4fd8fd75522203e4575cf4bc2b7f01a9294310fe021c71acbf68f6f1e95f48c30c14151c51d4fb878a16272ee73753bc750cbd48007c842412ca1dcb6214215b082c00d619a5318e2ebe9149410f501170093784afc2bd71dd9f5a87b349b96661747b1627e8cba8a5c98559fb146fa7e30db4c6f648ce3c2209f84551a7a1cd46d9172ae1354b6d093f89f6f5f58d29c1d7af8830df62e67753caa8166322caa0f8adf4b61d2013d35baa7c002e1d4c83b1cba8aaa57cf4946627fa63ba7a6a5a5c803e8d5a4794845ab670ef950b918a360cd9f12e8f3424ecab1f505cb494ad35f28d12ff183471d0f47bd67e6abd3b8c8e206d11149474a19b5c13d165d8f6dc39cf579fe1000295328aeeb82e0ae8020d2f61e4c3d6e68c25a655ab72aad5e9e74af4cf27c74158fdb1a29a3d76cd658976fa0a30743247408df00a23b593f68861348a6c46af05d21a4b81fedbf5715462ec8ffc5f001a85c43058ac1fab488236588ef0bf08dd8dd7c7fce630a0a996395b503647d9a2f0dd63dd2f939eca8e1849ee4ed41a6d5672d947177e8f890692de879a20dd9e366ec494d270faf0d24fc076172a25998aac218586404687e7c77b55e77e0eff9b1c65c3f8da99deaa86411ab6aca2531d84b364349591bc73e7504163afd23c5208e321883ee611ea7e4e5885086e4fa7196e16b948cb54808b64b94106c74900e3190fd5f6068b490fd0c9c64481771527a0e2d00899fd5b7a9e7f508cc6770018fadf09d965d7a12ad3624d2161d9546d4a7937b5f961d7f7c4714786380c147e1ec6b0583503bd5a139b892831d1ea925993bb86f12e75d9010ceba230a1c286fa3d1d654a1672313cbf0763c05c622cee452f76957c42ba0e853ecda163d15e8600a702ccdc9e8f88a", "Ghe+t0Blaster"},
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*9bb755c8a4fe8c34*16*112b9d41098c8677615755361da473a6*1024*1024*b95f0f2e0e1c7b4ee61168b646804d4b70b615f3c978cec65c9a7ab515417c79625d104373fd5012c3da6b356f8408a3a75edcc8b2aad0aa38bb33edd8933bdadbffde35a350ade73ccb9df29c2996082f5e94e324496835f8dfebe15ca38950e0f435d711ef964aa09915d58287967b5e321ca195a7f90253157afe82329da9a496c97292419b9a94cdb92f919e6d54700466aff61c200c5a355905b5a37c12d77b0e4ffd23f0204cfa664f4c0545f233db8d35af5fe337b459135da398fd23101becb194db305496474ba4179a7355285a9ec935044e1831f290f5f87ed3e00925e7fb4fc6bc38d9f0cfe9abf72560400490d2fd398d2d49516b618f99168602f323dd1786bcca394830341dfbeb377f9b7ef161dc1470f5e92b6152fa7a4f428e8ae40100791491a9e1c9385298522320488f00535866ac6e08354a75b8b2fd293066da7eb6b4ad7f3e13c8dc98cd815b2393f147fdac6279f76fdac9abd0a94131fa84fe4e99634a362a56d60ce588f6e0b66d6f8b6d411511272ffe32181d20e7d2c3d4b680764607afb2c29dcb94a845b920e96f6c27575534f8b7f9ddd93bdcef0d717d0a899fa937e7d2eeeb6d5b0338757f6e69dac72524d4b6f74edce1f937008eb3653bcc31a88712af940cf47ec3f3efd83e4da89d1a6cb7da6cf8d7d41430bc81a4b5d7bb46cad687f2f505e3379143ae274eed6201c3b17c1e05e516a14cbf2351ccf9fdd46e1309afb170bd01eb8f6a1d8e12441525199455fb550e3fc689b1801332b2d985e336b158f846fcbca18fbe6ea21438cf1fb5fdbce8d6350e65d6468342880845675ec721af2fb9df917a3968b4a1a477fc4c74ee38a71a230d77c2a7cf66ae6b83804488cbd25213ebc470cd845a2691b16161a640ebb385aa2381dc91f692f6c4ca2709b5a7e94dfb4548000a29b56f1da08701945d6209fabbd1621b28849fc27810775f1a0e0204d3ae9040a8cfb1386499a39d87149cfc1579de7d059662ad25a67abd42b30bb3608f09142ca030351c3a1e921e4c7bbc11aab846ef42eb5d1418c15ada77539aca096e0678439cd1b60950d2aa0cc4d2004b1ac48dc6a454c5a8e9ea7e910047c7c83895fd614fd9dfd961631eb23757646143c2aeb03c1a6476e78fc4ccf0f02cc1f88ec1b0080a170ac6871dc183939f7a4376965b0dfa7922012582eec4846ee621edc5547a2b9c4893e7f67f76541a4bd4a91827a57b3db5cdea29a2a3cc20238d89c8145c14b037360ad27f54f87317ef70472d6b1fd9f1168bcf8aba6071257b3adebab8d4e115188ed4af3fc3574fdccb4bc7eeb00a6a442f1b96a989b735f5e6059ec72c1677b77f437dcb93066f8591a11071799c3a0ec3b48f6160976aff1928c375358837e1ef02e20397b2e9d8d9c4bff23172c9b4c0b941cb1b49b5bc070f72a14cd384", "M1racl33"},
{"$sxc$*0*0*1024*16*64983af0b26a6ee614e6c65b32c1d906f70c6397*8*ceb1edb1e3cb72fd*16*f7104c9b2789540f5fd4beef009c0139*1024*1024*709130b940a9663d0a5687133c6f78535d05a72936faed8c2f3c1b4e29423baaabcee4f0d7d57e3ad8d8c090486f974c4d0ce4be5b29ef8e1b02c01b4af1959ed0b277146a45aec35a48997b584b82697803193644eefd88a7eefcae8819839e13702f887278a597dd954babd82bf71bf8ca8559af0e5537be0264e358d36b4f5067960edf608de731e04d117de953386aadee71849edbc494fac3e6b14567a9e9c545a06d402acd3158441829f25478ed0f9086dabd2d3913b123b43c27176f8f08f30312d84e82d47654097a2bce95554357db3ce3d45a7441472067f55e4ea6244a3dedc23db4bea8f549109ffac382cf5b652c5b1ee431bcab1051567c263a9d668c5d6a15a6f8da754914746c1d3c7eb6347bdd8d6a3ac82e4c742fcf8721913c111dfd5398f2698db00f7220d2a3562e02f7f7a6505af3ba1ee10b46f2ab5b5d2f52d288fd12814c6edbcb8d50b6e8716fba0d5962747b971689fe75e94fa36ec39598ea30e15ab2b9c9f22ca04b890a13b18fb3c7a962050426bb2da08c8b993608b9c1ffd0a21e0c74e993242ead8eb30f86d7d2dcdbd4774d85c2e06adbe4b40050ff0ac1a8afe8fbc2175ec4da4676a691b1fce38421175734c20f07a604fea5287e1c33b420aa9db4de9bd97382c161b4ec0818add675e52ebf036aad779f24b824be4b2b013c470ff66cbf44f5800e128a3b328e80a5fd6295b9b3a94e915f9add6710cb9444432751a7a31c3a3422f48a5eabc26d9a52571b8447bdd0a5977ff7153d95337cef7ff2ec29774332fbeed6ee5eed5e12288cc13e14ba9d5ff3dd052e28ba96715f5b95d7ea214ebcd9e60b26308eb11370b824b5cff2644dd2117985b3c25ba8076d4025cf3a3a62da62d5e11d44422a142048e8cd00c7de6a0a55fd5dc09a3ed01dfe35b88268f351b6ff289fee8e52ac29fe32d9990e0d6d87f39727b6a762bac9d509c6ea235fc8bedc3bec2143eae9fd2cb831b798ef8261d72785002638b940947de0aad64f791f9a27e5b091e55adf4aee0649f6785bdd37e0248fedd1759d771aeacacb3ff6e7cf2d045f791428ab61710b54e869213393caf1b6bc99066678351deafc290cecc1f6b40b5532adbbab9a70408c61a437d4483b6a75cb61a55b20881efc0d849e0f60c1887f0fa091672179a145c4ab1b6487a0e939e0123d5aaffa3aec66ab593f9c25d27f22f4a73a999a4ab45e8bc7d71a85e2d40afadad1a1dc0b8389f96f91614293fa205583ef1c3440e3df50e8aa5f1a13e5929b72cd003461ff03d44d8c84bdada176b24459021d398b2b91b61a9c0b553a8714c703d32452c691a33f1581e98c2439514ca3e7deeef90850f8d6d89bf1d3a5762a56ef769ea588f5c1705bfb7b944cfbbb0632718ee3722f4e1929b35706d6413a315a11bc16349af109a7e675df2ab1eebe93", "excel123"},
{NULL}
};
static cl_int cl_error;
static sxc_password *inbuffer;
static sxc_hash *outbuffer;
static sxc_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
static struct fmt_main *self;
static size_t insize, outsize, settingsize;
#define STEP 0
#define SEED 256
// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl-autotune.h"
#include "memdbg.h"
static const char * warn[] = {
"xfer: ", ", crypt: ", ", xfer: "
};
/* ------- Helper functions ------- */
static size_t get_task_max_work_group_size()
{
return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}
static void create_clobj(size_t gws, struct fmt_main *self)
{
insize = sizeof(sxc_password) * gws;
outsize = sizeof(sxc_hash) * gws;
settingsize = sizeof(sxc_salt);
inbuffer = mem_calloc(1, insize);
outbuffer = mem_alloc(outsize);
saved_key = mem_calloc(gws, sizeof(*saved_key));
crypt_out = mem_calloc(gws, sizeof(*crypt_out));
/// Allocate memory
mem_in =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem in");
mem_setting =
clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
NULL, &cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem setting");
mem_out =
clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
&cl_error);
HANDLE_CLERROR(cl_error, "Error allocating mem out");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
&mem_in), "Error while setting mem_in kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
&mem_out), "Error while setting mem_out kernel argument");
HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
&mem_setting), "Error while setting mem_salt kernel argument");
}
static void release_clobj(void)
{
if (crypt_out) {
HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");
MEM_FREE(inbuffer);
MEM_FREE(outbuffer);
MEM_FREE(saved_key);
MEM_FREE(crypt_out);
}
}
static void done(void)
{
if (autotuned) {
release_clobj();
HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
autotuned--;
}
}
static void init(struct fmt_main *_self)
{
self = _self;
opencl_prepare_dev(gpu_id);
}
static void reset(struct db_main *db)
{
if (!autotuned) {
char build_opts[64];
snprintf(build_opts, sizeof(build_opts),
"-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
(int)sizeof(inbuffer->v),
(int)sizeof(currentsalt.salt),
(int)sizeof(outbuffer->v));
opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
gpu_id, build_opts);
crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
HANDLE_CLERROR(cl_error, "Error creating kernel");
// Initialize openCL tuning (library) for this format.
opencl_init_auto_setup(SEED, 0, NULL, warn, 1,
self, create_clobj, release_clobj,
sizeof(sxc_password), 0, db);
// Auto tune execution from shared/included code.
autotune_run(self, 1, 0, 1000);
}
}
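/*
 * Ciphertext layout handled below (illustrative summary only, as implied by
 * the field-by-field checks in valid() and the parsing in get_salt()):
 *
 *   $sxc$*cipher_type*checksum_type*iterations*key_size*checksum
 *        *iv_length*iv*salt_length*salt*original_length*length*content
 *
 * where checksum, iv, salt and content are hex-encoded; iv_length,
 * salt_length and length are the byte lengths of the iv, salt and content
 * fields, and original_length is the plaintext length used by the final
 * SHA-1 check.
 */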
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy;
char *keeptr;
char *p;
int res;
if (strncmp(ciphertext, "$sxc$*", 6))
return 0;
ctcopy = strdup(ciphertext);
keeptr = ctcopy;
ctcopy += 6;
if ((p = strtokm(ctcopy, "*")) == NULL) /* cipher type */
goto err;
res = atoi(p);
if (res != 0 && res != 1)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* checksum type */
goto err;
res = atoi(p);
if (res != 0 && res != 1)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iterations */
goto err;
res = atoi(p);
if (res <= 0)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* key size */
goto err;
res = atoi(p);
if (res != 16 && res != 32)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* checksum field (skipped) */
goto err;
if (hexlenl(p) != BINARY_SIZE * 2)
goto err;
if (!ishex(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iv length */
goto err;
res = atoi(p);
if (res <= 0 || res > 16)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* iv */
goto err;
if (hexlenl(p) != res * 2)
goto err;
if (!ishex(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* salt length */
goto err;
res = atoi(p);
if (res <= 0 || res > 32)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* salt */
goto err;
if (hexlenl(p) != res * 2)
goto err;
if (!ishex(p))
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* original length */
goto err;
res = atoi(p);
if (res <= 0 || res > 1024) /* 1024 because of "unsigned char output[1024];" in crypt_all */
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* length */
goto err;
res = atoi(p);
if (res <= 0 || res > 1024)
goto err;
if ((p = strtokm(NULL, "*")) == NULL) /* content */
goto err;
if (hexlenl(p) != res * 2)
goto err;
if (strtokm(NULL, "*") != NULL) /* the end */
goto err;
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
int i;
char *p;
static sxc_cpu_salt cs;
memset(&cs, 0, sizeof(cs));
ctcopy += 6; /* skip over "$sxc$*" */
p = strtokm(ctcopy, "*");
cs.cipher_type = atoi(p);
p = strtokm(NULL, "*");
cs.checksum_type = atoi(p);
p = strtokm(NULL, "*");
cs.iterations = atoi(p);
p = strtokm(NULL, "*");
cs.key_size = atoi(p);
strtokm(NULL, "*");
/* skip checksum field */
p = strtokm(NULL, "*");
cs.iv_length = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.iv_length; i++)
cs.iv[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.salt_length = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.salt_length; i++)
cs.salt[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
p = strtokm(NULL, "*");
cs.original_length = atoi(p);
p = strtokm(NULL, "*");
cs.length = atoi(p);
p = strtokm(NULL, "*");
for (i = 0; i < cs.length; i++)
cs.content[i] = atoi16[ARCH_INDEX(p[i * 2])] * 16
+ atoi16[ARCH_INDEX(p[i * 2 + 1])];
MEM_FREE(keeptr);
return (void *)&cs;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE+1];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
ctcopy += 6; /* skip over "$sxc$*" */
strtokm(ctcopy, "*");
strtokm(NULL, "*");
strtokm(NULL, "*");
strtokm(NULL, "*");
p = strtokm(NULL, "*");
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
MEM_FREE(keeptr);
return out;
}
static void set_salt(void *salt)
{
cur_salt = (sxc_cpu_salt*)salt;
memcpy((char*)currentsalt.salt, cur_salt->salt, cur_salt->salt_length);
currentsalt.length = cur_salt->salt_length;
currentsalt.iterations = cur_salt->iterations;
currentsalt.outlen = cur_salt->key_size;
HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
"Copy salt to gpu");
}
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
#undef set_key
static void set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
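/*
 * Processing pipeline in crypt_all() below (reconstructed from the code
 * itself, not from external documentation):
 *   1. SHA-1 each candidate password on the CPU.
 *   2. Derive a key from that digest with the PBKDF2-HMAC-SHA1 kernel on
 *      the GPU (derive_key).
 *   3. Decrypt the stored content with Blowfish in CFB64 mode using the
 *      derived key and the salt's IV.
 *   4. SHA-1 the first original_length bytes of the plaintext; cmp_all()
 *      and cmp_one() then compare this digest against the stored checksum.
 */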
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index;
size_t *lws = local_work_size ? &local_work_size : NULL;
global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(index = 0; index < count; index++)
{
unsigned char hash[20];
SHA_CTX ctx;
SHA1_Init(&ctx);
SHA1_Update(&ctx, (unsigned char *)saved_key[index], strlen(saved_key[index]));
SHA1_Final((unsigned char *)hash, &ctx);
memcpy(inbuffer[index].v, hash, 20);
inbuffer[index].length = 20;
}
/// Copy data to gpu
BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
"Copy data to gpu");
/// Run kernel
BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
NULL, &global_work_size, lws, 0, NULL,
multi_profilingEvent[1]), "Run kernel");
/// Read the result back
BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back");
if (ocl_autotune_running)
return count;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for(index = 0; index < count; index++)
{
BF_KEY bf_key;
SHA_CTX ctx;
int bf_ivec_pos;
unsigned char ivec[8];
unsigned char output[1024];
bf_ivec_pos = 0;
memcpy(ivec, cur_salt->iv, 8);
BF_set_key(&bf_key, cur_salt->key_size, (const unsigned char*)outbuffer[index].v);
BF_cfb64_encrypt(cur_salt->content, output, cur_salt->length, &bf_key, ivec, &bf_ivec_pos, 0);
SHA1_Init(&ctx);
SHA1_Update(&ctx, output, cur_salt->original_length);
SHA1_Final((unsigned char*)crypt_out[index], &ctx);
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static unsigned int iteration_count(void *salt)
{
sxc_cpu_salt *my_salt; /* the salt blob passed in is the CPU-side salt produced by get_salt() */
my_salt = salt;
return (unsigned int) my_salt->iterations;
}
struct fmt_main fmt_opencl_sxc = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{
"iteration count",
},
sxc_tests
}, {
init,
done,
reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
owl_ndarray_pool_impl.h | /*
* OWL - OCaml Scientific and Engineering Computing
* Copyright (c) 2016-2018 Liang Wang <liang.wang@cl.cam.ac.uk>
*/
#ifdef OWL_ENABLE_TEMPLATE
CAMLprim value FUN_NATIVE (spatial) (
value vInput_ptr, value vOutput_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows,
value vRow_stride, value vCol_stride,
value vPadding, value vRow_in_stride, value vCol_in_stride
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int padding = Long_val(vPadding);
int row_in_stride = Long_val(vRow_in_stride);
int col_in_stride = Long_val(vCol_in_stride);
const int input_cri = input_cols * input_rows * in_channel;
const int input_ri = input_rows * in_channel;
const int output_cri = output_cols * output_rows * in_channel;
const int output_ri = output_rows * in_channel;
memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
int pr = 0, pc = 0;
if (padding != 1){
pr = (row_stride * ( output_rows - 1) + kernel_rows - input_rows) / 2;
pc = (col_stride * ( output_cols - 1) + kernel_cols - input_cols) / 2;
if (pr < 0) pr = 0;
if (pc < 0) pc = 0;
}
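/*
 * Illustrative note (not in the original source): the padding != 1 branch
 * above appears to implement 'SAME'-style padding, where the total padding
 * along one axis is  stride * (output - 1) + kernel - input  and pr/pc hold
 * half of that (rounded down).  For example, with input_rows = 5,
 * kernel_rows = 3, row_stride = 2 and output_rows = 3,
 * pr = (2*(3-1) + 3 - 5) / 2 = 1.
 */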
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < batches; ++i) {
const int input_idx_base = i * input_cri;
const int output_idx_base_i = i * output_cri;
for (int j = 0; j < output_cols; ++j) {
const int output_idx_base_j = output_idx_base_i + j * output_ri;
for (int k = 0; k < output_rows; ++k) {
const int output_idx_base = output_idx_base_j + k * in_channel;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
for (int l = 0; l < in_channel; ++l) {
TYPE acc = INITACC;
int c = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + l;
TYPE t = *(input_ptr + input_idx);
ACCFN (acc, t);
c++;
}
}
}
int output_idx = output_idx_base + l;
*(output_ptr + output_idx) = UPDATEFN (acc, c);
}
}
}
}
return Val_unit;
}
CAMLprim value FUN_BYTE (spatial) (value * argv, int argn) {
return FUN_NATIVE (spatial) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]
);
}
CAMLprim value FUN_NATIVE (spatial_backward) (
value vInput, value vOutput_back, value vInput_back,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows,
value vRow_stride, value vCol_stride,
value vPad_rows, value vPad_cols
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *OUB = Caml_ba_array_val(vOutput_back);
struct caml_ba_array *INB = Caml_ba_array_val(vInput_back);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *output_backward_ptr = (TYPE *) OUB->data;
TYPE *input_backward_ptr = (TYPE *) INB->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int pad_rows = Long_val(vPad_rows);
int pad_cols = Long_val(vPad_cols);
const int ksize = kernel_cols * kernel_rows;
const int output_cri = output_cols * output_rows * in_channel;
const int output_ri = output_rows * in_channel;
const int input_cri = input_cols * input_rows * in_channel;
const int input_ri = input_rows * in_channel;
if (pad_cols < 0) pad_cols = 0;
if (pad_rows < 0) pad_rows = 0;
memset(input_backward_ptr, 0,
batches * input_cols * input_rows * in_channel * sizeof(TYPE));
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < batches; ++i) {
const int input_idx_base = i * input_cri;
const int output_idx_base_i = i * output_cri;
for (int j = 0; j < output_cols; ++j) {
const int output_idx_base_j = output_idx_base_i + j * output_ri;
for (int k = 0; k < output_rows; ++k) {
const int output_idx_base = output_idx_base_j + k * in_channel;
const int cstart = j * col_stride - pad_cols;
const int rstart = k * row_stride - pad_rows;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
for (int l = 0; l < in_channel; ++l) {
TYPE m;
int output_idx = output_idx_base + l;
m = *(output_backward_ptr + output_idx);
int idx[ksize];
memset(idx, 0, ksize * sizeof(int));
TYPE acc = INITACC;
int max_idx = 0;
int c = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + l;
idx[c++] = input_idx;
#ifdef OWL_NDARRAY_MAX
TYPE t = *(input_ptr + input_idx);
if (PLT(acc,t)){
acc = t;
max_idx = input_idx;
}
#endif
}
}
}
#ifdef OWL_NDARRAY_AVG
for (int i = 0; i < c; i++) {
*(input_backward_ptr + idx[i]) += UPDATEFN (m, c);
}
#else
*(input_backward_ptr + max_idx) += UPDATEFN (m, c);
#endif
}
}
}
}
return Val_unit;
}
CAMLprim value FUN_BYTE (spatial_backward) (value * argv, int argn) {
return FUN_NATIVE (spatial_backward) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]
);
}
CAMLprim value FUN_NATIVE (cuboid) (
value vInput, value vOutput,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows, value vOutput_dpts,
value vDpt_stride, value vRow_stride, value vCol_stride,
value vPadding
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *output_ptr = (TYPE *) OU->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int dpt_stride = Long_val(vDpt_stride);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int padding = Long_val(vPadding);
const int output_crdi = output_cols * output_rows * output_dpts * in_channel;
const int output_rdi = output_rows * output_dpts * in_channel;
const int output_di = output_dpts * in_channel;
const int input_crdi = input_cols * input_rows * input_dpts * in_channel;
const int input_rdi = input_rows * input_dpts * in_channel;
const int input_di = input_dpts * in_channel;
memset(output_ptr, 0, batches * output_crdi * sizeof(TYPE));
int pd, pr, pc;
if (padding == 1) {
pc = 0; pr = 0; pd = 0;
} else {
int pad_cols = col_stride * (output_cols - 1) + kernel_cols - input_cols;
int pad_rows = row_stride * (output_rows - 1) + kernel_rows - input_rows;
int pad_dpts = dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts;
pc = pad_cols / 2; if (pc < 0) pc = 0;
pr = pad_rows / 2; if (pr < 0) pr = 0;
pd = pad_dpts / 2; if (pd < 0) pd = 0;
}
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < batches; ++i) {
const int input_idx_base = i * input_crdi;
const int output_idx_base_i = i * output_crdi;
for (int j = 0; j < output_cols; ++j) {
const int output_idx_base_j = output_idx_base_i + j * output_rdi;
for (int k = 0; k < output_rows; ++k) {
const int output_idx_base_k = output_idx_base_j + k * output_di;
for (int d = 0; d < output_dpts; ++d) {
const int output_idx_base = output_idx_base_k + d * in_channel;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int dstart = d * dpt_stride - pd;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int dend = dstart + kernel_dpts;
for (int l = 0; l < in_channel; ++l) {
TYPE acc = INITACC;
int counter = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int c = dstart; c < dend; ++c){
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows &&
c >= 0 && c < input_dpts) {
int input_idx =
input_idx_base + a * input_rdi + b * input_di +
c * in_channel + l;
TYPE t = *(input_ptr + input_idx);
ACCFN (acc, t);
counter++;
}
}
}
}
int output_idx = output_idx_base + l;
*(output_ptr + output_idx) = UPDATEFN (acc, counter);
}
}
}
}
}
return Val_unit;
}
CAMLprim value FUN_BYTE (cuboid) (value * argv, int argn) {
return FUN_NATIVE (cuboid) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16]
);
}
CAMLprim value FUN_NATIVE (cuboid_backward) (
value vInput, value vOutput_back, value vInput_back,
value vBatches, value vInput_cols, value vInput_rows,
value vInput_dpts, value vIn_channel,
value vKernel_cols, value vKernel_rows, value vKernel_dpts,
value vOutput_cols, value vOutput_rows, value vOutput_dpts,
value vCol_stride, value vRow_stride, value vDpt_stride,
value vPadding
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput);
struct caml_ba_array *OUB = Caml_ba_array_val(vOutput_back);
struct caml_ba_array *INB = Caml_ba_array_val(vInput_back);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *output_backward_ptr = (TYPE *) OUB->data;
TYPE *input_backward_ptr = (TYPE *) INB->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int input_dpts = Long_val(vInput_dpts);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int kernel_dpts = Long_val(vKernel_dpts);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int output_dpts = Long_val(vOutput_dpts);
int col_stride = Long_val(vCol_stride);
int row_stride = Long_val(vRow_stride);
int dpt_stride = Long_val(vDpt_stride);
int padding = Long_val(vPadding);
const int ksize = kernel_cols * kernel_rows * kernel_dpts;
const int output_crdi = output_cols * output_rows * output_dpts * in_channel;
const int output_rdi = output_rows * output_dpts * in_channel;
const int output_di = output_dpts * in_channel;
const int input_crdi = input_cols * input_rows * input_dpts * in_channel;
const int input_rdi = input_rows * input_dpts * in_channel;
const int input_di = input_dpts * in_channel;
int pd, pr, pc;
if (padding == 1) {
pc = 0; pr = 0; pd = 0;
} else {
int pad_cols = col_stride * (output_cols - 1) + kernel_cols - input_cols;
int pad_rows = row_stride * (output_rows - 1) + kernel_rows - input_rows;
int pad_dpts = dpt_stride * (output_dpts - 1) + kernel_dpts - input_dpts;
pc = pad_cols / 2; if (pc < 0) pc = 0;
pr = pad_rows / 2; if (pr < 0) pr = 0;
pd = pad_dpts / 2; if (pd < 0) pd = 0;
}
memset(input_backward_ptr, 0, batches * input_crdi * sizeof(TYPE));
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < batches; ++i) {
const int input_idx_base = i * input_crdi;
const int output_idx_base_i = i * output_crdi;
for (int j = 0; j < output_cols; ++j) {
const int output_idx_base_j = output_idx_base_i + j * output_rdi;
for (int k = 0; k < output_rows; ++k) {
const int output_idx_base_k = output_idx_base_j + k * output_di;
for (int d = 0; d < output_dpts; ++d) {
const int output_idx_base = output_idx_base_k + d * in_channel;
const int cstart = j * col_stride - pc;
const int rstart = k * row_stride - pr;
const int dstart = d * dpt_stride - pd;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
const int dend = dstart + kernel_dpts;
for (int l = 0; l < in_channel; ++l) {
TYPE m;
int output_idx = output_idx_base + l;
m = *(output_backward_ptr + output_idx);
int idx[ksize];
memset(idx, 0, ksize * sizeof(int));
TYPE acc = INITACC;
int max_idx = 0;
int counter = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
for (int c = dstart; c < dend; ++c) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows &&
c >= 0 && c < input_dpts) {
int input_idx =
input_idx_base + a * input_rdi + b * input_di +
c * in_channel + l;
idx[counter++] = input_idx;
#ifdef OWL_NDARRAY_MAX
TYPE t = *(input_ptr + input_idx);
if (PLT(acc,t)){
acc = t;
max_idx = input_idx;
}
#endif
}
}
}
}
#ifdef OWL_NDARRAY_AVG
for (int i = 0; i < counter; i++) {
*(input_backward_ptr + idx[i]) += UPDATEFN (m, counter);
}
#else
*(input_backward_ptr + max_idx) += UPDATEFN (m, counter);
#endif
}
}
}
}
}
return Val_unit;
}
CAMLprim value FUN_BYTE (cuboid_backward) (value * argv, int argn) {
return FUN_NATIVE (cuboid_backward) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14],
argv[15], argv[16], argv[17]
);
}
#ifdef OWL_NDARRAY_MAX
CAMLprim value FUN_NATIVE (spatial_arg) (
value vInput_ptr, value vOutput_ptr, value vArgmax_ptr,
value vBatches, value vInput_cols, value vInput_rows, value vIn_channel,
value vKernel_cols, value vKernel_rows,
value vOutput_cols, value vOutput_rows,
value vRow_stride, value vCol_stride,
value vPad_rows, value vPad_cols
) {
struct caml_ba_array *IN = Caml_ba_array_val(vInput_ptr);
struct caml_ba_array *OU = Caml_ba_array_val(vOutput_ptr);
struct caml_ba_array *AG = Caml_ba_array_val(vArgmax_ptr);
TYPE *input_ptr = (TYPE *) IN->data;
TYPE *output_ptr = (TYPE *) OU->data;
int64_t *argmax_ptr = (int64_t *) AG->data;
int batches = Long_val(vBatches);
int input_cols = Long_val(vInput_cols);
int input_rows = Long_val(vInput_rows);
int in_channel = Long_val(vIn_channel);
int kernel_cols = Long_val(vKernel_cols);
int kernel_rows = Long_val(vKernel_rows);
int output_cols = Long_val(vOutput_cols);
int output_rows = Long_val(vOutput_rows);
int row_stride = Long_val(vRow_stride);
int col_stride = Long_val(vCol_stride);
int pad_rows = Long_val(vPad_rows);
int pad_cols = Long_val(vPad_cols);
if (pad_rows < 0) pad_rows = 0;
if (pad_cols < 0) pad_cols = 0;
const int input_cri = input_cols * input_rows * in_channel;
const int input_ri = input_rows * in_channel;
const int output_cri = output_cols * output_rows * in_channel;
const int output_ri = output_rows * in_channel;
memset(output_ptr, 0, batches * output_cri * sizeof(TYPE));
memset(argmax_ptr, 0, batches * output_cri * sizeof(int64_t));
#ifdef _OPENMP
#pragma omp parallel for schedule(static)
#endif /* _OPENMP */
for (int i = 0; i < batches; ++i) {
const int input_idx_base = i * input_cri;
const int output_idx_base_i = i * output_cri;
for (int j = 0; j < output_cols; ++j) {
const int output_idx_base_j = output_idx_base_i + j * output_ri;
for (int k = 0; k < output_rows; ++k) {
const int output_idx_base = output_idx_base_j + k * in_channel;
const int cstart = j * col_stride - pad_cols;
const int rstart = k * row_stride - pad_rows;
const int cend = cstart + kernel_cols;
const int rend = rstart + kernel_rows;
for (int l = 0; l < in_channel; ++l) {
TYPE acc = INITACC;
int max_idx = -1;
int c = 0;
for (int a = cstart; a < cend; ++a) {
for (int b = rstart; b < rend; ++b) {
if (a >= 0 && a < input_cols &&
b >= 0 && b < input_rows) {
int input_idx =
input_idx_base + a * input_ri + b * in_channel + l;
TYPE t = *(input_ptr + input_idx);
if (PLT(acc,t)){
acc = t;
max_idx = input_idx;
}
c++;
}
}
}
int output_idx = output_idx_base + l;
*(output_ptr + output_idx) = acc;
*(argmax_ptr + output_idx) = (int64_t) max_idx;
}
}
}
}
return Val_unit;
}
CAMLprim value FUN_BYTE (spatial_arg) (value * argv, int argn) {
return FUN_NATIVE (spatial_arg) (
argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7],
argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]
);
}
#endif /* OWL_NDARRAY_MAX */
#endif /* OWL_ENABLE_TEMPLATE */
|
mass_sum.c | #include "mass_sum.h"
#define REAL_CELL 1
double mass_sum(int ncells, int* restrict celltype,
double* restrict H, double* restrict dx, double* restrict dy){
double summer = 0.0;
#pragma omp target teams distribute parallel for simd reduction(+:summer)
for (int ic=0; ic<ncells ; ic++) {
if (celltype[ic] == REAL_CELL) {
summer += H[ic]*dx[ic]*dy[ic];
}
}
return(summer);
}
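/*
 * Illustrative usage sketch (not part of the original source).  It is
 * guarded by MASS_SUM_EXAMPLE so it does not affect normal builds; the
 * array sizes and values are arbitrary assumptions.  Note that if the
 * target directive above actually offloads to a device, celltype/H/dx/dy
 * may need explicit map() clauses or unified shared memory.
 */
#ifdef MASS_SUM_EXAMPLE
#include <stdio.h>
int main(void) {
enum { NC = 4 };
int celltype[NC] = { REAL_CELL, 0, REAL_CELL, REAL_CELL };
double H[NC] = { 1.0, 5.0, 2.0, 3.0 };
double dx[NC] = { 1.0, 1.0, 0.5, 2.0 };
double dy[NC] = { 1.0, 1.0, 2.0, 1.0 };
/* only REAL_CELL entries contribute: 1*1*1 + 2*0.5*2 + 3*2*1 = 9.0 */
printf("mass = %f\n", mass_sum(NC, celltype, H, dx, dy));
return 0;
}
#endif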
|
ej3reductionmejora.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#define N 1024
int main () {
int memsize = N*sizeof(float);
float *a = (float *) malloc (memsize);
float *b = (float *) malloc (memsize);
for (int i=0;i<N; ++i){
a[i]=b[i]=1.0f;
}
int numthreads=4;
omp_set_num_threads(numthreads);
float *mem = (float*) calloc(numthreads, sizeof(float)); /* zero-initialize the per-thread partial sums before accumulating into them */
float result = 0.0f;
#pragma omp parallel for
for (int i=0;i<N; ++i) {
*(mem+omp_get_thread_num())+= *(a+i)*(*(b+i));
}
for(int i=0;i<numthreads;++i)
result+=*(mem+i);
printf ("%f, ", result);
printf ("\n");
}
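/*
 * Minimal sketch (not in the original file): the same dot product written
 * with OpenMP's reduction clause, which removes the manual per-thread
 * partial-sum array and its false-sharing problem.  The function name
 * dot_reduction is an illustrative choice.
 */
float dot_reduction(const float *a, const float *b, int n) {
float sum = 0.0f;
#pragma omp parallel for reduction(+:sum)
for (int i = 0; i < n; ++i)
sum += a[i] * b[i];
return sum;
}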
|
GB_unaryop__abs_uint32_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint32_int8
// op(A') function: GB_tran__abs_uint32_int8
// C type: uint32_t
// A type: int8_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint32_t z = (uint32_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
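// For this operator the GB_CAST_OP(pC,pA) macro above expands, roughly, to:
// int8_t aij = Ax [pA] ;
// uint32_t x = (uint32_t) aij ;
// Cx [pC] = x ;
// i.e. load an int8_t entry, cast it to uint32_t, and store it unchanged
// (illustrative expansion written out for readability only).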
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT32 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_uint32_int8
(
uint32_t *restrict Cx,
const int8_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_uint32_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
detector.c | #include "darknet.h"
static int coco_ids[] = {1,2,3,4,5,6,7,8,9,10,11,13,14,15,16,17,18,19,20,21,22,23,24,25,27,28,31,32,33,34,35,36,37,38,39,40,41,42,43,44,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,67,70,72,73,74,75,76,77,78,79,80,81,82,84,85,86,87,88,89,90};
void train_detector(char *datacfg, char *cfgfile, char *weightfile, int *gpus, int ngpus, int clear)
{
list *options = read_data_cfg(datacfg);
char *train_images = option_find_str(options, "train", "data/train.list");
char *backup_directory = option_find_str(options, "backup", "/backup/");
srand(time(0));
char *base = basecfg(cfgfile);
printf("%s\n", base);
float avg_loss = -1;
network **nets = calloc(ngpus, sizeof(network*)); /* array of ngpus network pointers */
srand(time(0));
int seed = rand();
int i;
for(i = 0; i < ngpus; ++i){
srand(seed);
#ifdef GPU
cuda_set_device(gpus[i]);
#endif
nets[i] = load_network(cfgfile, weightfile, clear);
nets[i]->learning_rate *= ngpus;
}
srand(time(0));
network *net = nets[0];
int imgs = net->batch * net->subdivisions * ngpus;
printf("Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
data train, buffer;
layer l = net->layers[net->n - 1];
int classes = l.classes;
float jitter = l.jitter;
list *plist = get_paths(train_images);
//int N = plist->size;
char **paths = (char **)list_to_array(plist);
load_args args = get_base_args(net);
args.coords = l.coords;
args.paths = paths;
args.n = imgs;
args.m = plist->size;
args.classes = classes;
args.jitter = jitter;
args.num_boxes = l.max_boxes;
args.d = &buffer;
args.type = DETECTION_DATA;
//args.type = INSTANCE_DATA;
args.threads = 64;
pthread_t load_thread = load_data(args);
double time;
int count = 0;
//while(i*imgs < N*120){
while(get_current_batch(net) < net->max_batches){
if(l.random && count++%10 == 0){
//printf("Resizing\n");
int dim = (rand() % 10 + 10) * 32;
if (get_current_batch(net)+200 > net->max_batches) dim = 608;
//int dim = (rand() % 4 + 16) * 32;
printf("%d\n", dim);
args.w = dim;
args.h = dim;
pthread_join(load_thread, 0);
train = buffer;
free_data(train);
load_thread = load_data(args);
#pragma omp parallel for
for(i = 0; i < ngpus; ++i){
resize_network(nets[i], dim, dim);
}
net = nets[0];
}
time=what_time_is_it_now();
pthread_join(load_thread, 0);
train = buffer;
load_thread = load_data(args);
/*
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[10] + 1 + k*5);
if(!b.x) break;
printf("loaded: %f %f %f %f\n", b.x, b.y, b.w, b.h);
}
*/
/*
int zz;
for(zz = 0; zz < train.X.cols; ++zz){
image im = float_to_image(net->w, net->h, 3, train.X.vals[zz]);
int k;
for(k = 0; k < l.max_boxes; ++k){
box b = float_to_box(train.y.vals[zz] + k*5, 1);
printf("%f %f %f %f\n", b.x, b.y, b.w, b.h);
draw_bbox(im, b, 1, 1,0,0);
}
show_image(im, "truth11");
cvWaitKey(0);
save_image(im, "truth11");
}
*/
if(i%500==0 || (i<1000 && i%100 == 0))printf("Loaded: %lf seconds\n", what_time_is_it_now()-time);
time=what_time_is_it_now();
float loss = 0;
#ifdef GPU
if(ngpus == 1){
loss = train_network(net, train);
} else {
loss = train_networks(nets, ngpus, train, 4);
}
#else
loss = train_network(net, train);
#endif
if (avg_loss < 0) avg_loss = loss;
avg_loss = avg_loss*.9 + loss*.1;
i = get_current_batch(net);
if(i%500==0 || (i<1000 && i%100 == 0))printf("%ld: %f, %f avg, %f rate, %lf seconds, %d images\n", get_current_batch(net), loss, avg_loss, get_current_rate(net), what_time_is_it_now()-time, i*imgs);
if(i%100==0){
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s.backup", backup_directory, base);
save_weights(net, buff);
}
if(i%500==0 || (i < 1000 && i%100 == 0)){
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_%d.weights", backup_directory, base, i);
save_weights(net, buff);
}
free_data(train);
}
#ifdef GPU
if(ngpus != 1) sync_nets(nets, ngpus, 0);
#endif
char buff[256];
sprintf(buff, "%s/%s_final.weights", backup_directory, base);
save_weights(net, buff);
}
static int get_coco_image_id(char *filename)
{
char *p = strrchr(filename, '/');
char *c = strrchr(filename, '_');
if(c) p = c;
return atoi(p+1);
}
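/*
 * Example (illustrative, assuming standard COCO file naming): for
 * "COCO_val2014_000000123456.jpg" the last '_'-separated token is
 * "000000123456.jpg", so atoi(p+1) above yields the image id 123456.
 */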
static void print_cocos(FILE *fp, char *image_path, detection *dets, int num_boxes, int classes, int w, int h)
{
int i, j;
int image_id = get_coco_image_id(image_path);
for(i = 0; i < num_boxes; ++i){
float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;
if (xmin < 0) xmin = 0;
if (ymin < 0) ymin = 0;
if (xmax > w) xmax = w;
if (ymax > h) ymax = h;
float bx = xmin;
float by = ymin;
float bw = xmax - xmin;
float bh = ymax - ymin;
for(j = 0; j < classes; ++j){
if (dets[i].prob[j]) fprintf(fp, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f},\n", image_id, coco_ids[j], bx, by, bw, bh, dets[i].prob[j]);
}
}
}
void print_detector_detections(FILE **fps, char *id, detection *dets, int total, int classes, int w, int h)
{
int i, j;
for(i = 0; i < total; ++i){
float xmin = dets[i].bbox.x - dets[i].bbox.w/2. + 1;
float xmax = dets[i].bbox.x + dets[i].bbox.w/2. + 1;
float ymin = dets[i].bbox.y - dets[i].bbox.h/2. + 1;
float ymax = dets[i].bbox.y + dets[i].bbox.h/2. + 1;
if (xmin < 1) xmin = 1;
if (ymin < 1) ymin = 1;
if (xmax > w) xmax = w;
if (ymax > h) ymax = h;
for(j = 0; j < classes; ++j){
if (dets[i].prob[j]) fprintf(fps[j], "%s %f %f %f %f %f\n", id, dets[i].prob[j],
xmin, ymin, xmax, ymax);
}
}
}
void print_imagenet_detections(FILE *fp, int id, detection *dets, int total, int classes, int w, int h)
{
int i, j;
for(i = 0; i < total; ++i){
float xmin = dets[i].bbox.x - dets[i].bbox.w/2.;
float xmax = dets[i].bbox.x + dets[i].bbox.w/2.;
float ymin = dets[i].bbox.y - dets[i].bbox.h/2.;
float ymax = dets[i].bbox.y + dets[i].bbox.h/2.;
if (xmin < 0) xmin = 0;
if (ymin < 0) ymin = 0;
if (xmax > w) xmax = w;
if (ymax > h) ymax = h;
for(j = 0; j < classes; ++j){
int class = j;
if (dets[i].prob[class]) fprintf(fp, "%d %d %f %f %f %f %f\n", id, j+1, dets[i].prob[class],
xmin, ymin, xmax, ymax);
}
}
}
void validate_detector_flip(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
int j;
list *options = read_data_cfg(datacfg);
char *valid_images = option_find_str(options, "valid", "data/train.list");
char *name_list = option_find_str(options, "names", "data/names.list");
char *prefix = option_find_str(options, "results", "results");
char **names = get_labels(name_list);
char *mapf = option_find_str(options, "map", 0);
int *map = 0;
if (mapf) map = read_map(mapf);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 2);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths(valid_images);
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int classes = l.classes;
char buff[1024];
char *type = option_find_str(options, "eval", "voc");
FILE *fp = 0;
FILE **fps = 0;
int coco = 0;
int imagenet = 0;
if(0==strcmp(type, "coco")){
if(!outfile) outfile = "coco_results";
snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
fp = fopen(buff, "w");
fprintf(fp, "[\n");
coco = 1;
} else if(0==strcmp(type, "imagenet")){
if(!outfile) outfile = "imagenet-detection";
snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
fp = fopen(buff, "w");
imagenet = 1;
classes = 200;
} else {
if(!outfile) outfile = "comp4_det_test_";
fps = calloc(classes, sizeof(FILE *));
for(j = 0; j < classes; ++j){
snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
fps[j] = fopen(buff, "w");
}
}
int m = plist->size;
int i=0;
int t;
float thresh = .005;
float nms = .45;
int nthreads = 4;
image *val = calloc(nthreads, sizeof(image));
image *val_resized = calloc(nthreads, sizeof(image));
image *buf = calloc(nthreads, sizeof(image));
image *buf_resized = calloc(nthreads, sizeof(image));
pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
image input = make_image(net->w, net->h, net->c*2);
load_args args = {0};
args.w = net->w;
args.h = net->h;
//args.type = IMAGE_DATA;
args.type = LETTERBOX_DATA;
for(t = 0; t < nthreads; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
double start = what_time_is_it_now();
for(i = nthreads; i < m+nthreads; i += nthreads){
fprintf(stderr, "%d\n", i);
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
pthread_join(thr[t], 0);
val[t] = buf[t];
val_resized[t] = buf_resized[t];
}
for(t = 0; t < nthreads && i+t < m; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
char *path = paths[i+t-nthreads];
char *id = basecfg(path);
copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data, 1);
flip_image(val_resized[t]);
copy_cpu(net->w*net->h*net->c, val_resized[t].data, 1, input.data + net->w*net->h*net->c, 1);
network_predict(net, input.data);
int w = val[t].w;
int h = val[t].h;
int num = 0;
detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &num);
if (nms) do_nms_sort(dets, num, classes, nms);
if (coco){
print_cocos(fp, path, dets, num, classes, w, h);
} else if (imagenet){
print_imagenet_detections(fp, i+t-nthreads+1, dets, num, classes, w, h);
} else {
print_detector_detections(fps, id, dets, num, classes, w, h);
}
free_detections(dets, num);
free(id);
free_image(val[t]);
free_image(val_resized[t]);
}
}
for(j = 0; j < classes; ++j){
if(fps) fclose(fps[j]);
}
if(coco){
fseek(fp, -2, SEEK_CUR);
fprintf(fp, "\n]\n");
fclose(fp);
}
fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
void validate_detector(char *datacfg, char *cfgfile, char *weightfile, char *outfile)
{
int j;
list *options = read_data_cfg(datacfg);
char *valid_images = option_find_str(options, "valid", "data/train.list");
char *name_list = option_find_str(options, "names", "data/names.list");
char *prefix = option_find_str(options, "results", "results");
char **names = get_labels(name_list);
char *mapf = option_find_str(options, "map", 0);
int *map = 0;
if (mapf) map = read_map(mapf);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths(valid_images);
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int classes = l.classes;
char buff[1024];
char *type = option_find_str(options, "eval", "voc");
FILE *fp = 0;
FILE **fps = 0;
int coco = 0;
int imagenet = 0;
if(0==strcmp(type, "coco")){
if(!outfile) outfile = "coco_results";
snprintf(buff, 1024, "%s/%s.json", prefix, outfile);
fp = fopen(buff, "w");
fprintf(fp, "[\n");
coco = 1;
} else if(0==strcmp(type, "imagenet")){
if(!outfile) outfile = "imagenet-detection";
snprintf(buff, 1024, "%s/%s.txt", prefix, outfile);
fp = fopen(buff, "w");
imagenet = 1;
classes = 200;
} else {
if(!outfile) outfile = "comp4_det_test_";
fps = calloc(classes, sizeof(FILE *));
for(j = 0; j < classes; ++j){
snprintf(buff, 1024, "%s/%s%s.txt", prefix, outfile, names[j]);
fps[j] = fopen(buff, "w");
}
}
int m = plist->size;
int i=0;
int t;
float thresh = .005;
float nms = .45;
int nthreads = 4;
image *val = calloc(nthreads, sizeof(image));
image *val_resized = calloc(nthreads, sizeof(image));
image *buf = calloc(nthreads, sizeof(image));
image *buf_resized = calloc(nthreads, sizeof(image));
pthread_t *thr = calloc(nthreads, sizeof(pthread_t));
load_args args = {0};
args.w = net->w;
args.h = net->h;
//args.type = IMAGE_DATA;
args.type = LETTERBOX_DATA;
for(t = 0; t < nthreads; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
double start = what_time_is_it_now();
for(i = nthreads; i < m+nthreads; i += nthreads){
fprintf(stderr, "%d\n", i);
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
pthread_join(thr[t], 0);
val[t] = buf[t];
val_resized[t] = buf_resized[t];
}
for(t = 0; t < nthreads && i+t < m; ++t){
args.path = paths[i+t];
args.im = &buf[t];
args.resized = &buf_resized[t];
thr[t] = load_data_in_thread(args);
}
for(t = 0; t < nthreads && i+t-nthreads < m; ++t){
char *path = paths[i+t-nthreads];
char *id = basecfg(path);
float *X = val_resized[t].data;
network_predict(net, X);
int w = val[t].w;
int h = val[t].h;
int nboxes = 0;
detection *dets = get_network_boxes(net, w, h, thresh, .5, map, 0, &nboxes);
if (nms) do_nms_sort(dets, nboxes, classes, nms);
if (coco){
print_cocos(fp, path, dets, nboxes, classes, w, h);
} else if (imagenet){
print_imagenet_detections(fp, i+t-nthreads+1, dets, nboxes, classes, w, h);
} else {
print_detector_detections(fps, id, dets, nboxes, classes, w, h);
}
free_detections(dets, nboxes);
free(id);
free_image(val[t]);
free_image(val_resized[t]);
}
}
for(j = 0; j < classes; ++j){
if(fps) fclose(fps[j]);
}
if(coco){
fseek(fp, -2, SEEK_CUR);
fprintf(fp, "\n]\n");
fclose(fp);
}
fprintf(stderr, "Total Detection Time: %f Seconds\n", what_time_is_it_now() - start);
}
void validate_detector_recall(char *cfgfile, char *weightfile)
{
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
fprintf(stderr, "Learning Rate: %g, Momentum: %g, Decay: %g\n", net->learning_rate, net->momentum, net->decay);
srand(time(0));
list *plist = get_paths("data/coco_val_5k.list");
char **paths = (char **)list_to_array(plist);
layer l = net->layers[net->n-1];
int j, k;
int m = plist->size;
int i=0;
float thresh = .001;
float iou_thresh = .5;
float nms = .4;
int total = 0;
int correct = 0;
int proposals = 0;
float avg_iou = 0;
for(i = 0; i < m; ++i){
char *path = paths[i];
image orig = load_image_color(path, 0, 0);
image sized = resize_image(orig, net->w, net->h);
char *id = basecfg(path);
network_predict(net, sized.data);
int nboxes = 0;
detection *dets = get_network_boxes(net, sized.w, sized.h, thresh, .5, 0, 1, &nboxes);
if (nms) do_nms_obj(dets, nboxes, 1, nms);
char labelpath[4096];
find_replace(path, "images", "labels", labelpath);
find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".jpg", ".txt", labelpath);
find_replace(labelpath, ".JPEG", ".txt", labelpath);
int num_labels = 0;
box_label *truth = read_boxes(labelpath, &num_labels);
for(k = 0; k < nboxes; ++k){
if(dets[k].objectness > thresh){
++proposals;
}
}
for (j = 0; j < num_labels; ++j) {
++total;
box t = {truth[j].x, truth[j].y, truth[j].w, truth[j].h};
float best_iou = 0;
for(k = 0; k < l.w*l.h*l.n; ++k){
float iou = box_iou(dets[k].bbox, t);
if(dets[k].objectness > thresh && iou > best_iou){
best_iou = iou;
}
}
avg_iou += best_iou;
if(best_iou > iou_thresh){
++correct;
}
}
fprintf(stderr, "%5d %5d %5d\tRPs/Img: %.2f\tIOU: %.2f%%\tRecall:%.2f%%\n", i, correct, total, (float)proposals/(i+1), avg_iou*100/total, 100.*correct/total);
free(id);
free_image(orig);
free_image(sized);
}
}
void test_detector(char *datacfg, char *cfgfile, char *weightfile, char *filename, float thresh, float hier_thresh, char *outfile, int fullscreen)
{
list *options = read_data_cfg(datacfg);
char *name_list = option_find_str(options, "names", "data/names.list");
char **names = get_labels(name_list);
image **alphabet = load_alphabet();
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
double time;
char buff[256];
char *input = buff;
float nms=.45;
while(1){
if(filename){
strncpy(input, filename, 256);
} else {
printf("Enter Image Path: ");
fflush(stdout);
input = fgets(input, 256, stdin);
if(!input) return;
strtok(input, "\n");
}
image im = load_image_color(input,0,0);
image sized = letterbox_image(im, net->w, net->h);
//image sized = resize_image(im, net->w, net->h);
//image sized2 = resize_max(im, net->w);
//image sized = crop_image(sized2, -((net->w - sized2.w)/2), -((net->h - sized2.h)/2), net->w, net->h);
//resize_network(net, sized.w, sized.h);
layer l = net->layers[net->n-1];
float *X = sized.data;
time=what_time_is_it_now();
network_predict(net, X);
printf("%s: Predicted in %f seconds.\n", input, what_time_is_it_now()-time);
int nboxes = 0;
detection *dets = get_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 1, &nboxes);
//printf("%d\n", nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
draw_detections(im, dets, nboxes, thresh, names, alphabet, l.classes);
free_detections(dets, nboxes);
if(outfile){
save_image(im, outfile);
}
else{
save_image(im, "predictions");
#ifdef OPENCV
make_window("predictions", 512, 512, 0);
show_image(im, "predictions", 0);
#endif
}
free_image(im);
free_image(sized);
if (filename) break;
}
}
/*
void censor_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
float nms = .45;
while(1){
image in = get_image_from_stream(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
float *X = in_s.data;
network_predict(net, X);
int nboxes = 0;
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 0, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int left = b.x-b.w/2.;
int top = b.y-b.h/2.;
censor_image(in, left, top, b.w, b.h);
}
}
show_image(in, base);
cvWaitKey(10);
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream(cap);
free_image(in);
}
}
#endif
}
void extract_detector(char *datacfg, char *cfgfile, char *weightfile, int cam_index, const char *filename, int class, float thresh, int skip)
{
#ifdef OPENCV
char *base = basecfg(cfgfile);
network *net = load_network(cfgfile, weightfile, 0);
set_batch_network(net, 1);
srand(2222222);
CvCapture * cap;
int w = 1280;
int h = 720;
if(filename){
cap = cvCaptureFromFile(filename);
}else{
cap = cvCaptureFromCAM(cam_index);
}
if(w){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, w);
}
if(h){
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, h);
}
if(!cap) error("Couldn't connect to webcam.\n");
cvNamedWindow(base, CV_WINDOW_NORMAL);
cvResizeWindow(base, 512, 512);
float fps = 0;
int i;
int count = 0;
float nms = .45;
while(1){
image in = get_image_from_stream(cap);
//image in_s = resize_image(in, net->w, net->h);
image in_s = letterbox_image(in, net->w, net->h);
layer l = net->layers[net->n-1];
show_image(in, base);
int nboxes = 0;
float *X = in_s.data;
network_predict(net, X);
detection *dets = get_network_boxes(net, in.w, in.h, thresh, 0, 0, 1, &nboxes);
//if (nms) do_nms_obj(boxes, probs, l.w*l.h*l.n, l.classes, nms);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
for(i = 0; i < nboxes; ++i){
if(dets[i].prob[class] > thresh){
box b = dets[i].bbox;
int size = b.w*in.w > b.h*in.h ? b.w*in.w : b.h*in.h;
int dx = b.x*in.w-size/2.;
int dy = b.y*in.h-size/2.;
image bim = crop_image(in, dx, dy, size, size);
char buff[2048];
sprintf(buff, "results/extract/%07d", count);
++count;
save_image(bim, buff);
free_image(bim);
}
}
free_detections(dets, nboxes);
free_image(in_s);
free_image(in);
float curr = 0;
fps = .9*fps + .1*curr;
for(i = 0; i < skip; ++i){
image in = get_image_from_stream(cap);
free_image(in);
}
}
#endif
}
*/
/*
void network_detect(network *net, image im, float thresh, float hier_thresh, float nms, detection *dets)
{
network_predict_image(net, im);
layer l = net->layers[net->n-1];
int nboxes = num_boxes(net);
fill_network_boxes(net, im.w, im.h, thresh, hier_thresh, 0, 0, dets);
if (nms) do_nms_sort(dets, nboxes, l.classes, nms);
}
*/
void run_detector(int argc, char **argv)
{
char *prefix = find_char_arg(argc, argv, "-prefix", 0);
float thresh = find_float_arg(argc, argv, "-thresh", .5);
float hier_thresh = find_float_arg(argc, argv, "-hier", .5);
int cam_index = find_int_arg(argc, argv, "-c", 0);
int frame_skip = find_int_arg(argc, argv, "-s", 0);
int avg = find_int_arg(argc, argv, "-avg", 3);
if(argc < 4){
fprintf(stderr, "usage: %s %s [train/test/valid] [cfg] [weights (optional)]\n", argv[0], argv[1]);
return;
}
char *gpu_list = find_char_arg(argc, argv, "-gpus", 0);
char *outfile = find_char_arg(argc, argv, "-out", 0);
int *gpus = 0;
int gpu = 0;
int ngpus = 0;
if(gpu_list){
printf("%s\n", gpu_list);
int len = strlen(gpu_list);
ngpus = 1;
int i;
for(i = 0; i < len; ++i){
if (gpu_list[i] == ',') ++ngpus;
}
gpus = calloc(ngpus, sizeof(int));
for(i = 0; i < ngpus; ++i){
gpus[i] = atoi(gpu_list);
gpu_list = strchr(gpu_list, ',')+1;
}
} else {
gpu = gpu_index;
gpus = &gpu;
ngpus = 1;
}
int clear = find_arg(argc, argv, "-clear");
int fullscreen = find_arg(argc, argv, "-fullscreen");
int width = find_int_arg(argc, argv, "-w", 0);
int height = find_int_arg(argc, argv, "-h", 0);
int fps = find_int_arg(argc, argv, "-fps", 0);
//int class = find_int_arg(argc, argv, "-class", 0);
char *datacfg = argv[3];
char *cfg = argv[4];
char *weights = (argc > 5) ? argv[5] : 0;
char *filename = (argc > 6) ? argv[6]: 0;
if(0==strcmp(argv[2], "test")) test_detector(datacfg, cfg, weights, filename, thresh, hier_thresh, outfile, fullscreen);
else if(0==strcmp(argv[2], "train")) train_detector(datacfg, cfg, weights, gpus, ngpus, clear);
else if(0==strcmp(argv[2], "valid")) validate_detector(datacfg, cfg, weights, outfile);
else if(0==strcmp(argv[2], "valid2")) validate_detector_flip(datacfg, cfg, weights, outfile);
else if(0==strcmp(argv[2], "recall")) validate_detector_recall(cfg, weights);
else if(0==strcmp(argv[2], "demo")) {
list *options = read_data_cfg(datacfg);
int classes = option_find_int(options, "classes", 20);
char *name_list = option_find_str(options, "names", "data/names.list");
char **names = get_labels(name_list);
demo(cfg, weights, thresh, cam_index, filename, names, classes, frame_skip, prefix, avg, hier_thresh, width, height, fps, fullscreen);
}
//else if(0==strcmp(argv[2], "extract")) extract_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
//else if(0==strcmp(argv[2], "censor")) censor_detector(datacfg, cfg, weights, cam_index, filename, class, thresh, frame_skip);
}
|
DenseTile.h | /******************************************************************************
* ** Copyright (c) 2016, Intel Corporation **
* ** All rights reserved. **
* ** **
* ** Redistribution and use in source and binary forms, with or without **
* ** modification, are permitted provided that the following conditions **
* ** are met: **
* ** 1. Redistributions of source code must retain the above copyright **
* ** notice, this list of conditions and the following disclaimer. **
* ** 2. Redistributions in binary form must reproduce the above copyright **
* ** notice, this list of conditions and the following disclaimer in the **
* ** documentation and/or other materials provided with the distribution. **
* ** 3. Neither the name of the copyright holder nor the names of its **
* ** contributors may be used to endorse or promote products derived **
* ** from this software without specific prior written permission. **
* ** **
* ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
* ** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
* ** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
* ** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
* ** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
* ** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
* ** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
* ** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
* ** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
* ** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
* ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* * ******************************************************************************/
/* Michael Anderson (Intel Corp.)
* * ******************************************************************************/
#ifndef SRC_DENSETILE_H_
#define SRC_DENSETILE_H_
#include <string>
#include <vector>
#include "src/bitvector.h"
template <typename T>
class DenseTile {
public:
std::string name;
int m;
int n;
int nnz;
int empty_flag;
int num_ints;
int * bit_vector;
T* value;
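  // Storage layout: value[] holds the m-by-n tile in column-major order (entry (i,j) at i + j*m),
  // and bit_vector is an occupancy bitmap with one bit per entry, packed into num_ints ints.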
DenseTile() : name("TEMP"), m(0), n(0), nnz(0), empty_flag(true), num_ints(0) {}
DenseTile(int _m, int _n) : name("TEMP"), m(_m), n(_n), nnz(0), empty_flag(true) {
num_ints = (_m * _n + sizeof(int) * 8 - 1) / (sizeof(int) * 8);
}
DenseTile(edge_t<T>* edges, int _m, int _n, int _nnz, int row_start,
int col_start)
: name("TEMP"), m(_m), n(_n), nnz(_nnz), empty_flag(false) {
num_ints = (_m * _n + sizeof(int) * 8 - 1) / (sizeof(int) * 8);
alloc();
// convert to Dense
for (uint64_t i = 0; i < (uint64_t)nnz; i++) {
int src = edges[i].src - 1;
int dst = edges[i].dst - 1;
set_bitvector(src + dst * m, bit_vector);
value[src + dst * m] = edges[i].val;
}
}
void set(int idx, int idy, T val)
{
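    // idx selects the (1-based) column and idy the (1-based) row, matching the 1-based src/dst edge convention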
if(isEmpty()) {
alloc();
}
value[(idy-1) + (idx-1) * m] = val;
if(!get_bitvector((idy-1) + (idx-1) * m, bit_vector)) nnz++;
empty_flag = false;
set_bitvector((idy-1) + (idx-1) * m, bit_vector);
}
T get(int idx, int idy)
{
assert(!isEmpty());
return value[(idy-1) + (idx-1) * m];
}
void alloc() {
value = reinterpret_cast<T*>(
_mm_malloc((uint64_t)(m * n) * (uint64_t)sizeof(T), 64));
bit_vector = reinterpret_cast<int*>(
_mm_malloc((uint64_t)(num_ints) * (uint64_t)sizeof(int), 64));
memset(bit_vector, 0, (num_ints * sizeof(int)));
memset(value, 0, (m * n * sizeof(T)));
empty_flag = false;
nnz = 0;
}
bool isEmpty() const { return empty_flag; }
void get_edges(edge_t<T>* edges, int row_start, int col_start) {
int nnzcnt = 0;
for (int j = 0; j < n; j++) {
for (int i = 0; i < m; i++) {
if (get_bitvector(i + j * m, bit_vector)) {
edges[nnzcnt].src = i + 1;
edges[nnzcnt].dst = j + 1;
edges[nnzcnt].val = value[i + j * m];
nnzcnt++;
}
}
}
assert(nnzcnt == this->nnz);
}
DenseTile& operator=(DenseTile other) {
this->m = other.m;
this->n = other.n;
this->nnz = other.nnz;
this->empty_flag = other.empty_flag;
this->value = other.value;
this->bit_vector = other.bit_vector;
this->num_ints = other.num_ints;
return *this;
}
void clear() {
if (!isEmpty()) {
_mm_free(value);
_mm_free(bit_vector);
}
nnz = 0;
empty_flag = true;
}
~DenseTile() {}
void send_tile_metadata(int myrank, int dst_rank, int output_rank) {
MPI_Send(&(nnz), 1, MPI_INT, dst_rank, 0, MPI_COMM_WORLD);
MPI_Send(&(m), 1, MPI_INT, dst_rank, 0, MPI_COMM_WORLD);
MPI_Send(&(n), 1, MPI_INT, dst_rank, 0, MPI_COMM_WORLD);
MPI_Send(&(num_ints), 1, MPI_INT, dst_rank, 0, MPI_COMM_WORLD);
}
void recv_tile_metadata(int myrank, int src_rank, int output_rank) {
int new_nnz;
MPI_Recv(&(new_nnz), 1, MPI_INT, src_rank, 0, MPI_COMM_WORLD,
MPI_STATUS_IGNORE);
MPI_Recv(&(m), 1, MPI_INT, src_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(&(n), 1, MPI_INT, src_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(&(num_ints), 1, MPI_INT, src_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
if (isEmpty()) {
value = reinterpret_cast<T*>(
_mm_malloc((uint64_t)(m * n) * (uint64_t)sizeof(T), 64));
bit_vector = reinterpret_cast<int*>(
_mm_malloc((uint64_t)(num_ints) * (uint64_t)sizeof(int), 64));
memset(bit_vector, 0, (num_ints * sizeof(int)));
empty_flag=false;
}
nnz = new_nnz;
}
void send_tile(int myrank, int dst_rank, int output_rank, bool block, std::vector<MPI_Request>* reqs) {
block = true;
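    // NOTE: block is forced to true here, so the non-blocking MPI_Isend path below is currently never taken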
if (!isEmpty()) {
/*
// Convert to edgelist
edge_t<T>* edges = new edge_t<T>[nnz];
int nzcnt = 0;
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
if (get_bitvector(i + j * m, bit_vector)) {
edges[nzcnt].src = i + 1;
edges[nzcnt].dst = j + 1;
edges[nzcnt].val = value[i + j * m];
nzcnt++;
}
}
}
*/
//assert(nzcnt == nnz);
if (block) {
//MPI_Send(edges, (uint64_t)nnz * sizeof(edge_t<T>), MPI_BYTE, dst_rank,
// 0, MPI_COMM_WORLD);
MPI_Send(value, (uint64_t) (m * n * sizeof(T)), MPI_BYTE,
dst_rank, 0, MPI_COMM_WORLD);
MPI_Send(bit_vector, (uint64_t) (num_ints) * sizeof(int),
MPI_BYTE, dst_rank, 0, MPI_COMM_WORLD);
} else {
MPI_Request r1, r2;
//MPI_Isend(edges, (uint64_t)nnz * sizeof(edge_t<T>), MPI_BYTE, dst_rank,
// 0, MPI_COMM_WORLD, &r1);
MPI_Isend(value, (uint64_t) (m * n * sizeof(T)), MPI_BYTE,
dst_rank, 0, MPI_COMM_WORLD, &r1);
MPI_Isend(bit_vector, (uint64_t) (num_ints) * sizeof(int),
MPI_BYTE, dst_rank, 0, MPI_COMM_WORLD, &r2);
(*reqs).push_back(r1);
(*reqs).push_back(r2);
}
//delete[] edges;
}
}
void recv_tile(int myrank, int src_rank, int output_rank, bool block,
std::vector<MPI_Request>* reqs) {
block = true;
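    // NOTE: as in send_tile, block is forced to true, so the non-blocking MPI_Irecv path below is currently never taken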
if (!isEmpty()) {
//edge_t<T>* edges = new edge_t<T>[nnz]; // the edge-list receive path below is commented out, so skip this (otherwise leaked) allocation
if (block) {
//MPI_Recv(edges, (uint64_t)nnz * sizeof(edge_t<T>), MPI_BYTE, src_rank,
// 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(value, (uint64_t) (m * n * sizeof(T)), MPI_BYTE,
src_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
MPI_Recv(bit_vector, (uint64_t) (num_ints * sizeof(int)),
MPI_BYTE, src_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
} else {
MPI_Request r1, r2;
//MPI_Irecv(edges, (uint64_t)nnz * sizeof(edge_t<T>), MPI_BYTE, src_rank,
// 0, MPI_COMM_WORLD, &r1);
MPI_Irecv(value, (uint64_t) (m * n * sizeof(T)), MPI_BYTE,
src_rank, 0, MPI_COMM_WORLD, &r1);
MPI_Irecv(bit_vector, (uint64_t) (num_ints * sizeof(int)),
MPI_BYTE, src_rank, 0, MPI_COMM_WORLD, &r2);
(*reqs).push_back(r1);
(*reqs).push_back(r2);
}
//#pragma omp parallel for
/*
for (int nz = 0; nz < nnz; nz++) {
int i = edges[nz].src - 1;
int j = edges[nz].dst - 1;
value[i + j * m] = edges[nz].val;
set_bitvector(i + j * m, bit_vector);
}
delete[] edges;
*/
}
}
};
#endif // SRC_DENSETILE_H_
|
mcrat.c | /*
# Program to run a Monte Carlo radiation transfer through the 2D
# simulations of GRB jets.
#
# Python code written by D. Lazzati at Oregonstate, C code written by Tyler Parsotan @ Oregon State
# ver 0.1 July 8, 2015
# ver 1.1 July 20, 2015: added record of number of scatterings, included
# all terms in weight. Should now give correct light curves.
# ver 1.2 July 21, 2015: added parameter file to keep track of input
# params of each simulation
# ver 2.0 July 22, 2015: corrected the problem that arises when there is
# no scattering in the time span of one frame. Fixed output arrays dimension.
# ver 2.1 July 25, 2015: fixed bug that did not make the number of
# scattering grow with the number of photons.
# ver 3.0 July 28, 2015: using scipy nearest neighbor interpolation to
# speed things up. Gained about factor 2
# ver 3.1 July 29, 2015: added radial spread of photon injection points
# ver 3.2 July 31, 2015: added Gamma to the weight of photons!!!
# ver 4.0 Aug 5, 2015: try to speed up by inverting cycle
# ver 4.1 Aug 8, 2015: add spherical test as an option
# ver 4.2 Aug 9, 2015: saving files appending rather than re-writing
# ver 4.3 Aug 11, 2015: corrected error in the calculation of the local temperature
# ver 4.4 Aug 13, 2015: added cylindrical test
# ver 4.5 Aug 18, 2015: fixed various problems pointed out by the cylindrical test
# ver 4.6 Aug 21, 2015: corrected mean free path for large radii
# ver 5.0 Aug 25, 2015: corrected problem with high-T electrons and excess scatterings
# ver 5.1 Aug 25, 2015: cleaned-up coding
# ver 5.2 Sept 3, 2015: fixed problem with number of scatterings for multiple injections
*
* ver 6.0 Dec 28, 2016: rewrote the code in C, added a checkpoint file so if the code is interrupted all the progress won't be lost, made the code only need to be compiled once for a given MC_XXX directory path
so you just need to supply the sub directory of MC_XXX as a command line argument
* version 7.0 used OpenMP to parallelize the code by angle and the function findminmfp()
version 8.0 added 3D capabilities for RIKEN hydro data and 2D capabilities for RIKEN 2D hydro data and made it more efficient with grid selection to speed it up
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <dirent.h>
#include <math.h>
#include <gsl/gsl_rng.h>
#include "mclib_3d.h"
#include <omp.h>
#include "mpi.h"
/*
#define THISRUN "Science"
#define FILEPATH "/home/physics/parsotat/16OI/"
#define FILEROOT "rhd_jet_big_16OI_hdf5_plt_cnt_"
#define MC_PATH "MPI_CMC_16OI_SPHERICAL/"
#define THISRUN "Science"
#define FILEPATH "/Users/Tylerparsotan/Documents/Box Sync/1spike/"
#define FILEROOT "m0_rhop0.1big_hdf5_plt_cnt_"
#define MC_PATH "CMC_1spike/"
//#define MC_PATH "MC_16OI/Single_Photon_Cy_mc_total/"
* */
/*
#define THISRUN "Science"
#define FILEPATH "/home/physics/parsotat/16OM/"
#define FILEROOT "rhd_jet_big_16OM_hdf5_plt_cnt_"
#define MC_PATH "DIR_TEST/"
#define THISRUN "Science"
#define FILEPATH "/Volumes/DATA6TB/Collapsars/2D/HUGE_BOXES/VARY/40spikes/"
#define FILEROOT "m0_rhop0.1big_hdf5_plt_cnt_"
#define MC_PATH "CMC_40spikes_TEST/"
* */
#define THISRUN "Spherical"
#define FILEPATH "/Volumes/DATA6TB/Collapsars/2D/HUGE_BOXES/CONSTANT/16OI/"
//#define FILEPATH "/Users/Tylerparsotan//Documents/16OI_TEST/"
#define FILEROOT "rhd_jet_big_16OI_hdf5_plt_cnt_"
#define MC_PATH "TEST/"
#define MCPAR "mc.par"
#define RIKEN_SWITCH 0
int main(int argc, char **argv)
{
//recompile each time a macro is changed; supply the subfolder within the MC_PATH directory as a command line argument to the program, e.g. MCRAT 1/
// Define variables
char flash_prefix[200]="";
char mc_file[200]="" ;
char this_run[200]=THISRUN;
char *cyl="Cylindrical";
char *sph="Spherical";
char spect;//type of spectrum
char restrt;//restart or not
double fps, fps_modified, theta_jmin, theta_jmax, hydro_domain_y,hydro_domain_x ;//frames per second of sim, min opening angle of jet, max opening angle of jet in radians, max y value of fluid simulation domain
double inj_radius_small, inj_radius_large, ph_weight_suggest, ph_weight_small, ph_weight_large ;//radius at which photons are injected into sim
int frm0,last_frm, frm2_small, frm2_large, j=0, min_photons, max_photons, frm0_small, frm0_large ;//frame starting from, last frame of sim, frame of last injection
int dim_switch=0;
int find_nearest_grid_switch=0;
int increment_inj=1, increment_scatt=1; //increments for injection loop and scattering loop, outer and inner loops respectively, the increment can change for RIKEN 3D hydro files
double inj_radius;
int frm2;
char mc_filename[200]="";
char mc_filename_2[200]="";
char mc_operation[200]="";
char mc_dir[200]="" ;
int file_count = 0;
DIR * dirp;
struct dirent * entry;
struct stat st = {0};
double theta_jmin_thread=0, theta_jmax_thread=0;
char flash_file[200]="";
char log_file[200]="";
FILE *fPtr=NULL; //pointer to log file for each thread
double *xPtr=NULL, *yPtr=NULL, *rPtr=NULL, *thetaPtr=NULL, *velxPtr=NULL, *velyPtr=NULL, *densPtr=NULL, *presPtr=NULL, *gammaPtr=NULL, *dens_labPtr=NULL;
double *szxPtr=NULL,*szyPtr=NULL, *tempPtr=NULL; //pointers to hold data from FLASH files
double *phiPtr=NULL, *velzPtr=NULL, *zPtr=NULL;
int num_ph=0, array_num=0, ph_scatt_index=0, max_scatt=0, min_scatt=0,i=0; //number of photons produced in injection algorithm, number of array elements from reading FLASH file, index of photon which does scattering, generic counter
double dt_max=0, thescatt=0, accum_time=0;
double gamma_infinity=0, time_now=0, time_step=0, avg_scatt=0, avg_r=0; //gamma_infinity not used?
double ph_dens_labPtr=0, ph_vxPtr=0, ph_vyPtr=0, ph_tempPtr=0, ph_vzPtr=0;;// *ph_cosanglePtr=NULL ;
double min_r=0, max_r=0, min_theta=0, max_theta=0;
int frame=0, scatt_frame=0, frame_scatt_cnt=0, scatt_framestart=0, framestart=0;
struct photon *phPtr=NULL; //pointer to array of photons
int num_thread=0, angle_count=0;
int num_angles=0, old_num_angle_procs=0; //old_num_angle_procs is to hold the old number of procs in each angle when cont sims, if restarting sims this gets set to angle_procs
int *frame_array=NULL, *proc_frame_array=NULL, *element_num=NULL, proc_frame_size=0;
double *thread_theta=NULL; //saves ranges of thetas for each thread to go through
double delta_theta=1;
int myid, numprocs, angle_procs, angle_id, procs_per_angle;
//new OpenMPI stuff
MPI_Init(NULL,NULL);
MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
MPI_Comm_rank(MPI_COMM_WORLD, &myid);
//new: multiple threads injecting and propagating photons
const gsl_rng_type *rng_t;
gsl_rng *rng;
gsl_rng_env_setup();
rng_t = gsl_rng_ranlxs0;
rng = gsl_rng_alloc (rng_t); //initialize random number generator to seed the others with random numbers
//want to break up simulation by angle and injection frame & have each thread save data in its own folder
//have each thread check if its directory is made and if it's restarting (delete everything) or if it's continuing with a previous simulation
//the angle and the injection frames will be the names of mc_dir, therefore read mc.par first in MC_XXX directory
//make strings of proper directories etc.
snprintf(flash_prefix,sizeof(flash_prefix),"%s%s",FILEPATH,FILEROOT );
snprintf(mc_file,sizeof(flash_prefix),"%s%s%s",FILEPATH, MC_PATH,MCPAR);
printf(">> mc.py: Reading mc.par: %s\n", mc_file);
readMcPar(mc_file, &hydro_domain_x, &hydro_domain_y, &fps, &theta_jmin, &theta_jmax, &delta_theta, &inj_radius_small,&inj_radius_large, &frm0_small,&frm0_large, &last_frm ,&frm2_small, &frm2_large, &ph_weight_small, &ph_weight_large, &min_photons, &max_photons, &spect, &restrt, &num_thread,&dim_switch); //thetas that comes out is in degrees
//printf("%c\n", restrt);
//divide up angles and frame injections among threads; DON'T WANT THE NUMBER OF THREADS TO BE ODD
//assign ranges to array that hold them
//leave angles in degrees here
num_angles=(int) (((theta_jmax-theta_jmin)/delta_theta)) ;//*(180/M_PI));
thread_theta=malloc( num_angles *sizeof(double) );
*(thread_theta+0)=theta_jmin;//*(180/M_PI);
//printf("%e\n", *(thread_theta+0));
for (j=1;j<(num_angles); j++)
{
*(thread_theta+j)=*(thread_theta+(j-1))+delta_theta;
//printf("%e\n", *(thread_theta+j));
}
//split MPI_COMM_WORLD into one communicator per angular bin
//each angle_comm holds the procs_per_angle ranks that work on that angle range
procs_per_angle= numprocs/num_angles;
//printf("%d\n", procs_per_angle);
MPI_Comm angle_comm;
if (restrt=='r') //uncomment this when I run MCRAT for sims that didn't originally save angle_procs
{
MPI_Comm_split(MPI_COMM_WORLD, myid/procs_per_angle , myid, &angle_comm);
MPI_Comm_rank(angle_comm, &angle_id);
MPI_Comm_size(angle_comm, &angle_procs);
//printf("WORLD RANK/SIZE: %d/%d \t ROW RANK/SIZE: %d/%d\n", myid, numprocs, angle_id, angle_procs);
theta_jmin_thread= (*(thread_theta+ (myid/procs_per_angle))) *(M_PI/180);
theta_jmax_thread= theta_jmin_thread+(delta_theta*(M_PI/180));
snprintf(mc_dir,sizeof(flash_prefix),"%s%s%0.1lf-%0.1lf/",FILEPATH,MC_PATH, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI ); //have to add angle into this
old_num_angle_procs=angle_procs;
}
else
{
MPI_Group sub_world_group;
MPI_Comm sub_world_comm;
int incl_procs[procs_per_angle*num_angles], count, sub_world_id;
int total_num_to_restart=0;
int color=1;
int *all_cont_process_idPtr=NULL, *each_num_to_restart_per_anglePtr=NULL, *tmp=NULL;
//for the restart=='c' case: if the number of processes isn't a multiple of procs_per_angle*num_angles, make a comm out of those that are, in order to analyze checkpoint files and count the number of processes each angle range needs to continue
count=0;
for (j=0;j<numprocs;j++)
{
if (j<procs_per_angle*num_angles)
{
incl_procs[count]=j;
count++;
}
}
if (myid<procs_per_angle*num_angles)
{
int myid_2=0;
// Get the group of processes in MPI_COMM_WORLD and make a sub group to go through checkpoint files
MPI_Group world_group;
MPI_Comm root_angle_comm;
MPI_Comm_group(MPI_COMM_WORLD, &world_group);
MPI_Group_incl(world_group, procs_per_angle*num_angles, incl_procs, &sub_world_group);
MPI_Comm_create_group(MPI_COMM_WORLD, sub_world_group, 0, &sub_world_comm);
MPI_Comm_rank(sub_world_comm, &myid_2);
MPI_Comm_split(sub_world_comm, myid_2/procs_per_angle , myid_2, &angle_comm);
MPI_Comm_rank(angle_comm, &angle_id);
MPI_Comm_size(angle_comm, &angle_procs);
//create group of all the processes that have angle_id==0
if (angle_id==0)
{
color=0; //set different color for root processes in each group of angle_comm
}
MPI_Comm_split(sub_world_comm, color , myid_2, &root_angle_comm); //create comm to exchange info about number of processes to restart for each angle range
printf("WORLD RANK/SIZE: %d/%d \t ROW RANK/SIZE: %d/%d\n", myid, numprocs, angle_id, angle_procs);
theta_jmin_thread= (*(thread_theta+ (myid_2/procs_per_angle))) *(M_PI/180);
theta_jmax_thread= theta_jmin_thread+(delta_theta*(M_PI/180));
snprintf(mc_dir,sizeof(flash_prefix),"%s%s%0.1lf-%0.1lf/",FILEPATH,MC_PATH, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI ); //have to add angle into this
//call the function to count the num of processes for each angle range that need to be con't
int count_cont_procs=0, total_cont_procs_angle=0, global_cont_procs=0;
int *cont_proc_idsPtr=NULL, *total_cont_procs_angle_Ptr=NULL, *displPtr=NULL; //becomes the size of the number of old processes
int *cont_proc_ids_anglePtr=NULL;
old_num_angle_procs=getOrigNumProcesses(&count_cont_procs, &cont_proc_idsPtr, mc_dir, angle_id, angle_procs, last_frm, dim_switch, RIKEN_SWITCH);
if (old_num_angle_procs==-1)
{
printf("MCRAT wasnt able to get a value of old_num_angle_procs to continue the simulation. Now exiting to prevent data corruption.\n" );
MPI_Abort(MPI_COMM_WORLD, 1);
}
total_cont_procs_angle_Ptr=malloc(angle_procs*sizeof(int));
displPtr=malloc(angle_procs*sizeof(int));
MPI_Gather(&count_cont_procs,1,MPI_INT, total_cont_procs_angle_Ptr, 1, MPI_INT, 0,angle_comm );//hold the number of elements that each process will send the root process
MPI_Barrier(angle_comm);
MPI_Barrier(sub_world_comm);
if (angle_id==0)
{
printf("Angle_procs: %d 1st gather: %d, %d, %d\n", angle_procs, *(total_cont_procs_angle_Ptr), *(total_cont_procs_angle_Ptr+1), *(total_cont_procs_angle_Ptr+2));
}
MPI_Reduce(&count_cont_procs, &total_cont_procs_angle, 1, MPI_INT, MPI_SUM, 0, angle_comm); //for each angle sum the number of procs to continue and pass it to the root for angle_comm
cont_proc_ids_anglePtr=malloc(total_cont_procs_angle*sizeof(int)); //each root proc in angle comm has to hold the id's of the old set of processes to cont
*(displPtr+0)=0;
if (angle_id==0)
{
for (j=1;j<angle_procs;j++)
{
*(displPtr+j)=(*(displPtr+j-1))+(*(total_cont_procs_angle_Ptr+j-1 )); //set the displacement where each process puts its vector of process IDs that need to be continued
printf("Displacement: %d\n", *(displPtr+j));
}
}
MPI_Gatherv(cont_proc_idsPtr,count_cont_procs,MPI_INT, cont_proc_ids_anglePtr, total_cont_procs_angle_Ptr, displPtr , MPI_INT, 0,angle_comm ); //send the vectors with the ids of the old processes that need to be cont to root in angle_comm
MPI_Barrier(angle_comm);
MPI_Barrier(sub_world_comm);
if (angle_id==0)
{
printf("Total Cont Procs: %d\n", total_cont_procs_angle);
for (j=0;j<total_cont_procs_angle;j++)
{
{
printf("ID: %d\n", *(cont_proc_ids_anglePtr+j));
}
}
}
//each root for angle_comm has the number of processes each angle range needs to restart and the array of what the IDs of those processes used to be
//now have to combine all that info for rank 0 in MPI_COMM_WORLD and then send it to all processes in MPI_COMM_WORLD
//if (myid==0)
{
free(displPtr);
displPtr=NULL;
//initialize variables to hold all data
each_num_to_restart_per_anglePtr=malloc(num_angles*sizeof(int));
displPtr=malloc(num_angles*sizeof(int));
*(displPtr+0)=0;
}
MPI_Barrier(angle_comm);
MPI_Barrier(sub_world_comm);
if (angle_id==0)
{
//this is the part where all the root processes of angle_comm transfer their info to the root proc of MPI_COMM_WORLD
MPI_Reduce(&total_cont_procs_angle, &total_num_to_restart, 1, MPI_INT, MPI_SUM, 0, root_angle_comm); //for each angle sum the number of procs to continue and pass it to the root for MPI_COMM_WORLD
MPI_Gather(&total_cont_procs_angle,1,MPI_INT, each_num_to_restart_per_anglePtr, 1, MPI_INT, 0,root_angle_comm );//hold the number of elements that each process sent the root for MPI_COMM_WORLD
if (myid==0)
{
for (j=1;j<num_angles;j++)
{
*(displPtr+j)=(*(displPtr+j-1))+(*(each_num_to_restart_per_anglePtr+j-1 )); //set the displacement where each process puts its vector of process IDs that need to be continued
}
}
all_cont_process_idPtr=malloc(total_num_to_restart*sizeof(int));
MPI_Gatherv(cont_proc_ids_anglePtr, total_cont_procs_angle, MPI_INT, all_cont_process_idPtr, each_num_to_restart_per_anglePtr, displPtr, MPI_INT, 0, root_angle_comm);
}
MPI_Barrier(angle_comm);
MPI_Barrier(sub_world_comm);
if (myid==0)
{
printf("Global Cont Procs: %d\n", total_num_to_restart);
for (j=0;j<total_num_to_restart;j++)
{
{
printf("Global ID: %d\n", *(all_cont_process_idPtr+j));
}
}
}
//destroy the old comms
MPI_Barrier(angle_comm);
MPI_Barrier(sub_world_comm);
//destroy current angle comm and recreate a new one
MPI_Comm_free(&root_angle_comm);
MPI_Comm_free(&angle_comm);
MPI_Comm_free(&sub_world_comm);
MPI_Group_free(&sub_world_group);
MPI_Group_free(&world_group);
free(cont_proc_idsPtr);
free(cont_proc_ids_anglePtr);
free(total_cont_procs_angle_Ptr);
free(displPtr);
//free(each_num_to_restart_per_anglePtr);
//free(all_cont_process_idPtr);
}
//send all of myid==0 data to all processes in MPI_COMM_WORLD
MPI_Bcast( &total_num_to_restart, 1, MPI_INT, 0, MPI_COMM_WORLD );
if (total_num_to_restart>0)
{
if (myid != 0 )
{
//allocate data of appropriate size for all processes to hold the data from MPI_Bcast
tmp=realloc(all_cont_process_idPtr,total_num_to_restart *sizeof(int));
if (tmp!=NULL)
{
all_cont_process_idPtr=tmp;
}
else
{
printf("Error with reserving space to hold data about restarting process ID's\n");
}
//free(tmp);
tmp=realloc(each_num_to_restart_per_anglePtr, num_angles*sizeof(int));
if (tmp!=NULL)
{
each_num_to_restart_per_anglePtr=tmp;
}
else
{
printf("Error with reserving space to hold data about restarting process numbers for each angle range\n");
}
//free(tmp);
}
MPI_Bcast( all_cont_process_idPtr, total_num_to_restart, MPI_INT, 0, MPI_COMM_WORLD );
MPI_Bcast( each_num_to_restart_per_anglePtr, num_angles, MPI_INT, 0, MPI_COMM_WORLD );
MPI_Bcast( &old_num_angle_procs, 1, MPI_INT, 0, MPI_COMM_WORLD );
MPI_Barrier(MPI_COMM_WORLD);
if (myid==numprocs-1)
{
printf("Number of processes: %d\n", old_num_angle_procs);
printf("restarting process numbers for each angle range: %d, %d, %d\n", *(each_num_to_restart_per_anglePtr), *(each_num_to_restart_per_anglePtr+1), *(each_num_to_restart_per_anglePtr+2));
}
//assign proper number of processes to each angle range to con't sims and then reset angle_id to original value from when simulation was first started
color=0; //by default all processes have this value
count=0;
for (j=0;j<num_angles;j++)
{
if (myid>=count && myid<count+(*(each_num_to_restart_per_anglePtr+j)) )
{
color=j;
}
count+=(*(each_num_to_restart_per_anglePtr+j));
printf("Myid: %d, Color: %d, Count %d, Num To Start Per Angle: %d\n", myid, color, count, (*(each_num_to_restart_per_anglePtr+j)));
}
MPI_Comm_split(MPI_COMM_WORLD, color , myid, &angle_comm);
MPI_Comm_rank(angle_comm, &angle_id);
MPI_Comm_size(angle_comm, &angle_procs);
printf("WORLD RANK/SIZE: %d/%d \t ROW RANK/SIZE: %d/%d\n", myid, numprocs, angle_id, angle_procs);
angle_procs=old_num_angle_procs;
//reset the angle for each process
theta_jmin_thread= (*(thread_theta+ color)) *(M_PI/180);
theta_jmax_thread= theta_jmin_thread+(delta_theta*(M_PI/180));
//reset the angle_id for each process
count=0;
for (j=0;j<color;j++)
{
count+=(*(each_num_to_restart_per_anglePtr+j));
}
angle_id=(*(all_cont_process_idPtr+count+angle_id));
snprintf(mc_dir,sizeof(flash_prefix),"%s%s%0.1lf-%0.1lf/",FILEPATH,MC_PATH, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI ); //have to add angle into this
}
else
{
//if there are no more processes to continue just break up processes normally so they read in checkpoint files of completed processes and jump to merging files
MPI_Comm_split(MPI_COMM_WORLD, myid/procs_per_angle , myid, &angle_comm);
MPI_Comm_rank(angle_comm, &angle_id);
MPI_Comm_size(angle_comm, &angle_procs);
}
free(all_cont_process_idPtr);
free(each_num_to_restart_per_anglePtr);
}
MPI_Barrier(MPI_COMM_WORLD);
if ((theta_jmin_thread >= 0) && (theta_jmax_thread <= (2*M_PI/180) )) //if within small angle (0-2 degrees) use _small inj_radius and frm2 have to think about this for larger domains
{
inj_radius=inj_radius_small;
frm2=frm2_small;
frm0=frm0_small;
ph_weight_suggest=ph_weight_small;
}
else
{
inj_radius=inj_radius_large;
frm2=frm2_large;
frm0=frm0_large;
ph_weight_suggest=ph_weight_large;
}
//make vector to hold the frames we are injecting in, vector should have (frm2-frm0)/angle_procs slots, if fps is const
proc_frame_size=ceil((frm2-frm0)/ (float) angle_procs);
frame_array=malloc(((frm2-frm0)+1)*sizeof(int));
for (j=0;j<((frm2-frm0)+1); j++)
{
*(frame_array+j)=frm0+j ;
//printf("proc: %d frame: %d\n", angle_id, *(frame_array+j));
}
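//each rank in angle_comm takes a contiguous block of proc_frame_size injection frames from frame_array; the last rank also absorbs any remainder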
{
//set this now in case there is no checkpoint file, then this won't be overwritten and the correct values will be passed even if the user decides to restart
framestart=(*(frame_array +(angle_id*proc_frame_size)));
scatt_framestart=framestart;
if (angle_id != (angle_procs-1))
{
frm2=(*(frame_array +((angle_id*proc_frame_size) + proc_frame_size-1) )); //section off blocks of the frame_array to give to each angle_id
}
else
{
frm2=(*(frame_array + (frm2-frm0) )); //if angle_id is last give it the last set, even if its uneven
}
if (restrt=='c')
{
printf(">> mc.py: Reading checkpoint\n");
//#pragma omp critical
readCheckpoint(mc_dir, &phPtr, &frm2, &framestart, &scatt_framestart, &num_ph, &restrt, &time_now, angle_id, &angle_procs, dim_switch, RIKEN_SWITCH);
/*
for (i=0;i<num_ph;i++)
{
printf("%e,%e,%e, %e,%e,%e, %e, %e\n",(phPtr+i)->p0, (phPtr+i)->p1, (phPtr+i)->p2, (phPtr+i)->p3, (phPtr+i)->r0, (phPtr+i)->r1, (phPtr+i)->r2, (phPtr+i)->num_scatt );
}
*/
if (restrt=='c')
{
printf(">> Rank %d: Starting from photons injected at frame: %d out of %d\n", angle_id,framestart, frm2);
printf(">> Rank %d with angles %0.1lf-%0.1lf: Continuing scattering %d photons from frame: %d\n", angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI,num_ph, scatt_framestart);
printf(">> Rank %d with angles %0.1lf-%0.1lf: The time now is: %e\n", angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI,time_now);
}
else
{
printf(">> Rank %d with angles %0.1lf-%0.1lf: Continuing simulation by injecting photons at frame: %d out of %d\n", angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI,framestart, frm2); //starting with new photon injection is same as restarting sim
}
}
else if ((stat(mc_dir, &st) == -1) && (restrt=='r'))
{
mkdir(mc_dir, 0777); //make the directory with full permissions
}
else
{
if (angle_id==0)
{
printf(">> proc %d with angles %0.1lf-%0.1lf: Cleaning directory \n",angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI);
dirp = opendir(mc_dir);
while ((entry = readdir(dirp)) != NULL)
{
if (entry->d_type == DT_REG) { /* If the entry is a regular file */
file_count++; //count how many files are in the directory
}
}
printf("File count %d\n", file_count);
if (file_count>0)
{
for (i=0;i<=last_frm;i++)
{
snprintf(mc_filename,sizeof(mc_filename),"%s%s%d%s", mc_dir,"mcdata_",i,"_P0.dat");
//snprintf(mc_filename_2,sizeof(mc_filename),"%s%s%d%s", mc_dir,"mcdata_",i,"_P0_0.dat");
for (j=0;j<angle_procs;j++)
{
snprintf(mc_filename_2,sizeof(mc_filename),"%s%s%d%s%d%s", mc_dir,"mcdata_",i,"_P0_",j, ".dat");
if(( access( mc_filename, F_OK ) != -1 ) || ( access( mc_filename_2, F_OK ) != -1 ) )
{
snprintf(mc_operation,sizeof(flash_prefix),"%s%s%s%d%s","exec rm ", mc_dir,"mcdata_",i,"_*.dat"); //prepares string to remove *.dat in mc_dir
//printf("%s\n",mc_operation);
system(mc_operation);
snprintf(mc_operation,sizeof(flash_prefix),"%s%s%s%d%s","exec rm ", mc_dir,"mcdata_",i,"_*");
system(mc_operation);
}
}
}
snprintf(mc_operation,sizeof(flash_prefix),"%s%s%s","exec rm ", mc_dir,"mcdata_PW_*.dat"); //prepares string to remove *.dat in mc_dir
system(mc_operation);
snprintf(mc_operation,sizeof(flash_prefix),"%s%s%s","exec rm ", mc_dir,"mcdata_PW.dat"); //prepares string to remove *.dat in mc_dir
system(mc_operation);
snprintf(mc_operation,sizeof(flash_prefix),"%s%s%s","exec rm ", mc_dir,"mc_chkpt_*.dat"); //prepares string to remove *.dat in mc_dir
system(mc_operation);
snprintf(mc_operation,sizeof(flash_prefix),"%s%s%s","exec rm ", mc_dir,"mc_output_*.log"); //prepares string to remove *.log in mc_dir
system(mc_operation);
}
}
}
if ((RIKEN_SWITCH==1) && (dim_switch==1) && (framestart>=3000))
{
increment_inj=10; //when the frame ==3000 for RIKEN 3D hydro files, increment file numbers by 10 instead of by 1
fps_modified=1;
}
else
{
increment_inj=1;
fps_modified=fps;
}
dt_max=1.0/fps_modified;
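//dt_max is the longest step a photon can be advanced before the next hydro frame has to be loaded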
MPI_Barrier(angle_comm);
snprintf(log_file,sizeof(log_file),"%s%s%d%s",mc_dir,"mc_output_", angle_id,".log" );
printf("%s\n",log_file);
fPtr=fopen(log_file, "a");
printf( "Im Proc %d with angles %0.1lf-%0.1lf proc_frame_size is %d Starting on Frame: %d Injecting until %d scatt_framestart: %d\n", angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, proc_frame_size, framestart, frm2, scatt_framestart);
fprintf(fPtr, "Im Proc %d with angles %0.1lf-%0.1lf Starting on Frame: %d scatt_framestart: %d\n", angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, framestart, scatt_framestart);
fflush(fPtr);
free(frame_array);
//for a checkpoint implementation, start from the last saved "frame" value and go to the saved "frm2" value
//#pragma omp for
for (frame=framestart;frame<=frm2;frame=frame+increment_inj)
{
if ((RIKEN_SWITCH==1) && (dim_switch==1) && (frame>=3000))
{
increment_inj=10; //when the frame ==3000 for RIKEN 3D hydro files, increment file numbers by 10 instead of by 1
fps_modified=1;
}
else
{
increment_inj=1;
fps_modified=fps;
}
if (restrt=='r')
{
time_now=frame/fps; //for a checkpoint implementation, load the saved "time_now" value when reading the checkpoint file; otherwise calculate it normally
}
//printf(">> mc.py: Working on Frame %d\n", frame);
fprintf(fPtr,"Im Proc: %d with angles %0.1lf - %0.1lf Working on Frame: %d\n", angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, frame);
fflush(fPtr);
if (restrt=='r')
{
if (dim_switch==0)
{
if (RIKEN_SWITCH==0)
{
//if using FLASH data for 2D
//put proper number at the end of the flash file
modifyFlashName(flash_file, flash_prefix, frame, dim_switch);
fprintf(fPtr,">> Im Proc: %d with angles %0.1lf-%0.1lf: Opening FLASH file %s\n",angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, flash_file);
fflush(fPtr);
readAndDecimate(flash_file, inj_radius, fps_modified, &xPtr, &yPtr, &szxPtr, &szyPtr, &rPtr,\
&thetaPtr, &velxPtr, &velyPtr, &densPtr, &presPtr, &gammaPtr, &dens_labPtr, &tempPtr, &array_num, 1, min_r, max_r, min_theta, max_theta, fPtr);
}
else
{
//if using RIKEN hydro data for 2D szx becomes delta r szy becomes delta theta
readHydro2D(FILEPATH, frame, inj_radius, fps_modified, &xPtr, &yPtr, &szxPtr, &szyPtr, &rPtr,\
&thetaPtr, &velxPtr, &velyPtr, &densPtr, &presPtr, &gammaPtr, &dens_labPtr, &tempPtr, &array_num, 1, min_r, max_r, fPtr);
//fprintf(fPtr, "%d\n\n", array_num);
}
}
else
{
fprintf(fPtr,">> Im Proc: %d with angles %0.1lf-%0.1lf\n",angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI);
fflush(fPtr);
read_hydro(FILEPATH, frame, inj_radius, &xPtr, &yPtr, &zPtr, &szxPtr, &szyPtr, &rPtr,\
&thetaPtr, &phiPtr, &velxPtr, &velyPtr, &velzPtr, &densPtr, &presPtr, &gammaPtr, &dens_labPtr, &tempPtr, &array_num, 1, min_r, max_r, fps_modified, fPtr);
}
//check for run type
if(strcmp(cyl, this_run)==0)
{
//printf("In cylindrical prep\n");
cylindricalPrep(gammaPtr, velxPtr, velyPtr, densPtr, dens_labPtr, presPtr, tempPtr, array_num);
}
else if (strcmp(sph, this_run)==0)
{
printf("In Spherical\n");
sphericalPrep(rPtr, xPtr, yPtr,gammaPtr, velxPtr, velyPtr, densPtr, dens_labPtr, presPtr, tempPtr, array_num , fPtr);
}
//determine where to place photons and how many should go in a given place
//for a checkpoint implementation, don't need to inject photons, need to load photons' last saved data
fprintf(fPtr,">> Proc: %d with angles %0.1lf-%0.1lf: Injecting photons\n",angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI);
fflush(fPtr);
if (dim_switch==0)
{
photonInjection(&phPtr, &num_ph, inj_radius, ph_weight_suggest, min_photons, max_photons,spect, array_num, fps_modified, theta_jmin_thread, theta_jmax_thread, xPtr, yPtr, szxPtr, szyPtr,rPtr,thetaPtr, tempPtr, velxPtr, velyPtr,rng, RIKEN_SWITCH, fPtr );
}
else
{
photonInjection3D(&phPtr, &num_ph, inj_radius, ph_weight_suggest, min_photons, max_photons,spect, array_num, fps_modified, theta_jmin_thread, theta_jmax_thread, xPtr, yPtr, zPtr, szxPtr, szyPtr,rPtr,thetaPtr, phiPtr, tempPtr, velxPtr, velyPtr, velzPtr, rng, fPtr);
}
//printf("This many Photons: %d\n",num_ph); //num_ph is one more photon than i actually have
//for (i=0;i<num_ph;i++)
// printf("%e,%e,%e \n",(phPtr+i)->r0, (phPtr+i)->r1, (phPtr+i)->r2 );
}
//scatter photons all the way throughout the jet
//for a checkpoint implementation, start from the last saved "scatt_frame" value, e.g. start_frame=frame or start_frame=cont_frame
if (restrt=='r')
{
scatt_framestart=frame; //have to make sure that once the inner loop is done and the outer loop is incremented by one, the inner loop starts at that new value and not the one read by readCheckpoint()
}
for (scatt_frame=scatt_framestart;scatt_frame<=last_frm;scatt_frame=scatt_frame+increment_scatt)
{
if ((RIKEN_SWITCH==1) && (dim_switch==1) && (scatt_frame>=3000))
{
increment_scatt=10; //when the frame ==3000 for RIKEN 3D hydro files, increment file numbers by 10 instead of by 1
fps_modified=1; //therefore dt between files become 1 second
}
else
{
increment_scatt=1;
fps_modified=fps;
}
dt_max=1.0/fps_modified; //if working with RIKEN files and scatt_frame>=3000 dt is 1 second between each subsequent frame
fprintf(fPtr,">>\n");
fprintf(fPtr,">> Proc %d with angles %0.1lf-%0.1lf: Working on photons injected at frame: %d out of %d\n", angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI,frame, frm2);
fprintf(fPtr,">> Proc %d with angles %0.1lf-%0.1lf: %s - Working on frame %d\n",angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, THISRUN, scatt_frame);
fprintf(fPtr,">> Proc %d with angles %0.1lf-%0.1lf: Opening file...\n", angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI);
fflush(fPtr);
//set new seed to increase randomness?
gsl_rng_set(rng, gsl_rng_get(rng));
if (dim_switch==0)
{
if (RIKEN_SWITCH==0)
{
//put proper number at the end of the flash file
modifyFlashName(flash_file, flash_prefix, scatt_frame, dim_switch);
phMinMax(phPtr, num_ph, &min_r, &max_r, &min_theta, &max_theta, fPtr);
readAndDecimate(flash_file, inj_radius, fps_modified, &xPtr, &yPtr, &szxPtr, &szyPtr, &rPtr,\
&thetaPtr, &velxPtr, &velyPtr, &densPtr, &presPtr, &gammaPtr, &dens_labPtr, &tempPtr, &array_num, 0, min_r, max_r, min_theta, max_theta, fPtr);
}
else
{
phMinMax(phPtr, num_ph, &min_r, &max_r, &min_theta, &max_theta, fPtr);
//if using RIKEN hydro data for 2D szx becomes delta r szy becomes delta theta
readHydro2D(FILEPATH, scatt_frame, inj_radius, fps_modified, &xPtr, &yPtr, &szxPtr, &szyPtr, &rPtr,\
&thetaPtr, &velxPtr, &velyPtr, &densPtr, &presPtr, &gammaPtr, &dens_labPtr, &tempPtr, &array_num, 0, min_r, max_r, fPtr);
}
}
else
{
phMinMax(phPtr, num_ph, &min_r, &max_r, &min_theta, &max_theta, fPtr);
read_hydro(FILEPATH, scatt_frame, inj_radius, &xPtr, &yPtr, &zPtr, &szxPtr, &szyPtr, &rPtr,\
&thetaPtr, &phiPtr, &velxPtr, &velyPtr, &velzPtr, &densPtr, &presPtr, &gammaPtr, &dens_labPtr, &tempPtr, &array_num, 0, min_r, max_r, fps_modified, fPtr);
}
fprintf(fPtr, "Number of Flash Elements %d\n", array_num);
//check for run type
if(strcmp(cyl, this_run)==0)
{
//printf("In cylindrical prep\n");
cylindricalPrep(gammaPtr, velxPtr, velyPtr, densPtr, dens_labPtr, presPtr, tempPtr, array_num);
}
else if (strcmp(sph, this_run)==0)
{
sphericalPrep(rPtr, xPtr, yPtr,gammaPtr, velxPtr, velyPtr, densPtr, dens_labPtr, presPtr, tempPtr, array_num, fPtr );
}
//printf("The result of read and decimate are arrays with %d elements\n", array_num);
fprintf(fPtr,">> Proc %d with angles %0.1lf-%0.1lf: propagating and scattering %d photons\n",angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI,num_ph);
fflush(fPtr);
frame_scatt_cnt=0;
find_nearest_grid_switch=1; // set to true so the function findNearestPropertiesAndMinMFP by default finds the index of the grid block closest to each photon since we just read in a file and the prior index is invalid
while (time_now<((scatt_frame+increment_scatt)/fps))
{
//if simulation time is less than the simulation time of the next frame, keep scattering in this frame
//for RIKEN hydro data, theres still 10 fps but after frame 3000, file increment is 10 not 1, therefore modify dt_max not fps
//go through each photon and find blocks closest to each photon and properties of those blocks to calculate mean free path
//and choose the photon with the smallest mfp and calculate the timestep
ph_scatt_index=findNearestPropertiesAndMinMFP(phPtr, num_ph, array_num, hydro_domain_x, hydro_domain_y, &time_step, xPtr, yPtr, zPtr, szxPtr, szyPtr, velxPtr, velyPtr, velzPtr, dens_labPtr, tempPtr,\
&ph_dens_labPtr, &ph_vxPtr, &ph_vyPtr, &ph_vzPtr, &ph_tempPtr, rng, dim_switch, find_nearest_grid_switch, RIKEN_SWITCH, fPtr);
find_nearest_grid_switch=0; //set to zero (false) since we do not absolutely need to refind the index, this makes the function findNearestPropertiesAndMinMFP just check if the photon is w/in the given grid box still
//fprintf(fPtr, "In main: %e, %d, %e, %e\n",((phPtr+ph_scatt_index)->num_scatt), ph_scatt_index, time_step, time_now);
//fflush(fPtr);
if (time_step<dt_max)
{
//update number of scatterings and time
((phPtr+ph_scatt_index)->num_scatt)+=1;
frame_scatt_cnt+=1;
time_now+=time_step;
updatePhotonPosition(phPtr, num_ph, time_step, fPtr);
//scatter the photon
//fprintf(fPtr, "Passed Parameters: %e, %e, %e\n", (ph_vxPtr), (ph_vyPtr), (ph_tempPtr));
photonScatter( (phPtr+ph_scatt_index), (ph_vxPtr), (ph_vyPtr),ph_vzPtr, (ph_tempPtr), rng, dim_switch, fPtr );
if (frame_scatt_cnt%1000 == 0)
{
fprintf(fPtr,"Scattering Number: %d\n", frame_scatt_cnt);
fprintf(fPtr,"The local temp is: %e\n", (ph_tempPtr));
fprintf(fPtr,"Average photon energy is: %e\n", averagePhotonEnergy(phPtr, num_ph)); //write function to average over the photons p0 and then do (*3e10/1.6e-9)
fprintf(fPtr,"The last time step was: %e.\nThe time now is: %e\n", time_step,time_now);
fflush(fPtr);
}
}
else
{
time_now+=dt_max;
//for each photon update its position based on its momentum
updatePhotonPosition(phPtr, num_ph, dt_max, fPtr);
}
//printf("In main 2: %e, %d, %e, %e\n", ((phPtr+ph_scatt_index)->num_scatt), ph_scatt_index, time_step, time_now);
}
//get scattering statistics
phScattStats(phPtr, num_ph, &max_scatt, &min_scatt, &avg_scatt, &avg_r);
fprintf(fPtr,"The number of scatterings in this frame is: %d\n", frame_scatt_cnt);
fprintf(fPtr,"The last time step was: %e.\nThe time now is: %e\n", time_step,time_now);
fprintf(fPtr,"The maximum number of scatterings for a photon is: %d\nThe minimum number of scattering for a photon is: %d\n", max_scatt, min_scatt);
fprintf(fPtr,"The average number of scatterings thus far is: %lf\nThe average position of photons is %e\n", avg_scatt, avg_r);
fflush(fPtr);
printPhotons(phPtr, num_ph, scatt_frame , frame, mc_dir, angle_id);
//exit(0);
fprintf(fPtr, ">> Proc %d with angles %0.1lf-%0.1lf: Making checkpoint file\n", angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI);
fflush(fPtr);
fprintf(fPtr, " mc_dir: %s\nframe %d\nfrm2: %d\nscatt_frame: %d\n num_photon: %d\ntime_now: %e\nlast_frame: %d\n", mc_dir, frame, frm2, scatt_frame, num_ph, time_now, last_frm );
fflush(fPtr);
saveCheckpoint(mc_dir, frame, frm2, scatt_frame, num_ph, time_now, phPtr, last_frm, angle_id, old_num_angle_procs);
if (dim_switch==1)
{
if (RIKEN_SWITCH==1)
{
free(zPtr);free(phiPtr);free(velzPtr);
zPtr=NULL; phiPtr=NULL; velzPtr=NULL;
}
}
free(xPtr);free(yPtr);free(szxPtr);free(szyPtr);free(rPtr);free(thetaPtr);free(velxPtr);free(velyPtr);free(densPtr);free(presPtr);
free(gammaPtr);free(dens_labPtr);free(tempPtr);
xPtr=NULL; yPtr=NULL; rPtr=NULL;thetaPtr=NULL;velxPtr=NULL;velyPtr=NULL;densPtr=NULL;presPtr=NULL;gammaPtr=NULL;dens_labPtr=NULL;
szxPtr=NULL; szyPtr=NULL; tempPtr=NULL;
}
restrt='r';//set this to make sure that the next iteration of propagating photons doesn't use the values from the last reading of the checkpoint file
free(phPtr);
phPtr=NULL;
}
saveCheckpoint(mc_dir, frame, frm2, scatt_frame, 0, time_now, phPtr, last_frm, angle_id, old_num_angle_procs); //this is for processes using the old code that didn't restart efficiently
fprintf(fPtr, "Process %d has completed the MC calculation.\n", angle_id);
fflush(fPtr);
}//end omp parallel inner section
MPI_Barrier(angle_comm);
//merge files from each worker thread within a directory
{
increment_scatt=1;
file_count=0;
//count number of files
for (i=frm0;i<=last_frm;i=i+increment_scatt)
{
if ((RIKEN_SWITCH==1) && (dim_switch==1) && (i>=3000))
{
increment_scatt=10; //when the frame ==3000 for RIKEN 3D hydro files, increment file numbers by 10 instead of by 1
}
file_count++;
}
//holds number of files for each process to merge
MPI_Comm_size(angle_comm, &angle_procs); //to get the proper number of processes within the group
MPI_Comm_rank(angle_comm, &angle_id); //reset the value of angle_id to what it should actualy be to properly distribute files to merge
proc_frame_size=floor(file_count/ (float) angle_procs);
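//split the frames to merge evenly: each rank merges proc_frame_size frames' worth of per-process files, and the last rank (below) also takes the remainder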
frame_array=malloc(file_count*sizeof(int));
proc_frame_array=malloc(angle_procs*sizeof(int)); //holds the starting index (displacement) in frame_array for each process
element_num=malloc(angle_procs*sizeof(int));
for (i=0;i<angle_procs;i++)
{
*(proc_frame_array+i)=i*proc_frame_size;
*(element_num+i)=1;
}
//make vector with the files in order to pass them to each of the processes
increment_scatt=1;
file_count=0;
for (i=frm0;i<=last_frm;i=i+increment_scatt)
{
if ((RIKEN_SWITCH==1) && (dim_switch==1) && (i>=3000))
{
increment_scatt=10; //when the frame ==3000 for RIKEN 3D hydro files, increment file numbers by 10 instead of by 1
}
*(frame_array+file_count)=i ;
file_count++;
//printf("file_count: %d frame: %d\n", file_count-1, *(frame_array+file_count-1));
}
//pass the first frame number that each process should start to merge; the process can calculate the file it should merge until
MPI_Scatterv(frame_array, element_num, proc_frame_array, MPI_INT, &frm0, 1, MPI_INT, 0, angle_comm);
//fprintf(fPtr, "Value: last_frm: ,%d\n", file_count);
//fflush(fPtr);
//make sure all files get merged by giving the rest to the last process
if (angle_id==angle_procs-1)
{
proc_frame_size=file_count-proc_frame_size*(angle_procs-1); //for last process take over the remaining number of files
}
//calculate the last file the process should merge up to
i=0;
last_frm=frm0;
while(i<proc_frame_size)
{
if ((RIKEN_SWITCH==1) && (dim_switch==1) && (last_frm>=3000))
{
increment_scatt=10; //when the frame ==3000 for RIKEN 3D hydro files, increment file numbers by 10 instead of by 1
}
else
{
increment_scatt=1;
}
last_frm+=increment_scatt;
i++;
}
//if (angle_id==0)
{
//fprintf(fPtr, ">> Proc %d with angles %0.1lf-%0.1lf: Merging Files from %d to %d\n", angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, frm0, last_frm);
fprintf(fPtr, ">> Proc %d with angles %0.1lf-%0.1lf: Merging Files from %d to %d\n", angle_id, theta_jmin_thread*180/M_PI, theta_jmax_thread*180/M_PI, frm0, last_frm);
fflush(fPtr);
dirFileMerge(mc_dir, frm0, last_frm, old_num_angle_procs, angle_id, dim_switch, RIKEN_SWITCH, fPtr);
}
}
fprintf(fPtr, "Process %d has completed merging files.\n", angle_id);
fflush(fPtr);
fclose(fPtr);
gsl_rng_free (rng);
MPI_Finalize();
//free(rng);
//free(thread_theta);
return 0;
}
|
fib.c | #include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <omp.h>
#include <sys/time.h>
static uint64_t par_res, seq_res;
int cutoff_value;
#define FIB_NUM_PRECOMP 50
uint64_t fib_results[FIB_NUM_PRECOMP] = { /*{{{*/
0,
1,
1,
2,
3,
5,
8,
13,
21,
34,
55,
89,
144,
233,
377,
610,
987,
1597,
2584,
4181,
6765,
10946,
17711,
28657,
46368,
75025,
121393,
196418,
317811,
514229,
832040,
1346269,
2178309,
3524578,
5702887,
9227465,
14930352,
24157817,
39088169,
63245986,
102334155,
165580141,
267914296,
433494437,
701408733,
1134903170,
1836311903,
2971215073,
4807526976,
7778742049
}; /*}}}*/
// Forward declarations
static uint64_t fib_seq(int n);
static uint64_t fib(int n, int d);
int main(int argc, char** argv);
/*static*/ uint64_t fib(int n, int d)
{ /*{{{*/
uint64_t x, y;
if (n < 2)
return n;
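/* Below the cutoff depth, spawn each recursive call as an OpenMP task;
 * past it, fall back to the plain serial recursion to limit tasking overhead. */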
if (d < cutoff_value)
{
#pragma omp task shared(x) firstprivate(n, d)
x = fib(n - 1, d + 1);
#pragma omp task shared(y) firstprivate(n, d)
y = fib(n - 2, d + 1);
#pragma omp taskwait
}
else
{
x = fib_seq(n - 1);
y = fib_seq(n - 2);
}
return x + y;
} /*}}}*/
static uint64_t fib_seq(int n)
{ /*{{{*/
if (n < 2)
return n;
return fib_seq(n - 1) + fib_seq(n - 2);
} /*}}}*/
long get_usecs(void)
{/*{{{*/
struct timeval t;
gettimeofday(&t, ((void *) 0));
return t.tv_sec * 1000000 + t.tv_usec;
}/*}}}*/
int main(int argc, char** argv)
{ /*{{{*/
if (argc > 3)
{
fprintf(stderr, "Usage: %s number cut_off\n", argv[0]);
exit(1);
}
cutoff_value = 15;
int num = 42;
if (argc > 1)
num = atoi(argv[1]);
if (argc > 2)
cutoff_value = atoi(argv[2]);
fprintf(stderr, "Computing fib %d %d ...\n", num, cutoff_value);
long par_time_start = get_usecs();
#pragma omp parallel
{
#pragma omp single
{
#pragma omp task
par_res = fib(num, 0);
#pragma omp taskwait
}
}
long par_time_end = get_usecs();
double par_time = (double)(par_time_end - par_time_start) / 1000000;
fprintf(stderr, "Execution time = %f s\n", par_time);
#ifdef CHECK_RESULT
fprintf(stderr, "Checking ...\n");
if (num >= FIB_NUM_PRECOMP) /* fib_results only holds FIB_NUM_PRECOMP entries, so fall back to the serial computation */
seq_res = fib_seq(num);
else {
long seq_time_start = get_usecs();
seq_res = fib_results[num];
long seq_time_end = get_usecs();
double seq_time = (double)(seq_time_end - seq_time_start) / 1000000;
fprintf(stderr, "Seq. execution time = %f s\n", seq_time);
}
if (par_res == seq_res)
fprintf(stderr, "%s(%d,%d), check result = %s\n", argv[0], num, cutoff_value, "SUCCESS");
else
fprintf(stderr, "%s(%d,%d), check result = %s\n", argv[0], num, cutoff_value, "FAILURE");
#endif
return 0;
} /*}}}*/
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx = 0, Ny = 0, Nz = 0, Nt = 0; /* zero defaults avoid uninitialized reads when arguments are missing */
if (argc > 3) {
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 8;
tile_size[3] = 512;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
/* This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it. */
/* glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default. */
/* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
Unicode 6.0. */
/* We do not support C11 <threads.h>. */
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
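/* Tiled, time-skewed loop nest generated by PLUTO/CLooG: t1..t4 enumerate the
   time/space tiles (tile sizes set above: 24 x 24 x 8 x 512), t5 is the time step
   inside a tile, t6..t8 sweep the z/y/x points, and the innermost x loop is
   marked for vectorization. */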
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,12);t1++) {
lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(3*t1-1,2)),ceild(24*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(12*t1+Ny+21,8)),floord(24*t2+Ny+20,8)),floord(24*t1-24*t2+Nz+Ny+19,8));t3++) {
for (t4=max(max(max(0,ceild(3*t1-127,128)),ceild(24*t2-Nz-508,512)),ceild(8*t3-Ny-508,512));t4<=min(min(min(min(floord(Nt+Nx-4,512),floord(12*t1+Nx+21,512)),floord(24*t2+Nx+20,512)),floord(8*t3+Nx+4,512)),floord(24*t1-24*t2+Nz+Nx+19,512));t4++) {
for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),8*t3-Ny+2),512*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),8*t3+6),512*t4+510),24*t1-24*t2+Nz+21);t5++) {
for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
lbv=max(512*t4,t5+1);
ubv=min(512*t4+511,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
GB_binop__bclr_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bclr_uint8
// A.*B function (eWiseMult): GB_AemultB__bclr_uint8
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bclr_uint8
// C+=b function (dense accum): GB_Cdense_accumb__bclr_uint8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bclr_uint8
// C=scalar+B GB_bind1st__bclr_uint8
// C=scalar+B' GB_bind1st_tran__bclr_uint8
// C=A+scalar GB_bind2nd__bclr_uint8
// C=A'+scalar GB_bind2nd_tran__bclr_uint8
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = GB_BITCLR (aij, bij, uint8_t, 8)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_BITCLR (x, y, uint8_t, 8) ;
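// (GB_BITCLR is GraphBLAS's bitwise "clear bit" operator: it clears the bit
// of x selected by y within the 8 bits of a uint8_t; out-of-range bit
// positions are presumably left unchanged -- see the GB_BITCLR definition in
// GB.h.)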
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BCLR || GxB_NO_UINT8 || GxB_NO_BCLR_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__bclr_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__bclr_uint8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__bclr_uint8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__bclr_uint8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__bclr_uint8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__bclr_uint8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t bij = Bx [p] ;
Cx [p] = GB_BITCLR (x, bij, uint8_t, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__bclr_uint8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
Cx [p] = GB_BITCLR (aij, y, uint8_t, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = GB_BITCLR (x, aij, uint8_t, 8) ; \
}
GrB_Info GB_bind1st_tran__bclr_uint8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = GB_BITCLR (aij, y, uint8_t, 8) ; \
}
GrB_Info GB_bind2nd_tran__bclr_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
streaming_find_most_influential.h | //===------------------------------------------------------------*- C++ -*-===//
//
// Ripples: A C++ Library for Influence Maximization
// Marco Minutoli <marco.minutoli@pnnl.gov>
// Pacific Northwest National Laboratory
//
//===----------------------------------------------------------------------===//
//
// Copyright (c) 2019, Battelle Memorial Institute
//
// Battelle Memorial Institute (hereinafter Battelle) hereby grants permission
// to any person or entity lawfully obtaining a copy of this software and
// associated documentation files (hereinafter “the Software”) to redistribute
// and use the Software in source and binary forms, with or without
// modification. Such person or entity may use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and may permit
// others to do so, subject to the following conditions:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimers.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// 3. Other than as used herein, neither the name Battelle Memorial Institute or
// Battelle may be used in any form whatsoever without the express written
// consent of Battelle.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL BATTELLE OR CONTRIBUTORS BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//===----------------------------------------------------------------------===//
#ifndef RIPPLES_STREAMING_FIND_MOST_INFLUENTIAL_H
#define RIPPLES_STREAMING_FIND_MOST_INFLUENTIAL_H
#include <cstddef>
#include <queue>
#include <utility>
#include <vector>
#include "omp.h"
#include "ripples/generate_rrr_sets.h"
#include "ripples/partition.h"
#ifdef RIPPLES_ENABLE_CUDA
#include "ripples/cuda/cuda_utils.h"
#include "ripples/cuda/find_most_influential.h"
#endif
namespace ripples {
template <typename GraphTy>
class FindMostInfluentialWorker {
public:
using rrr_set_iterator = typename RRRsets<GraphTy>::iterator;
using vertex_type = typename GraphTy::vertex_type;
virtual ~FindMostInfluentialWorker() {}
virtual PartitionIndices<rrr_set_iterator> LoadData(rrr_set_iterator B,
rrr_set_iterator E) = 0;
virtual void InitialCount() = 0;
virtual void UpdateCounters(vertex_type last_seed) = 0;
virtual void ReduceCounters(size_t step) = 0;
virtual void set_first_rrr_set(rrr_set_iterator I) = 0;
virtual bool has_work() = 0;
};
#ifdef RIPPLES_ENABLE_CUDA
template <typename GraphTy>
class GPUFindMostInfluentialWorker : public FindMostInfluentialWorker<GraphTy> {
public:
using rrr_set_iterator =
typename FindMostInfluentialWorker<GraphTy>::rrr_set_iterator;
using vertex_type = typename GraphTy::vertex_type;
GPUFindMostInfluentialWorker(size_t device_number, size_t num_nodes,
std::vector<uint32_t *> &device_counters,
size_t reduction_target, size_t reduction_step,
uint32_t *d_counters_dest)
: device_number_(device_number),
d_counters_(device_counters[device_number]),
d_rr_vertices_(nullptr),
d_rr_edges_(nullptr),
d_mask_(nullptr),
d_rr_set_size_(0),
num_nodes_(num_nodes),
reduction_target_(reduction_target),
reduction_step_(reduction_step),
d_counters_dest_(d_counters_dest) {
cuda_set_device(device_number);
cuda_stream_create(&stream_);
if (reduction_target_ != device_number) {
cuda_enable_p2p(reduction_target_);
}
}
virtual ~GPUFindMostInfluentialWorker() {
cuda_set_device(device_number_);
if (reduction_target_ != device_number_) {
cuda_disable_p2p(reduction_target_);
}
cuda_stream_destroy(stream_);
cuda_free(d_pool_);
// cuda_free(d_rr_vertices_);
// cuda_free(d_rr_edges_);
// cuda_free(d_mask_);
}
void set_first_rrr_set(rrr_set_iterator I) {}
bool has_work() { return d_rr_set_size_ != 0; }
PartitionIndices<rrr_set_iterator> LoadData(rrr_set_iterator B,
rrr_set_iterator E) {
cuda_set_device(device_number_);
    // Ask the runtime how much memory is available; the best we can do is guess.
    // Memory fragmentation might get in the way, so we query what is free
    // and then request half of that.
size_t avail_space = cuda_available_memory() >> 1;
bool allocSuccess = cuda_malloc(reinterpret_cast<void **>(&d_pool_), avail_space);
    assert(allocSuccess &&
           "Not enough memory on the GPUs. Our heuristic for acquiring memory "
           "to perform seed-selection failed. Please re-run the application "
           "using --seed-select-max-gpu-workers 0.");
cuda_memset(reinterpret_cast<void *>(d_pool_), 0, avail_space);
size_t space = 0;
auto pivot = B;
size_t num_elements = 0;
for (; pivot < E && space < avail_space; ++pivot) {
// Two uint32_t per the RRR sets + 1 byte for the mask.
num_elements += pivot->size();
space += pivot->size() * sizeof(uint32_t) + sizeof(uint32_t);
}
// cuda_malloc(reinterpret_cast<void **>(&d_mask_), std::distance(B, pivot));
d_mask_ = d_pool_;
// cuda_memset(reinterpret_cast<void *>(d_mask_), 0, std::distance(B, pivot));
// cuda_check(__FILE__, __LINE__);
space -= sizeof(uint32_t) * std::distance(B, pivot);
size_t BufferSize = 1 << 24;
// cuda_malloc(reinterpret_cast<void **>(&d_rr_edges_), space >> 1);
d_rr_edges_ = d_mask_ + std::distance(B, pivot);
d_rr_vertices_ = d_rr_edges_ + num_elements;
// cuda_malloc(reinterpret_cast<void **>(&d_rr_vertices_), space >> 1);
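    // d_pool_ is carved into consecutive regions instead of per-buffer
    // allocations: the first std::distance(B, pivot) uint32_t slots hold the
    // mask, followed by num_elements slots for the RRR-set edges and
    // num_elements slots recording, for each edge entry, the index of the
    // RRR set it belongs to.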
std::vector<uint32_t> rr_edges_buffer_to_load;
std::vector<uint32_t> rr_edges_buffer_to_send;
rr_edges_buffer_to_load.reserve(BufferSize);
rr_edges_buffer_to_send.reserve(BufferSize);
std::vector<uint32_t> rr_vertices_buffer_to_load;
std::vector<uint32_t> rr_vertices_buffer_to_send;
rr_vertices_buffer_to_load.reserve(BufferSize);
rr_vertices_buffer_to_send.reserve(BufferSize);
uint32_t id = 0;
auto to_copy = B;
size_t elements_to_copy = num_elements;
uint32_t *d_rrr_index = d_rr_vertices_;
uint32_t *d_rrr_sets = d_rr_edges_;
for (; to_copy < pivot; ++to_copy, ++id) {
if (rr_edges_buffer_to_send.size() > BufferSize) break;
rr_edges_buffer_to_send.insert(rr_edges_buffer_to_send.end(),
to_copy->begin(), to_copy->end());
rr_vertices_buffer_to_send.insert(rr_vertices_buffer_to_send.end(),
to_copy->size(), id);
elements_to_copy -= to_copy->size();
d_rr_set_size_ += to_copy->size();
}
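    // Host-to-device transfers are double buffered: the "send" buffers are
    // copied on stream_ (presumably asynchronously) while the "load" buffers
    // are refilled from the next RRR sets; the stream is then synchronized and
    // the two buffer pairs swapped before the next round.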
while (elements_to_copy > 0) {
cuda_h2d(reinterpret_cast<void *>(d_rrr_sets),
reinterpret_cast<void *>(rr_edges_buffer_to_send.data()),
sizeof(uint32_t) * rr_edges_buffer_to_send.size(), stream_);
cuda_h2d(reinterpret_cast<void *>(d_rrr_index),
reinterpret_cast<void *>(rr_vertices_buffer_to_send.data()),
sizeof(uint32_t) * rr_vertices_buffer_to_send.size(), stream_);
for (; to_copy < pivot; ++to_copy, ++id) {
if (rr_edges_buffer_to_load.size() > BufferSize) break;
rr_edges_buffer_to_load.insert(rr_edges_buffer_to_load.end(),
to_copy->begin(), to_copy->end());
rr_vertices_buffer_to_load.insert(rr_vertices_buffer_to_load.end(),
to_copy->size(), id);
elements_to_copy -= to_copy->size();
d_rr_set_size_ += to_copy->size();
}
cuda_sync(stream_);
d_rrr_index += rr_vertices_buffer_to_send.size();
d_rrr_sets += rr_edges_buffer_to_send.size();
rr_vertices_buffer_to_send.swap(rr_vertices_buffer_to_load);
rr_edges_buffer_to_send.swap(rr_edges_buffer_to_load);
rr_vertices_buffer_to_load.clear();
rr_edges_buffer_to_load.clear();
}
if (rr_vertices_buffer_to_send.size() > 0) {
cuda_h2d(reinterpret_cast<void *>(d_rrr_index),
reinterpret_cast<void *>(rr_vertices_buffer_to_send.data()),
sizeof(uint32_t) * rr_vertices_buffer_to_send.size(), stream_);
cuda_h2d(reinterpret_cast<void *>(d_rrr_sets),
reinterpret_cast<void *>(rr_edges_buffer_to_send.data()),
sizeof(uint32_t) * rr_edges_buffer_to_send.size(), stream_);
cuda_sync(stream_);
}
return PartitionIndices<rrr_set_iterator>(B, E, pivot);
}
void InitialCount() {
cuda_set_device(device_number_);
cuda_memset(d_counters_, 0, num_nodes_ * sizeof(uint32_t), stream_);
CudaCountOccurrencies(d_counters_, d_rr_edges_, d_rr_set_size_, num_nodes_,
stream_);
cuda_sync(stream_);
}
void UpdateCounters(vertex_type last_seed) {
cuda_set_device(device_number_);
CudaUpdateCounters(stream_, d_rr_set_size_, d_rr_vertices_, d_rr_edges_,
d_mask_, d_counters_, num_nodes_, last_seed);
cuda_sync(stream_);
}
void ReduceCounters(size_t step) {
if (step != reduction_step_) return;
cuda_set_device(device_number_);
// Accumulate in target array.
CudaReduceCounters(stream_, d_counters_, d_counters_dest_, num_nodes_);
}
private:
cudaStream_t stream_;
size_t device_number_;
size_t reduction_step_;
size_t reduction_target_;
uint32_t *d_counters_;
uint32_t *d_counters_dest_;
uint32_t *d_rr_vertices_;
uint32_t *d_rr_edges_;
uint32_t *d_pool_;
size_t d_rr_set_size_;
uint32_t *d_mask_;
size_t num_nodes_;
};
#endif
template <typename GraphTy>
class CPUFindMostInfluentialWorker : public FindMostInfluentialWorker<GraphTy> {
using vertex_type = typename GraphTy::vertex_type;
using rrr_set_iterator =
typename FindMostInfluentialWorker<GraphTy>::rrr_set_iterator;
public:
CPUFindMostInfluentialWorker(
std::vector<vertex_type> &global_count,
std::vector<std::pair<vertex_type, size_t>> &queue_storage,
rrr_set_iterator begin, rrr_set_iterator end, size_t num_threads,
uint32_t *d_cpu_counters)
: global_count_(global_count),
queue_storage_(queue_storage),
begin_(begin),
end_(end),
num_threads_(num_threads),
d_cpu_counters_(d_cpu_counters) {}
virtual ~CPUFindMostInfluentialWorker() {}
PartitionIndices<rrr_set_iterator> LoadData(rrr_set_iterator B,
rrr_set_iterator E) {
return PartitionIndices<rrr_set_iterator>(end_, end_, end_);
}
bool has_work() { return begin_ != end_; }
void set_first_rrr_set(rrr_set_iterator I) { begin_ = I; }
void InitialCount() {
CountOccurrencies(begin_, end_, global_count_.begin(), global_count_.end(),
num_threads_);
    // If there are GPU workers (d_cpu_counters_ is set), the heap is not used,
    // so skip its initialization.
if (d_cpu_counters_ != nullptr) return;
InitHeapStorage(global_count_.begin(), global_count_.end(),
queue_storage_.begin(), queue_storage_.end(), num_threads_);
}
void UpdateCounters(vertex_type last_seed) {
if (!has_work()) return;
auto cmp = [=](const RRRset<GraphTy> &a) -> auto {
return !std::binary_search(a.begin(), a.end(), last_seed);
};
auto itr = partition(begin_, end_, cmp, num_threads_);
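    // Whichever side of the partition is smaller drives the update: if the
    // sets covered by last_seed ([itr, end_)) are the minority, subtract only
    // their contribution from the counters; otherwise it is cheaper to zero
    // the counters and recount the still-uncovered sets ([begin_, itr)).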
if (std::distance(itr, end_) < std::distance(begin_, itr)) {
ripples::UpdateCounters(itr, end_, global_count_, num_threads_);
} else {
#pragma omp parallel for simd num_threads(num_threads_)
for (size_t i = 0; i < global_count_.size(); ++i) global_count_[i] = 0;
CountOccurrencies(begin_, itr, global_count_.begin(), global_count_.end(),
num_threads_);
}
end_ = itr;
}
void ReduceCounters(size_t step) {
#ifdef RIPPLES_ENABLE_CUDA
if (step == 1 && has_work()) {
cuda_set_device(size_t(0));
cuda_h2d(reinterpret_cast<void *>(d_cpu_counters_),
reinterpret_cast<void *>(global_count_.data()),
sizeof(uint32_t) * global_count_.size());
}
#endif
}
private:
std::vector<vertex_type> &global_count_;
std::vector<std::pair<vertex_type, size_t>> &queue_storage_;
rrr_set_iterator begin_;
rrr_set_iterator end_;
size_t num_threads_;
uint32_t *d_cpu_counters_;
};
template <typename GraphTy>
struct CompareHeap {
using vertex_type = typename GraphTy::vertex_type;
bool operator()(std::pair<vertex_type, size_t> &a,
std::pair<vertex_type, size_t> &b) {
return a.second < b.second;
}
};
template <typename GraphTy>
class StreamingFindMostInfluential {
using vertex_type = typename GraphTy::vertex_type;
using worker_type = FindMostInfluentialWorker<GraphTy>;
using cpu_worker_type = CPUFindMostInfluentialWorker<GraphTy>;
#ifdef RIPPLES_ENABLE_CUDA
using gpu_worker_type = GPUFindMostInfluentialWorker<GraphTy>;
#endif
using rrr_set_iterator =
typename FindMostInfluentialWorker<GraphTy>::rrr_set_iterator;
CompareHeap<GraphTy> cmpHeap;
using priorityQueue =
std::priority_queue<std::pair<vertex_type, size_t>,
std::vector<std::pair<vertex_type, size_t>>,
decltype(cmpHeap)>;
public:
StreamingFindMostInfluential(const GraphTy &G, RRRsets<GraphTy> &RRRsets,
size_t num_max_cpus, size_t num_gpus)
: num_cpu_workers_(num_max_cpus),
num_gpu_workers_(num_gpus),
workers_(),
vertex_coverage_(G.num_nodes()),
queue_storage_(G.num_nodes()),
d_counters_(num_gpus, 0),
RRRsets_(RRRsets),
reduction_steps_(1),
d_cpu_counters_(nullptr) {
#ifdef RIPPLES_ENABLE_CUDA
// Get Number of device and allocate 1 thread each.
// num_gpu_workers_ = cuda_num_devices();
num_cpu_workers_ -= num_gpu_workers_;
std::fill(vertex_coverage_.begin(), vertex_coverage_.end(), 0);
// Allocate Counters
if (num_gpu_workers_ > 0) {
#pragma omp parallel num_threads(num_gpu_workers_)
{
size_t rank = omp_get_thread_num();
cuda_set_device(rank);
cuda_malloc(reinterpret_cast<void **>(&d_counters_[rank]),
sizeof(uint32_t) * G.num_nodes());
if (rank == 0) {
cuda_malloc(reinterpret_cast<void **>(&d_cpu_counters_),
sizeof(uint32_t) * G.num_nodes());
}
}
}
#endif
workers_.push_back(new CPUFindMostInfluentialWorker<GraphTy>(
vertex_coverage_, queue_storage_, RRRsets_.begin(), RRRsets_.end(),
num_cpu_workers_, d_cpu_counters_));
#ifdef RIPPLES_ENABLE_CUDA
if (num_gpu_workers_ == 0) return;
// Define Reduction tree on GPU workers.
auto tree = cuda_get_reduction_tree();
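    // tree[i] is assumed to give, for GPU i, the device that receives its
    // partial counters and the reduction step at which that transfer happens;
    // reduction_steps_ records the deepest step so ReduceCounters() can walk
    // the tree down to device 0.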
// Construct GPU workers
for (size_t i = 0; i < num_gpu_workers_; ++i) {
reduction_steps_ = std::max(reduction_steps_, tree[i].second);
uint32_t *dest = i == 0 ? d_cpu_counters_ : d_counters_[tree[i].first];
workers_.push_back(new GPUFindMostInfluentialWorker<GraphTy>(
i, G.num_nodes(), d_counters_, tree[i].first, tree[i].second, dest));
}
#endif
}
~StreamingFindMostInfluential() {
#ifdef RIPPLES_ENABLE_CUDA
for (auto b : d_counters_) {
cuda_free(b);
}
if (num_gpu_workers_ > 0) cuda_free(d_cpu_counters_);
#endif
for (auto w : workers_) {
delete w;
}
}
void InitialCount() {
#pragma omp parallel num_threads(num_gpu_workers_ + 1)
{
size_t rank = omp_get_thread_num();
workers_[rank]->InitialCount();
}
}
void ReduceCounters() {
if (num_gpu_workers_ == 0) return;
if (!workers_[0]->has_work() && num_gpu_workers_ == 1) return;
for (ssize_t i = reduction_steps_; i >= 0; --i) {
#pragma omp parallel num_threads(num_gpu_workers_ + 1)
{
size_t rank = omp_get_thread_num();
if (workers_[rank]->has_work()) {
workers_[rank]->ReduceCounters(i);
}
}
}
}
void UpdateCounters(vertex_type last_seed) {
#pragma omp parallel num_threads(num_gpu_workers_ + 1)
{
size_t rank = omp_get_thread_num();
workers_[rank]->UpdateCounters(last_seed);
}
}
priorityQueue getHeap() {
priorityQueue queue(cmpHeap, std::move(queue_storage_));
return queue;
}
std::pair<vertex_type, size_t> getNextSeed(priorityQueue &queue_) {
#ifdef RIPPLES_ENABLE_CUDA
if (num_gpu_workers_ != 0) {
ReduceCounters();
uint32_t *global_counter = d_counters_[0];
if (workers_[0]->has_work()) global_counter = d_cpu_counters_;
cuda_set_device(0);
auto result = CudaMaxElement(global_counter, vertex_coverage_.size());
return result;
}
#endif
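    // CPU path: lazy (CELF-style) greedy selection. Counts cached in the heap
    // may be stale after earlier seeds were selected, so a popped entry whose
    // cached count exceeds the current coverage is refreshed and pushed back;
    // the first entry popped with an up-to-date count is the next seed.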
while (!queue_.empty()) {
auto element = queue_.top();
queue_.pop();
if (element.second > vertex_coverage_[element.first]) {
element.second = vertex_coverage_[element.first];
queue_.push(element);
continue;
}
return element;
}
throw std::logic_error("Reached a mighty Unreachable State");
}
void LoadDataToDevice() {
if (num_gpu_workers_ == 0) return;
std::vector<PartitionIndices<rrr_set_iterator>> indices(num_gpu_workers_);
#pragma omp parallel num_threads(num_gpu_workers_ + 1)
{
size_t rank = omp_get_thread_num();
if (rank != 0) {
size_t threadnum = omp_get_thread_num() - 1,
numthreads = omp_get_num_threads() - 1;
size_t low = RRRsets_.size() * threadnum / numthreads,
high = RRRsets_.size() * (threadnum + 1) / numthreads;
indices[threadnum] = workers_[rank]->LoadData(
RRRsets_.begin() + low,
std::min(RRRsets_.end(), RRRsets_.begin() + high));
}
}
size_t num_threads = num_gpu_workers_;
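    // Merge the per-GPU partition indices with a pairwise tree reduction: in
    // round j, partition i absorbs partition i+j, so after ceil(log2(workers))
    // rounds indices[0] holds the overall split between host-resident and
    // device-resident RRR sets.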
for (size_t j = 1; j < num_threads; j <<= 1) {
#pragma omp parallel num_threads(num_threads >> j)
{
#pragma omp for schedule(dynamic)
for (size_t i = 0; i < (num_threads - j); i += j * 2) {
indices[i] = indices[i].mergeBlocks(indices[i + j],
std::min(2 * j, num_threads));
}
}
}
workers_[0]->set_first_rrr_set(indices[0].pivot);
}
auto find_most_influential_set(size_t k) {
omp_set_max_active_levels(2);
LoadDataToDevice();
InitialCount();
auto queue = getHeap();
std::vector<vertex_type> result;
result.reserve(k);
size_t uncovered = RRRsets_.size();
std::chrono::duration<double, std::milli> seedSelection(0);
while (uncovered != 0) {
auto start = std::chrono::high_resolution_clock::now();
auto element = getNextSeed(queue);
auto end = std::chrono::high_resolution_clock::now();
seedSelection += end - start;
uncovered -= element.second;
result.push_back(element.first);
if (result.size() == k) break;
UpdateCounters(element.first);
}
double f = double(RRRsets_.size() - uncovered) / RRRsets_.size();
omp_set_max_active_levels(1);
return std::make_pair(f, result);
}
private:
size_t num_cpu_workers_, num_gpu_workers_;
ssize_t reduction_steps_;
RRRsets<GraphTy> &RRRsets_;
std::vector<worker_type *> workers_;
std::vector<uint32_t *> d_counters_;
uint32_t *d_cpu_counters_;
std::vector<uint32_t> vertex_coverage_;
std::vector<std::pair<vertex_type, size_t>> queue_storage_;
};
} // namespace ripples
#endif
|
Evaluate.h | /*
Copyright (c) 2005-2016, University of Oxford.
All rights reserved.
University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.
This file is part of Aboria.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the University of Oxford nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef EVALUATE_H_
#define EVALUATE_H_
#include "Symbolic.h"
#include "detail/Evaluate.h"
namespace Aboria {
/// Evaluates a non-linear operator \p expr over a set of particles
/// given by label \p label and stores the result, using the functor
/// \p Functor, in variable with type \p VariableType
template <typename VariableType, typename Functor, typename ExprRHS,
typename LabelType>
void evaluate_nonlinear(ExprRHS const &expr, LabelType &label) {
typedef typename VariableType::value_type value_type;
typedef typename LabelType::particles_type particles_type;
typedef typename particles_type::position position;
typedef
typename proto::matches<ExprRHS,
detail::is_not_aliased<VariableType, LabelType>>
not_aliased;
typedef typename LabelType::particles_type particles_type;
particles_type &particles = label.get_particles();
// check expr is a univariate expression and that it refers to the same
// particles container
check_valid_assign_expr(label, expr);
  // if aliased then we need to copy to a temporary buffer first
std::vector<value_type> &buffer =
(not_aliased::value) ? get<VariableType>(particles)
: get<VariableType>(label.get_buffers());
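  // If the expression cannot alias VariableType (not_aliased holds), results
  // are written directly into the particle container; otherwise they go into
  // the label's scratch buffer and are copied back after the loop below.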
buffer.resize(particles.size());
// evaluate expression for all particles and store in buffer
const size_t n = particles.size();
Functor functor;
#ifdef HAVE_OPENMP
#pragma omp parallel for
#endif
for (size_t i = 0; i < n; i++) {
buffer[i] =
functor(get<VariableType>(particles)[i], eval(expr, particles[i]));
}
// if aliased then copy back from the buffer
if (not_aliased::value == false) {
const size_t n = particles.size();
#ifdef HAVE_OPENMP
#pragma omp parallel for
#endif
for (size_t i = 0; i < n; i++) {
get<VariableType>(particles[i]) = buffer[i];
}
}
if (boost::is_same<VariableType, position>::value) {
particles.update_positions();
}
if (boost::is_same<VariableType, alive>::value) {
particles.update_positions();
}
}
/*
/// Evaluates a matrix-free linear operator given by \p expr \p if_expr,
/// and particle sets \p a and \p b on a vector rhs and
/// accumulates the result in vector lhs
template<typename Expr,
typename IfExpr,
typename ParticlesTypeA,
typename ParticlesTypeB,
typename VectorLHS,
typename VectorRHS
>
void evaluate_linear(Expr &expr,
IfExpr &if_expr,
const ParticlesTypeA &a,
const ParticlesTypeB &b,
VectorLHS &lhs, const VectorRHS &rhs) {
typedef typename ParticlesTypeA::double_d double_d;
typedef typename ParticlesTypeA::position position;
const size_t na = a.size();
const size_t nb = b.size();
if (is_trivially_zero(expr) || is_trivially_false(if_expr)) {
//std::cout << "zero a x b block" <<std::endl;
return;
}
if (is_trivially_true(if_expr)) {
//std::cout << "dense "<<na<<" x "<<nb<<" block" <<std::endl;
ASSERT(!a.get_periodic().any(),"periodic does not work with dense");
const size_t parallel_size = 20;
const size_t block_size = 20;
if (na > parallel_size) {
#pragma omp parallel for
for (size_t i=0; i<na; ++i) {
typename ParticlesTypeA::const_reference ai = a[i];
double sum = 0;
for (size_t j=0; j<nb; ++j) {
typename ParticlesTypeB::const_reference bj = b[j];
sum +=
eval(expr,get<position>(bj)-get<position>(ai),ai,bj)*rhs(j);
}
lhs[i] += sum;
}
} else {
for (size_t i=0; i<na; ++i) {
typename ParticlesTypeA::const_reference ai = a[i];
double sum = 0;
for (size_t j=0; j<nb; ++j) {
typename ParticlesTypeB::const_reference bj = b[j];
//std::cout << "a = "<<get<position>(ai)<<" b =
"<<get<position>(bj)<<std::endl;
//std::cout << "using dx =
"<<get<position>(bj)-get<position>(ai)<<" rhs(j) = "<<rhs(j)<<" eval =
"<<eval(expr,get<position>(bj)-get<position>(ai),ai,bj)<<std::endl; sum +=
eval(expr,get<position>(bj)-get<position>(ai),ai,bj)*rhs[j];
}
lhs[i] += sum;
}
}
} else {
//std::cout << "sparse a x b block" <<std::endl;
#pragma omp parallel for
for (size_t i=0; i<na; ++i) {
typename ParticlesTypeA::const_reference ai = a[i];
double sum = 0;
//std::cout << "evaluating fucntion for particle at
"<<get<position>(ai)<<std::endl; for (auto pairj:
box_search(b.get_query(),get<position>(ai))) { const double_d & dx =
tuple_ns::get<1>(pairj); typename ParticlesTypeB::const_reference bj =
tuple_ns::get<0>(pairj);
//std::cout << "looking at particle with dx = "<<dx<<std::endl;
const size_t j = &get<position>(bj) - get<position>(b).data();
if (eval(if_expr,dx,ai,bj)) {
//std::cout <<"if expression is true. eval =
"<<eval(expr,dx,ai,bj)<<std::endl; sum += eval(expr,dx,ai,bj)*rhs(j);
}
}
lhs[i] += sum;
}
}
}
template<typename Expr,
typename IfExpr,
typename ParticlesTypeA,
typename ParticlesTypeB,
typename Triplet
>
void assemble(Expr &expr,
IfExpr &if_expr,
const ParticlesTypeA &a,
const ParticlesTypeB &b,
std::vector<Triplet>& triplets,
const size_t startI=0, const size_t startJ=0) {
typedef typename ParticlesTypeB::double_d double_d;
typedef typename ParticlesTypeB::position position;
const size_t na = a.size();
const size_t nb = b.size();
if (is_trivially_zero(expr) || is_trivially_false(if_expr)) {
//zero a x b block
return;
}
if (is_trivially_true(if_expr)) {
//dense a x b block
//std::cout << "dense a x b block" << std::endl;
ASSERT(!a.get_periodic().any(),"periodic does not work with dense");
for (size_t i=0; i<na; ++i) {
typename ParticlesTypeA::const_reference ai = a[i];
for (size_t j=0; j<nb; ++j) {
typename ParticlesTypeB::const_reference bj = b[j];
triplets.push_back(Triplet(i+startI,j+startJ,eval(expr,get<position>(bj)-get<position>(ai),ai,bj)));
}
}
} else {
//sparse a x b block
//std::cout << "sparse a x b block" << std::endl;
for (size_t i=0; i<na; ++i) {
typename ParticlesTypeA::const_reference ai = a[i];
for (auto pairj: box_search(b.get_query(),get<position>(ai))) {
const double_d & dx = tuple_ns::get<1>(pairj);
typename ParticlesTypeB::const_reference bj =
tuple_ns::get<0>(pairj); const size_t j = &get<position>(bj) -
get<position>(b).data(); if (eval(if_expr,dx,ai,bj)) {
triplets.push_back(Triplet(i+startI,j+startJ,eval(expr,dx,ai,bj)));
}
}
}
}
}
template<typename Expr,
typename IfExpr,
typename ParticlesTypeA,
typename ParticlesTypeB,
typename MatrixType
>
void assemble(Expr &expr,
IfExpr &if_expr,
const ParticlesTypeA &a,
const ParticlesTypeB &b,
const MatrixType &matrix) {
typedef typename ParticlesTypeB::double_d double_d;
typedef typename ParticlesTypeB::position position;
const size_t na = a.size();
const size_t nb = b.size();
if (is_trivially_zero(expr) || is_trivially_false(if_expr)) {
//zero a x b block
// hack from
https://eigen.tuxfamily.org/dox/TopicFunctionTakingEigenTypes.html const_cast<
MatrixType& >(matrix).setZero(); return;
}
if (is_trivially_true(if_expr)) {
//dense a x b block
ASSERT(!a.get_periodic().any(),"periodic does not work with dense");
for (size_t i=0; i<na; ++i) {
typename ParticlesTypeA::const_reference ai = a[i];
for (size_t j=0; j<nb; ++j) {
typename ParticlesTypeB::const_reference bj = b[j];
const double_d dx = get<position>(bj)-get<position>(ai);
const_cast< MatrixType& >(matrix)(i,j) = eval(expr,dx,ai,bj);
}
}
} else {
//sparse a x b block
for (size_t i=0; i<na; ++i) {
typename ParticlesTypeA::const_reference ai = a[i];
for (auto pairj: box_search(b.get_query(),get<position>(ai))) {
const double_d & dx = tuple_ns::get<1>(pairj);
typename ParticlesTypeB::const_reference bj =
tuple_ns::get<0>(pairj); const size_t j = &get<position>(bj) -
get<position>(b).data(); if (eval(if_expr,dx,ai,bj)) { const_cast< MatrixType&
>(matrix)(i,j) = eval(expr,dx,ai,bj); } else { const_cast< MatrixType&
>(matrix)(i,j) = 0;
}
}
}
}
}
*/
} // namespace Aboria
#endif
|
correlation.origin.pluto.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) (((n)<0) ? -((-(n))/(d)) : ((n)+(d)-1)/(d))
#define floord(n,d) (((n)<0) ? -((-(n)+(d)-1)/(d)) : (n)/(d))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/**
* This version is stamped on May 10, 2016
*
* Contact:
* Louis-Noel Pouchet <pouchet.ohio-state.edu>
* Tomofumi Yuki <tomofumi.yuki.fr>
*
* Web address: http://polybench.sourceforge.net
*/
/* correlation.c: this file is part of PolyBench/C */
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
#include "correlation.h"
/* Array initialization. */
static void init_array(int m, int n, DATA_TYPE *float_n,
DATA_TYPE POLYBENCH_2D(data, N, M, n, m)) {
int i, j;
*float_n = (DATA_TYPE)N;
for (i = 0; i < N; i++)
for (j = 0; j < M; j++)
data[i][j] = (DATA_TYPE)(i * j) / M + i;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static void print_array(int m, DATA_TYPE POLYBENCH_2D(corr, M, M, m, m))
{
int i, j;
POLYBENCH_DUMP_START;
POLYBENCH_DUMP_BEGIN("corr");
for (i = 0; i < m; i++)
for (j = 0; j < m; j++) {
if ((i * m + j) % 20 == 0)
fprintf(POLYBENCH_DUMP_TARGET, "\n");
fprintf(POLYBENCH_DUMP_TARGET, DATA_PRINTF_MODIFIER, corr[i][j]);
}
POLYBENCH_DUMP_END("corr");
POLYBENCH_DUMP_FINISH;
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static void kernel_correlation(int m, int n, DATA_TYPE float_n,
DATA_TYPE POLYBENCH_2D(data, N, M, n, m),
DATA_TYPE POLYBENCH_2D(corr, M, M, m, m),
DATA_TYPE POLYBENCH_1D(mean, M, m),
DATA_TYPE POLYBENCH_1D(stddev, M, m)) {
int i, j, k;
DATA_TYPE eps = SCALAR_VAL(0.1);
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
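  /* Pluto-tiled variant of the PolyBench correlation kernel: the loop nests
     below (32x32 tiles, one "#pragma omp parallel for" per phase) compute the
     column means, center the data, compute the standard deviations, normalize
     the data, accumulate the upper triangle of corr = data^T * data, and
     finally mirror it into the lower triangle. */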
corr[_PB_M - 1][_PB_M - 1] = SCALAR_VAL(1.0);;
lbp=0;
ubp=floord(_PB_M-2,32);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=t2;t3<=floord(_PB_M-1,32);t3++) {
for (t4=32*t2;t4<=min(min(_PB_M-2,32*t2+31),32*t3+30);t4++) {
for (t5=max(32*t3,t4+1);t5<=min(_PB_M-1,32*t3+31);t5++) {
corr[t4][t5] = SCALAR_VAL(0.0);;
}
}
}
}
lbp=0;
ubp=floord(_PB_M-1,32);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=32*t2;t3<=min(_PB_M-2,32*t2+31);t3++) {
corr[t3][t3] = SCALAR_VAL(1.0);;
stddev[t3] = SCALAR_VAL(0.0);;
mean[t3] = SCALAR_VAL(0.0);;
}
if (t2 >= ceild(_PB_M-32,32)) {
stddev[(_PB_M-1)] = SCALAR_VAL(0.0);;
mean[(_PB_M-1)] = SCALAR_VAL(0.0);;
}
}
if (_PB_N >= 1) {
lbp=0;
ubp=floord(_PB_M-1,32);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=0;t3<=floord(_PB_N-1,32);t3++) {
for (t4=32*t3;t4<=min(_PB_N-1,32*t3+31);t4++) {
for (t5=32*t2;t5<=min(_PB_M-1,32*t2+31);t5++) {
mean[t5] += data[t4][t5];;
}
}
}
}
}
lbp=0;
ubp=floord(_PB_M-1,32);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=32*t2;t3<=min(_PB_M-1,32*t2+31);t3++) {
mean[t3] /= float_n;;
}
}
if (_PB_N >= 1) {
lbp=0;
ubp=floord(_PB_M-1,32);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=0;t3<=floord(_PB_N-1,32);t3++) {
for (t4=32*t3;t4<=min(_PB_N-1,32*t3+31);t4++) {
for (t5=32*t2;t5<=min(_PB_M-1,32*t2+31);t5++) {
stddev[t5] += (data[t4][t5] - mean[t5]) * (data[t4][t5] - mean[t5]);;
data[t4][t5] -= mean[t5];;
}
}
}
}
}
lbp=0;
ubp=floord(_PB_M-1,32);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=32*t2;t3<=min(_PB_M-1,32*t2+31);t3++) {
stddev[t3] /= float_n;;
stddev[t3] = SQRT_FUN(stddev[t3]);;
stddev[t3] = stddev[t3] <= eps ? SCALAR_VAL(1.0) : stddev[t3];;
}
}
if (_PB_M >= 1) {
lbp=0;
ubp=floord(_PB_N-1,32);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=0;t3<=floord(_PB_M-1,32);t3++) {
for (t4=32*t2;t4<=min(_PB_N-1,32*t2+31);t4++) {
for (t5=32*t3;t5<=min(_PB_M-1,32*t3+31);t5++) {
data[t4][t5] /= SQRT_FUN(float_n) * stddev[t5];;
}
}
}
}
}
if (_PB_N >= 1) {
lbp=0;
ubp=floord(_PB_M-2,32);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=t2;t3<=floord(_PB_M-1,32);t3++) {
for (t4=0;t4<=floord(_PB_N-1,32);t4++) {
for (t5=32*t2;t5<=min(min(_PB_M-2,32*t2+31),32*t3+30);t5++) {
for (t6=32*t4;t6<=min(_PB_N-1,32*t4+31);t6++) {
for (t7=max(32*t3,t5+1);t7<=min(_PB_M-1,32*t3+31);t7++) {
corr[t5][t7] += (data[t6][t5] * data[t6][t7]);;
}
}
}
}
}
}
}
lbp=0;
ubp=floord(_PB_M-2,32);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=t2;t3<=floord(_PB_M-1,32);t3++) {
for (t4=32*t2;t4<=min(min(_PB_M-2,32*t2+31),32*t3+30);t4++) {
for (t5=max(32*t3,t4+1);t5<=min(_PB_M-1,32*t3+31);t5++) {
corr[t5][t4] = corr[t4][t5];;
}
}
}
}
}
int main(int argc, char **argv) {
/* Retrieve problem size. */
int n = N;
int m = M;
/* Variable declaration/allocation. */
DATA_TYPE float_n;
POLYBENCH_2D_ARRAY_DECL(data, DATA_TYPE, N, M, n, m);
POLYBENCH_2D_ARRAY_DECL(corr, DATA_TYPE, M, M, m, m);
POLYBENCH_1D_ARRAY_DECL(mean, DATA_TYPE, M, m);
POLYBENCH_1D_ARRAY_DECL(stddev, DATA_TYPE, M, m);
/* Initialize array(s). */
init_array(m, n, &float_n, POLYBENCH_ARRAY(data));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_correlation(m, n, float_n, POLYBENCH_ARRAY(data),
POLYBENCH_ARRAY(corr), POLYBENCH_ARRAY(mean),
POLYBENCH_ARRAY(stddev));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(corr)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(corr);
POLYBENCH_FREE_ARRAY(mean);
POLYBENCH_FREE_ARRAY(stddev);
return 0;
}
|
pi-omp.c | /* ==================================== *\
|| OpenMP calculate Pi ||
\* ==================================== */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <limits.h>
#include <string.h>
#include <signal.h>
#include <time.h>
#include <omp.h>
#include <mpfr.h>
//#define func(x, size) (sqrt((size)*(size) - (x)*(x)))
volatile long int ctr = 0;
struct timespec start;
void timespec_diff(struct timespec *start, struct timespec *end)
{
	/* subtract, borrowing a second if the nanosecond difference is negative */
	end->tv_sec -= start->tv_sec;
	end->tv_nsec -= start->tv_nsec;
	if(end->tv_nsec < 0)
	{
		end->tv_sec--;
		end->tv_nsec += 1000*1000*1000;
	}
}
void show_speed(union sigval sigval)
{
struct timespec current;
char spin[] = {'-', '\\', '|', '/'};
static int spin_pos = 0;
	clock_gettime(CLOCK_MONOTONIC, &current);
	timespec_diff(&start, &current);
float msec = (float)current.tv_sec * 1000 + (float)current.tv_nsec / 1000 / 1000;
	float speed = ctr / msec; // counts per millisecond == kOP/s
fprintf(stderr, "\rC =%20ld | Avg. Speed = %8.3fkOP/s %c", ctr, speed, spin[spin_pos++]);
spin_pos %= 4;
}
int main(int argc, char **argv)
{
intmax_t scale;
const mpfr_prec_t prec = 4096;
struct timespec end;
MPFR_DECL_INIT(area, prec);
MPFR_DECL_INIT(scale_pow2, prec);
if(argc != 2)
goto error;
scale = atoll(argv[1]);
if(scale <= 0)
goto error;
timer_t timer;
struct sigevent ev =
{
.sigev_notify = SIGEV_THREAD,
.sigev_notify_function = show_speed,
.sigev_notify_attributes = NULL
};
timer_create(CLOCK_MONOTONIC, &ev, &timer);
struct itimerspec period =
{
.it_value.tv_sec=1,
.it_interval.tv_sec=0,
		.it_interval.tv_nsec=500000000L // 500 ms period (2 Hz)
};
timer_settime(timer, TIMER_ABSTIME, &period, NULL);
mpfr_set_zero(area, 1);
mpfr_set_si(scale_pow2, scale, MPFR_RNDN);
	mpfr_sqr(scale_pow2, scale_pow2, MPFR_RNDN); // scale^2
// Start time
clock_gettime(CLOCK_MONOTONIC, &start);
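	// Approximate pi by integrating the quarter circle y = sqrt(scale^2 - x^2)
	// with unit steps in x: the accumulated sum is ~ pi * scale^2 / 4, so the
	// division by scale^2 / 4 after the parallel region recovers pi. Each
	// thread accumulates its own slice in area_slice and the slices are
	// combined in the critical section.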
#pragma omp parallel
{
MPFR_DECL_INIT(area_slice_delta, prec);
MPFR_DECL_INIT(area_slice, prec);
MPFR_DECL_INIT(x_pow2, prec);
mpfr_set_zero(area_slice_delta, 1);
mpfr_set_zero(area_slice, 1);
#pragma omp for
for(intmax_t i = 0; i <= scale; i++)
{
			/* integrate! */
mpfr_set_si(x_pow2, i, MPFR_RNDN);
mpfr_sqr(x_pow2, x_pow2, MPFR_RNDN); // x^2
mpfr_sub(area_slice_delta, scale_pow2, x_pow2, MPFR_RNDN);
//mpfr_fprintf(stderr, "[%d] x = %ld, x^2 = %.18Rf, size^2 = %.18Rf, Da = %.18Rf, area_slice = %.18Rf\n", omp_get_thread_num(), i, x_pow2, size_pow2, area_slice_delta, area_slice);
mpfr_sqrt(area_slice_delta, area_slice_delta, MPFR_RNDN);
mpfr_add(area_slice, area_slice, area_slice_delta, MPFR_RNDN);
ctr++;
}
#pragma omp critical
{
// Add all area together
mpfr_add(area, area, area_slice, MPFR_RNDN);
}
}
	// Scale back to unit radius and multiply by 4 (quarter-circle area -> pi)
mpfr_div_si(area, area, scale * scale / 4, MPFR_RNDN);
// End time
clock_gettime(CLOCK_MONOTONIC, &end);
timespec_diff(&start, &end);
mpfr_fprintf(stderr, "\nPi = %.128Rf\n", area);
exit(0);
error:
exit(1);
}
|
iFDMEX.c | #include "mex.h"
#ifdef __GNU__
#include <omp.h>
#endif
#ifndef MAXCORES
#define MAXCORES 1
#endif
void mexFunction(int nlhs, mxArray *left[], int nrhs, const mxArray *right[]) {
/* Declare variables */
mwSize nD, NP=1, NV=1, NS=1, NT=1, RT=1;
const mwSize *sz;
mxClassID precision;
mxArray *I;
mwSize np, nv, ns, nt, ind, sh, sh_rt, sh_nt, sh_ns, jump, dir;
long long rt;
double *pdir, *pIr, *pIi, *pFDr, *pFDi;
float *pdirf, *pIrf, *pIif, *pFDrf, *pFDif;
/* Get sizes */
np = mxGetM(right[0]);
nv = mxGetN(right[0]);
nD = mxGetNumberOfDimensions(right[0]);
sz = mxGetDimensions(right[0]);
precision = mxGetClassID(right[0]);
/* Determine how many rows, columns, depth, time, and other dimensions */
/* This is done to parallelize the "other dimensions" */
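    /* The input is treated as an NP x NV x NS x NT x RT array in MATLAB's
       column-major layout. 'dir' selects the dimension along which backward
       differences are taken; 'jump' below is set to minus that dimension's
       stride, so pI[ind] = pFD[ind+jump] - pFD[ind] differences each element
       with its predecessor along 'dir'. The first hyperplane along 'dir' is
       skipped (loops start at 1) and stays zero-initialized in the output. */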
NP = sz[0];
if (nD > 1)
NV = sz[1];
if (nD > 2)
NS = sz[2];
if (nD > 3)
NT = sz[3];
if (nD > 4) {
for (np=4; np<nD; np++){
RT *= sz[np];
}
}
/*mexPrintf("NP: %i\n",NP);
mexPrintf("NV: %i\n",NV);
mexPrintf("NS: %i\n",NS);
mexPrintf("NT: %i\n",NT);
mexPrintf("RT: %i\n",RT);*/
/* Check if complex */
/*if (!mxIsComplex(right[0]))
mexErrMsgTxt("Function requires complex data");*/
/* Create output and get input/output pointers */
if (precision == mxDOUBLE_CLASS) {
if (mxIsComplex(right[0])) {
I = mxCreateNumericArray(nD,sz,precision,mxCOMPLEX);
pFDi = mxGetPi(right[0]);
pIi = mxGetPi(I);
}
else {
I = mxCreateNumericArray(nD,sz,precision,mxREAL);
}
pFDr = mxGetPr(right[0]);
pIr = mxGetPr(I);
}
else {
if (mxIsComplex(right[0])) {
I = mxCreateNumericArray(nD,sz,precision,mxCOMPLEX);
pFDif = mxGetImagData(right[0]);
pIif = mxGetImagData(I);
}
else {
I = mxCreateNumericArray(nD,sz,precision,mxREAL);
}
pFDrf = mxGetData(right[0]);
pIrf = mxGetData(I);
}
/* Get transform direction */
if (mxGetClassID(right[1]) == mxDOUBLE_CLASS) {
pdir = mxGetData(right[1]);
dir = (mwSize) pdir[0];
}
else {
pdirf = mxGetData(right[1]);
dir = (mwSize) pdirf[0];
}
#ifdef __GNU__
/* Set number of threads */
omp_set_num_threads(MAXCORES);
#endif
/* Compute finite differences */
if (precision == mxDOUBLE_CLASS) {
if (dir == 1) {
jump = -1;
if (mxIsComplex(right[0])) {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=1; np<NP; np++) {
ind = sh + np;
pIr[ind] = pFDr[ind+jump] - pFDr[ind];
pIi[ind] = pFDi[ind+jump] - pFDi[ind];
}
/*ind++;
* pFDr[ind] = -pIr[ind];
* pFDi[ind] = -pIi[ind];*/
}
}
}
}
}
else {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=1; np<NP; np++) {
ind = sh + np;
pIr[ind] = pFDr[ind+jump] - pFDr[ind];
}
/*ind++;
* pFDr[ind] = -pIr[ind];
* pFDi[ind] = -pIi[ind];*/
}
}
}
}
}
}
else if (dir == 2) {
jump = -NP;
if (mxIsComplex(right[0])) {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=1; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pIr[ind] = pFDr[ind+jump] - pFDr[ind];
pIi[ind] = pFDi[ind+jump] - pFDi[ind];
}
}
/*sh = sh_rt + sh_nt + sh_ns + (NV-1)*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pFDr[ind] = -pIr[ind];
* pFDi[ind] = -pIi[ind];
* }*/
}
}
}
}
else {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=1; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pIr[ind] = pFDr[ind+jump] - pFDr[ind];
}
}
/*sh = sh_rt + sh_nt + sh_ns + (NV-1)*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pFDr[ind] = -pIr[ind];
* pFDi[ind] = -pIi[ind];
* }*/
}
}
}
}
}
else if (dir == 3) {
jump = -NP*NV;
if (mxIsComplex(right[0])) {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=1; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pIr[ind] = pFDr[ind+jump] - pFDr[ind];
pIi[ind] = pFDi[ind+jump] - pFDi[ind];
}
}
}
/*sh_ns = (NS-1)* NV*NP;
* for (nv=0; nv<NV; nv++) {
* sh = sh_rt + sh_nt + sh_ns + nv*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pIr[ind] = -pFDr[ind];
* pIi[ind] = -pFDi[ind];
* }
* }*/
}
}
}
else {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=1; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pIr[ind] = pFDr[ind+jump] - pFDr[ind];
}
}
}
/*sh_ns = (NS-1)* NV*NP;
* for (nv=0; nv<NV; nv++) {
* sh = sh_rt + sh_nt + sh_ns + nv*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pIr[ind] = -pFDr[ind];
* pIi[ind] = -pFDi[ind];
* }
* }*/
}
}
}
}
else if (dir == 4) {
jump = -NP*NV*NS;
if (mxIsComplex(right[0])) {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=1; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pIr[ind] = pFDr[ind+jump] - pFDr[ind];
pIi[ind] = pFDi[ind+jump] - pFDi[ind];
}
}
}
}
/*sh_nt = (NT-1)*NS*NV*NP;
* for (ns=0; ns<NS; ns++) {
* sh_ns = ns * NV*NP;
* for (nv=0; nv<NV; nv++) {
* sh = sh_rt + sh_nt + sh_ns + nv*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pIr[ind] = -pFDr[ind];
* pIi[ind] = -pFDi[ind];
* }
* }
* }*/
}
}
else {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=1; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pIr[ind] = pFDr[ind+jump] - pFDr[ind];
}
}
}
}
/*sh_nt = (NT-1)*NS*NV*NP;
* for (ns=0; ns<NS; ns++) {
* sh_ns = ns * NV*NP;
* for (nv=0; nv<NV; nv++) {
* sh = sh_rt + sh_nt + sh_ns + nv*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pIr[ind] = -pFDr[ind];
* pIi[ind] = -pFDi[ind];
* }
* }
* }*/
}
}
}
else
mexErrMsgTxt("Unsupported transform direction");
}
/* Single precision */
else {
if (dir == 1) {
jump = -1;
if (mxIsComplex(right[0])) {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=1; np<NP; np++) {
ind = sh + np;
pIrf[ind] = pFDrf[ind+jump] - pFDrf[ind];
pIif[ind] = pFDif[ind+jump] - pFDif[ind];
}
/*ind++;
* pFDr[ind] = -pIr[ind];
* pFDi[ind] = -pIi[ind];*/
}
}
}
}
}
else {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=1; np<NP; np++) {
ind = sh + np;
pIrf[ind] = pFDrf[ind+jump] - pFDrf[ind];
}
/*ind++;
* pFDr[ind] = -pIr[ind];
* pFDi[ind] = -pIi[ind];*/
}
}
}
}
}
}
else if (dir == 2) {
jump = -NP;
if (mxIsComplex(right[0])) {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=1; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pIrf[ind] = pFDrf[ind+jump] - pFDrf[ind];
pIif[ind] = pFDif[ind+jump] - pFDif[ind];
}
}
/*sh = sh_rt + sh_nt + sh_ns + (NV-1)*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pFDr[ind] = -pIr[ind];
* pFDi[ind] = -pIi[ind];
* }*/
}
}
}
}
else {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=1; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pIrf[ind] = pFDrf[ind+jump] - pFDrf[ind];
}
}
/*sh = sh_rt + sh_nt + sh_ns + (NV-1)*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pFDr[ind] = -pIr[ind];
* pFDi[ind] = -pIi[ind];
* }*/
}
}
}
}
}
else if (dir == 3) {
jump = -NP*NV;
if (mxIsComplex(right[0])) {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=1; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pIrf[ind] = pFDrf[ind+jump] - pFDrf[ind];
pIif[ind] = pFDif[ind+jump] - pFDif[ind];
}
}
}
/*sh_ns = (NS-1)* NV*NP;
* for (nv=0; nv<NV; nv++) {
* sh = sh_rt + sh_nt + sh_ns + nv*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pIr[ind] = -pFDr[ind];
* pIi[ind] = -pFDi[ind];
* }
* }*/
}
}
}
else {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=0; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=1; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pIrf[ind] = pFDrf[ind+jump] - pFDrf[ind];
}
}
}
/*sh_ns = (NS-1)* NV*NP;
* for (nv=0; nv<NV; nv++) {
* sh = sh_rt + sh_nt + sh_ns + nv*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pIr[ind] = -pFDr[ind];
* pIi[ind] = -pFDi[ind];
* }
* }*/
}
}
}
}
else if (dir == 4) {
            jump = -NP*NV*NS;   /* backward difference along the fourth dimension (stride NP*NV*NS, length NT; index nt = 0 is skipped) */
if (mxIsComplex(right[0])) {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=1; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pIrf[ind] = pFDrf[ind+jump] - pFDrf[ind];
pIif[ind] = pFDif[ind+jump] - pFDif[ind];
}
}
}
}
/*sh_nt = (NT-1)*NS*NV*NP;
* for (ns=0; ns<NS; ns++) {
* sh_ns = ns * NV*NP;
* for (nv=0; nv<NV; nv++) {
* sh = sh_rt + sh_nt + sh_ns + nv*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pIr[ind] = -pFDr[ind];
* pIi[ind] = -pFDi[ind];
* }
* }
* }*/
}
}
else {
#pragma omp parallel for private(rt, sh_rt, nt, sh_nt, ns, sh_ns, nv, sh, np, ind) shared(jump)
for (rt=0; rt<RT; rt++) {
sh_rt = rt * NT*NS*NV*NP;
for (nt=1; nt<NT; nt++) {
sh_nt = nt * NS*NV*NP;
for (ns=0; ns<NS; ns++) {
sh_ns = ns * NV*NP;
for (nv=0; nv<NV; nv++) {
sh = sh_rt + sh_nt + sh_ns + nv*NP;
for (np=0; np<NP; np++) {
ind = sh + np;
pIrf[ind] = pFDrf[ind+jump] - pFDrf[ind];
}
}
}
}
/*sh_nt = (NT-1)*NS*NV*NP;
* for (ns=0; ns<NS; ns++) {
* sh_ns = ns * NV*NP;
* for (nv=0; nv<NV; nv++) {
* sh = sh_rt + sh_nt + sh_ns + nv*NP;
* for (np=0; np<NP; np++) {
* ind = sh + np;
* pIr[ind] = -pFDr[ind];
* pIi[ind] = -pFDi[ind];
* }
* }
* }*/
}
}
}
else
mexErrMsgTxt("Unsupported transform direction");
}
/* Output */
left[0] = I;
}
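/*
 * Illustration only, not part of the original MEX source: stripped of the
 * direction dispatch and the complex/real and double/single variants, every
 * branch above computes the same backward difference along one dimension of
 * an array stored contiguously as [NP x NV x NS x NT x RT].  `jump` is minus
 * the stride of the differenced dimension, and the loop over that dimension
 * starts at index 1 so that ind+jump stays inside the buffer.  The sketch
 * below shows that pattern for a generic stride/length; the function name and
 * parameter names are hypothetical and chosen for illustration.
 */
static void backward_diff_dim_sketch(const double *src, double *dst,
                                     long dim_len, long stride, long n_outer)
{
    long b, d, k, base;
    /* The array is viewed as n_outer blocks, each holding dim_len slices of
     * `stride` contiguous elements; consecutive slices are differenced,
     * skipping slice 0, which has no predecessor. */
    #pragma omp parallel for private(d, k, base)
    for (b = 0; b < n_outer; b++) {
        for (d = 1; d < dim_len; d++) {
            base = b * dim_len * stride + d * stride;
            for (k = 0; k < stride; k++)
                dst[base + k] = src[base + k - stride] - src[base + k];
        }
    }
}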
|
GB_unop__acosh_fp64_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__acosh_fp64_fp64)
// op(A') function: GB (_unop_tran__acosh_fp64_fp64)
// C type: double
// A type: double
// cast: double cij = aij
// unaryop: cij = acosh (aij)
#define GB_ATYPE \
double
#define GB_CTYPE \
double
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = acosh (x) ;
// casting
#define GB_CAST(z, aij) \
double z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
double z = aij ; \
Cx [pC] = acosh (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ACOSH || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__acosh_fp64_fp64)
(
double *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
double z = aij ;
Cx [p] = acosh (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
double z = aij ;
Cx [p] = acosh (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__acosh_fp64_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
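/*
 * Illustration only, not part of the generated GraphBLAS source: stripped of
 * the GB_* macro plumbing, the apply kernel above reduces to an elementwise
 * acosh over the entries that are present.  The sketch below shows that core
 * loop for a small bitmap-style array; the function name is hypothetical.
 * acosh(x) is defined for x >= 1, so smaller values yield NaN here, exactly
 * as they would through the kernel above.
 */
#include <math.h>
#include <stddef.h>
#include <stdint.h>
static void acosh_apply_sketch(double *Cx, const double *Ax,
                               const int8_t *Ab, size_t n)
{
    size_t p ;
    for (p = 0 ; p < n ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   /* skip entries absent from the bitmap */
        Cx [p] = acosh (Ax [p]) ;
    }
}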
|