source stringlengths 3 92 | c stringlengths 26 2.25M |
|---|---|
apply_bcs_curvilinear.h |
// Declare boundary condition BC_UPDATE_OUTER macro,
// which updates a single outer boundary face
// of the 3D grid cube using quadratic polynomial
// extrapolation.
// BC_UPDATE_OUTER: update the single outer-boundary grid point
// (i0,i1,i2) of gridfunction which_gf by quadratic polynomial
// extrapolation from the three interior neighbors along the inward
// face normal (FACEX0,FACEX1,FACEX2), each component in {-1,0,+1}.
// Wrapped in do { } while(0) so the macro expands to exactly one
// statement and is safe inside unbraced if/else bodies.
// (The original computed an unused idx3 = IDX3S(i0,i1,i2); removed.)
#define BC_UPDATE_OUTER(which_gf, i0,i1,i2, FACEX0,FACEX1,FACEX2) do { \
    gfs[IDX4S(which_gf,i0,i1,i2)] = \
      +3.0*gfs[IDX4S(which_gf,i0+1*FACEX0,i1+1*FACEX1,i2+1*FACEX2)] \
      -3.0*gfs[IDX4S(which_gf,i0+2*FACEX0,i1+2*FACEX1,i2+2*FACEX2)] \
      +1.0*gfs[IDX4S(which_gf,i0+3*FACEX0,i1+3*FACEX1,i2+3*FACEX2)]; \
  } while(0)
// Curvilinear boundary condition driver routine: Apply BCs to all six
// boundary faces of the 3D numerical domain, filling in the
// innermost ghost zone layer first, and moving outward.
// Apply curvilinear boundary conditions to all NUM_GFS gridfunctions in
// gfs, one ghost-zone layer at a time, innermost layer first.  For each
// layer: outer boundary points are filled by quadratic extrapolation
// (BC_UPDATE_OUTER), then inner (parity) boundary points are copied from
// their precomputed source points with a parity-sign flip.  The point
// lists and parity signs are precomputed in bcstruct.
void apply_bcs_curvilinear(const paramstruct *restrict params, const bc_struct *restrict bcstruct,
const int NUM_GFS, const int8_t *restrict gfs_parity, REAL *restrict gfs) {
// Parallelize over gridfunctions: each gridfunction's boundary update is
// independent of the others.
#pragma omp parallel for
for(int which_gf=0;which_gf<NUM_GFS;which_gf++) {
// Bring the grid's C parameters (grid dimensions, etc.) into scope for
// the IDX4S indexing macro below.
#include "../set_Cparameters.h"
for(int which_gz = 0; which_gz < NGHOSTS; which_gz++) {
// First apply OUTER boundary conditions,
// in case an INNER (parity) boundary point
// needs data at the outer boundary:
for(int pt=0;pt<bcstruct->num_ob_gz_pts[which_gz];pt++) {
BC_UPDATE_OUTER(which_gf,
bcstruct->outer[which_gz][pt].outer_bc_dest_pt.i0,
bcstruct->outer[which_gz][pt].outer_bc_dest_pt.i1,
bcstruct->outer[which_gz][pt].outer_bc_dest_pt.i2,
bcstruct->outer[which_gz][pt].FACEi0,
bcstruct->outer[which_gz][pt].FACEi1,
bcstruct->outer[which_gz][pt].FACEi2);
}
// Then apply INNER (parity) boundary conditions: copy the value from the
// precomputed source point, multiplied by the parity sign (+1 or -1)
// appropriate for this gridfunction's tensor character.
for(int pt=0;pt<bcstruct->num_ib_gz_pts[which_gz];pt++) {
const int i0dest = bcstruct->inner[which_gz][pt].inner_bc_dest_pt.i0;
const int i1dest = bcstruct->inner[which_gz][pt].inner_bc_dest_pt.i1;
const int i2dest = bcstruct->inner[which_gz][pt].inner_bc_dest_pt.i2;
const int i0src = bcstruct->inner[which_gz][pt].inner_bc_src_pt.i0;
const int i1src = bcstruct->inner[which_gz][pt].inner_bc_src_pt.i1;
const int i2src = bcstruct->inner[which_gz][pt].inner_bc_src_pt.i2;
// (Removed an unused local `prty`; the parity array is read directly.)
gfs[IDX4S(which_gf,i0dest,i1dest,i2dest)] =
bcstruct->inner[which_gz][pt].parity[gfs_parity[which_gf]] * gfs[IDX4S(which_gf, i0src,i1src,i2src)];
} // END for(int pt=0;pt<num_ib_gz_pts[which_gz];pt++)
} // END for(int which_gz = 0; which_gz < NGHOSTS; which_gz++)
} // END for(int which_gf=0;which_gf<NUM_GFS;which_gf++)
} // END function
utilityNestedDisectionMetis.h | // ***********************************************************************
//
// Grappolo: A C++ library for graph clustering
// Mahantesh Halappanavar (hala@pnnl.gov)
// Pacific Northwest National Laboratory
//
// ***********************************************************************
//
// Copyright (2014) Battelle Memorial Institute
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// ************************************************************************
#ifndef _graph_NestDisect_
#define _graph_NestDisect_
/*
int METIS NodeND(idx t *nvtxs, idx t *xadj, idx t *adjncy, idx t *vwgt, idx t *options,
idx t *perm, idx t *iperm)
Description
This function computes fill reducing orderings of sparse matrices using the multilevel nested dissection algorithm.
Parameters
nvtxs: The number of vertices in the graph.
xadj, adjncy: The adjacency structure of the graph as described in Section 5.5.
vwgt (NULL): An array of size nvtxs specifying the weights of the vertices. If the graph is weighted, the nested dissection ordering computes vertex separators that minimize the sum of the weights of the vertices
on the separators. A NULL can be passed to indicate a graph with equal weight vertices (or unweighted).
options (NULL)
This is the array of options as described in Section 5.4. The following options are valid:
METIS_OPTION_CTYPE, METIS_OPTION_RTYPE, METIS_OPTION_NO2HOP,
METIS_OPTION_NSEPS, METIS_OPTION_NITER, METIS_OPTION_UFACTOR,
METIS_OPTION_COMPRESS, METIS_OPTION_CCORDER, METIS_OPTION_SEED,
METIS_OPTION_PFACTOR, METIS_OPTION_NUMBERING, METIS_OPTION_DBGLVL
perm, iperm: These are vectors, each of size nvtxs. Upon successful completion, they store the fill-reducing permutation and inverse-permutation. Let A be the original matrix and A0 be the permuted matrix. The
arrays perm and iperm are defined as follows.
Row (column) i of A0 is the perm[i] row (column) of A, and row (column) i of A is the iperm[i] row (column) of A0. The numbering of this vector starts from either 0 or 1, depending on the value of options[METIS OPTION NUMBERING].
Returns:
METIS OK Indicates that the function returned normally.
METIS ERROR INPUT Indicates an input error.
METIS ERROR MEMORY Indicates that it could not allocate the required memory.
METIS ERROR Indicates some other type of error.
*/
extern "C" {
#include "metis.h"
}
using namespace std;
/*
#ifdef __cplusplus
extern "C" {
#endif
//Nested dissection
int METIS_NodeND(idx t *nvtxs, idx t *xadj, idx t *adjncy, idx t *vwgt, idx t *options,
idx t *perm, idx t *iperm);
#ifdef __cplusplus
}
#endif
*/
//METIS Graph Partitioner:
//METIS Graph Partitioner: compute a multilevel nested-dissection
//fill-reducing ordering of graph G with METIS_NodeND, and return the
//permutation in old2NewMap[0..NV-1] (old2NewMap[v] = new index of v).
void MetisNDReorder( graph *G, long *old2NewMap ) {
    printf("Within MetisNDReorder(): \n");
    //Get the iterators for the graph:
    long NV = G->numVertices;
    long NE = G->numEdges;
    long *vtxPtr = G->edgeListPtrs;
    edge *vtxInd = G->edgeList;
    printf("|V|= %ld, |E|= %ld \n", NV, NE);
    int status=0;
    idx_t nvtxs = (idx_t) NV;
    //CSR row-pointer array: NV+1 entries.
    idx_t *xadj = (idx_t *) malloc ((NV+1) * sizeof(idx_t));
    assert(xadj != 0);
#pragma omp parallel for
    for(long i=0; i<=NV; i++) {
        xadj[i] = (idx_t) vtxPtr[i];
    }
    //Adjacency array: 2*NE entries (each undirected edge stored twice).
    idx_t *adjncy = (idx_t *) malloc (2*NE * sizeof(idx_t));
    assert(adjncy != 0);
#pragma omp parallel for
    for(long i=0; i<2*NE; i++) {
        adjncy[i] = (idx_t) vtxInd[i].tail;
    }
    //Edge weights are copied but intentionally NOT passed to METIS_NodeND
    //below: node ordering only uses (optional) vertex weights.
    idx_t *adjwgt = (idx_t *) malloc (2*NE * sizeof(idx_t));
    assert(adjwgt != 0);
#pragma omp parallel for
    for(long i=0; i<2*NE; i++) {
        adjwgt[i] = (idx_t) vtxInd[i].weight;
    }
    //perm/iperm: fill-reducing permutation and its inverse, NV entries each.
    idx_t *perm = (idx_t *) malloc (NV * sizeof(idx_t)); assert(perm != 0);
    idx_t *iperm = (idx_t *) malloc (NV * sizeof(idx_t)); assert(iperm != 0);
    idx_t options[METIS_NOPTIONS];
    METIS_SetDefaultOptions(options);
    options[METIS_OPTION_CTYPE] = METIS_CTYPE_SHEM; //Sorted heavy-edge matching
    options[METIS_OPTION_IPTYPE] = METIS_IPTYPE_NODE; //Grows a bisection using a greedy strategy.
    options[METIS_OPTION_RTYPE] = METIS_RTYPE_SEP1SIDED; //FM-based cut refinement.
    options[METIS_OPTION_DBGLVL] = 1; //Debug/progress output level.
    options[METIS_OPTION_UFACTOR] = 200; //Maximum allowed load imbalance among partitions
    options[METIS_OPTION_NO2HOP] = 0; //The 2-hop matching (0=perform; 1=Do not)
    options[METIS_OPTION_COMPRESS] = 1; //Combine vertices with identical adjacency lists (0=do not)
    options[METIS_OPTION_CCORDER] = 0; //Connected components identified and ordered separately (1=Yes)
    options[METIS_OPTION_SEED] = 786; //Specifies the seed for the random number generator.
    options[METIS_OPTION_NITER] = 10; //#iterations for the refinement algorithms
    options[METIS_OPTION_NSEPS] = 1; //#different separators
    options[METIS_OPTION_PFACTOR] = 10; //Min degree of the vertices that will be ordered last
    options[METIS_OPTION_NUMBERING]= 0; //C-style numbering, starting from 0
    status = METIS_NodeND(&nvtxs, xadj, adjncy, NULL, options, perm, iperm);
    if(status == METIS_OK)
        printf("Nested dissection returned correctly. Will store the permutations in vectors perm and iperm.\n");
    else {
        if(status == METIS_ERROR_MEMORY)
            printf("Metis could not allocate memory.\n");
        else if(status == METIS_ERROR_INPUT)
            printf("Metis had issues with input.\n");
        else
            printf("Some other Metis error: %d\n", status); //BUG FIX: status is int, so %d (was %ld)
    }
#pragma omp parallel for
    for(long i=0; i<NV; i++) { //BUG FIX: was i<=NV, which read perm[NV] and wrote old2NewMap[NV] out of bounds (perm has NV entries)
        old2NewMap[i] = (long) perm[i]; //Do explicit typecasts
    }
    //Cleanup:
    free(xadj); free(adjncy); free(adjwgt);
    free(perm); free(iperm);
    printf("Returning back from Metis\n");
}
#endif
|
vect-simd-clone-4.c | /* { dg-require-effective-target vect_simd_clones } */
/* { dg-additional-options "-fopenmp-simd" } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
#include "tree-vect.h"
#ifndef N
#define N 1024
#endif
float d[N];
int e[N];
unsigned short f[N];
/* SIMD clone target: the compiler generates an 8-lane vector version
 * with `b` passed as a uniform (scalar) argument.  The branch makes
 * this a masked (notinbranch caller-side) clone test. */
#pragma omp declare simd simdlen(8) notinbranch uniform(b)
__attribute__((noinline)) float
foo (float a, float b, float c)
{
/* Divergent lanes: a < 30 takes the early-return path. */
if (a < 30)
return 5.0f;
return a + b + c;
}
/* Fill d[] via the SIMD clone of foo(), and update e[]/f[] in the same
 * vectorized loop.  noinline/noclone keeps the loop intact so the
 * vectorizer (not the inliner) is what gets tested. */
__attribute__((noinline, noclone)) void
bar ()
{
int i;
#pragma omp simd
for (i = 0; i < N; ++i)
{
d[i] = foo (i, 123, i * 3);
e[i] = e[i] * 3;
f[i] = f[i] + 1;
}
}
/* Driver: verify bar()'s results against the scalar semantics of foo().
 * e[] starts zeroed (static storage), so e[i]*3 must stay 0; f[] starts
 * zeroed, so f[i] must be exactly 1 after one increment. */
int
main ()
{
int i;
check_vect ();
bar ();
for (i = 0; i < N; i++)
if (d[i] != (i < 30 ? 5.0f : i * 4 + 123.0f) || e[i] || f[i] != 1)
abort ();
return 0;
}
|
pr60823-3.c | /* PR tree-optimization/60823 */
/* { dg-do compile } */
/* { dg-options "-O2 -fopenmp-simd -fno-strict-aliasing" } */
void bar (char *, double *);
#if __SIZEOF_DOUBLE__ >= 4
struct S { char c[sizeof (double)]; };
void baz (struct S, struct S);
union U { struct S s; double d; };
/* Regression test body for PR tree-optimization/60823: deliberately
 * mixes byte-level aliasing, pointer arithmetic into the parameters,
 * and union type-punning to stress the SIMD-clone argument handling.
 * This is a compile-only test -- the exact statement sequence is the
 * point; do not "clean it up". */
#pragma omp declare simd simdlen(4) notinbranch
__attribute__((noinline)) int
foo (double c1, double c2)
{
/* Alias c1 through both a double* and a char*. */
double *a = &c1;
char *b = (char *) &c1 + 2;
b[-2]++;
b[1]--;
*a++;
c2++;
bar ((char *) &c2 + 1, &c2);
c2 *= 3.0;
bar (b, a);
/* Type-pun the doubles to struct S via a union (well-defined) ... */
baz (((union U) { .d = c1 }).s, ((union U) { .d = c2 }).s);
/* ... and via pointer casts (the strict-aliasing hazard under test;
 * the dg-options force -fno-strict-aliasing). */
baz (*(struct S *)&c1, *(struct S *)&c2);
return c1 + c2 + ((struct S *)&c1)->c[1];
}
#endif
|
rawBLAKE2_512_fmt_plug.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 2012 by Solar Designer
* based on rawMD4_fmt.c code, with trivial changes by groszek.
*
* Re-used for BLAKE2 by Dhiru Kholia (dhiru at openwall.com)
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawBLAKE2;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawBLAKE2);
#else
#include "arch.h"
#include "blake2.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include <string.h>
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif
#ifdef _OPENMP
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#include <omp.h>
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Raw-Blake2"
#define FORMAT_NAME ""
#if defined(__AVX__)
#define ALGORITHM_NAME "128/128 AVX"
#elif defined(__XOP__)
#define ALGORITHM_NAME "128/128 XOP"
#elif defined(__SSE4_1__)
#define ALGORITHM_NAME "128/128 SSE4.1"
#elif defined(__SSSE3__)
#define ALGORITHM_NAME "128/128 SSSE3"
#elif defined(__SSE2__)
#define ALGORITHM_NAME "128/128 SSE2"
#else
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define CIPHERTEXT_LENGTH 128
#define BINARY_SIZE 64
#define SALT_SIZE 0
#define BINARY_ALIGN 4
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
static struct fmt_tests tests[] = {
{"4245af08b46fbb290222ab8a68613621d92ce78577152d712467742417ebc1153668f1c9e1ec1e152a32a9c242dc686d175e087906377f0c483c5be2cb68953e", "blake2"},
{"$BLAKE2$021ced8799296ceca557832ab941a50b4a11f83478cf141f51f933f653ab9fbcc05a037cddbed06e309bf334942c4e58cdf1a46e237911ccd7fcf9787cbc7fd0", "hello world"},
/* hash generated by multiple versions (in C and Go) of b2sum program */
{"$BLAKE2$1f7d9b7c9a90f7bfc66e52b69f3b6c3befbd6aee11aac860e99347a495526f30c9e51f6b0db01c24825092a09dd1a15740f0ade8def87e60c15da487571bcef7", "verystrongandlongpassword"},
/* test vectors from Wikipedia */
{"$BLAKE2$a8add4bdddfd93e4877d2746e62817b116364a1fa7bc148d95090bc7333b3673f82401cf7aa2e4cb1ecd90296e3f14cb5413f8ed77be73045b13914cdcd6a918", "The quick brown fox jumps over the lazy dog"},
{"$BLAKE2$786a02f742015903c6c6fd852552d272912f4740e15847618a86e217f71f5419d25e1031afee585313896444934eb04b903a685b1448b755d56f701afe9be2ce", ""},
{"$BLAKE2$da40d8f48e9e7560c56e2b92205aed6342a276994ca0287ea4f8c1423ef07d519ecb4bf8668c118379a36be8aa6c077bbc6213fa81fbb332fad9d8a19a7756e6", "UPPERCASE"},
{"$BLAKE2$f5ab8bafa6f2f72b431188ac38ae2de7bb618fb3d38b6cbf639defcdd5e10a86b22fccff571da37e42b23b80b657ee4d936478f582280a87d6dbb1da73f5c47d", "123456789"},
{NULL}
};
static int (*saved_len);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)
[(BINARY_SIZE + sizeof(ARCH_WORD_32) - 1) / sizeof(ARCH_WORD_32)];
/* Allocate per-candidate buffers sized to max_keys_per_crypt.  Under
 * OpenMP the key count is first scaled by the thread count, then by
 * OMP_SCALE so each thread gets a large batch. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
/* mem_calloc zero-initializes, so saved_key starts NUL-terminated. */
saved_len = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_len));
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
/* Release the buffers allocated in init(), in reverse order.
 * MEM_FREE also NULLs the pointers, so a second done() is harmless. */
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
MEM_FREE(saved_len);
}
/* Accept an optional "$BLAKE2$" tag followed by exactly
 * CIPHERTEXT_LENGTH hex digits and nothing else. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *start, *cur;

	start = ciphertext;
	if (!strncmp(start, "$BLAKE2$", 8))
		start += 8;
	/* atoi16 maps every non-hex character to 0x7F. */
	for (cur = start; atoi16[ARCH_INDEX(*cur)] != 0x7F; cur++)
		;
	/* Valid iff we stopped at the terminator after exactly 128 digits. */
	return *cur == 0 && cur - start == CIPHERTEXT_LENGTH;
}
/* Canonicalize a hash: ensure the "$BLAKE2$" tag is present and the hex
 * digits are lowercase.  Returns a pointer to static storage. */
static char *split(char *ciphertext, int index, struct fmt_main *pFmt)
{
	static char out[8 + CIPHERTEXT_LENGTH + 1];
	const char *hex = ciphertext;

	if (!strncmp(hex, "$BLAKE2$", 8))
		hex += 8;
	memcpy(out, "$BLAKE2$", 8);
	memcpy(out + 8, hex, CIPHERTEXT_LENGTH);
	out[8 + CIPHERTEXT_LENGTH] = '\0';
	strlwr(out + 8);
	return out;
}
/* Decode the 128 hex digits after the "$BLAKE2$" tag into the 64-byte
 * binary hash.  `out` is allocated once and reused on every call, so
 * the returned pointer is only valid until the next call. */
static void *get_binary(char *ciphertext)
{
static unsigned char *out;
char *p;
int i;
if (!out) out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);
/* Skip the 8-byte "$BLAKE2$" tag (split() guarantees it is there). */
p = ciphertext + 8;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] =
(atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return out;
}
/* Hash-table lookup helpers: return the low PH_MASK_n bits of the first
 * 32-bit word of the computed hash for candidate `index`. */
static int get_hash_0(int index)
{
return crypt_out[index][0] & PH_MASK_0;
}
static int get_hash_1(int index)
{
return crypt_out[index][0] & PH_MASK_1;
}
static int get_hash_2(int index)
{
return crypt_out[index][0] & PH_MASK_2;
}
static int get_hash_3(int index)
{
return crypt_out[index][0] & PH_MASK_3;
}
static int get_hash_4(int index)
{
return crypt_out[index][0] & PH_MASK_4;
}
static int get_hash_5(int index)
{
return crypt_out[index][0] & PH_MASK_5;
}
static int get_hash_6(int index)
{
return crypt_out[index][0] & PH_MASK_6;
}
/* Store candidate `key` (truncated to PLAINTEXT_LENGTH bytes) and its
 * stored length.  No terminator is written here; get_key() adds it. */
static void set_key(char *key, int index)
{
	int n;

	n = strlen(key);
	if (n > PLAINTEXT_LENGTH)
		n = PLAINTEXT_LENGTH;
	saved_len[index] = n;
	memcpy(saved_key[index], key, n);
}
/* Return candidate `index` as a C string: set_key() does not write a
 * terminator, so place one at the recorded length first. */
static char *get_key(int index)
{
saved_key[index][saved_len[index]] = 0;
return saved_key[index];
}
/* Hash all `count` candidates with unkeyed BLAKE2b-512 into crypt_out.
 * NOTE(review): without _OPENMP the for-loop header is compiled out and
 * only index 0 is hashed -- this relies on max_keys_per_crypt being 1
 * in non-OMP builds (see MIN/MAX_KEYS_PER_CRYPT and init()); confirm. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
/* blake2b(out, in, key=NULL, outlen=64, inlen, keylen=0) */
(void)blake2b((uint8_t *)crypt_out[index], saved_key[index], NULL, 64, saved_len[index], 0);
}
return count;
}
/* Quick scan: does any computed hash match `binary` in its first
 * machine word (ARCH_SIZE bytes)?  A full-length check is done later by
 * cmp_one().  NOTE(review): as in crypt_all(), the loop header is
 * compiled out without _OPENMP, so only index 0 is checked -- this
 * assumes count <= 1 in non-OMP builds; confirm. */
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
/* Full 64-byte comparison for a single candidate. */
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* cmp_one() already compares the entire hash, so nothing more to do. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* Format registration: parameters block, then the method table wiring
 * this plugin's functions into the John the Ripper core. */
struct fmt_main fmt_rawBLAKE2 = {
{
/* fmt_params */
FORMAT_LABEL,
FORMAT_NAME,
"BLAKE2b 512 " ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
FMT_OMP | FMT_OMP_BAD |
#endif
FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE,
{ NULL },
tests
}, {
/* fmt_methods */
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
split,
get_binary,
fmt_default_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
fmt_default_set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
mandel-omp-taskloop-row.c | /*
* Sequential Mandelbrot program
*
* This program computes and displays all or part of the Mandelbrot
* set. By default, it examines all points in the complex plane
* that have both real and imaginary parts between -2 and 2.
* Command-line parameters allow zooming in on a specific part of
* this range.
*
* Usage:
* mandel [-i maxiter -c x0 y0 -s size -w windowsize]
* where
* maxiter denotes the maximum number of iterations at each point -- by default 1000
* x0, y0, and size specify the range to examine (a square
* centered at (x0 + iy0) of size 2*size by 2*size -- by default,
* a square of size 4 by 4 centered at the origin)
* windowsize denotes the size of the image (diplay window) to compute
*
* Input: none, except the optional command-line arguments
* Output: a graphical display as described in Wilkinson & Allen,
* displayed using the X Window system, plus text output to
* standard output showing the above parameters, plus execution
* time in seconds.
*
* Code based on the original code from Web site for Wilkinson and Allen's
* text on parallel programming:
* http://www.cs.uncc.edu/~abw/parallel/par_prog/
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif
#include <sys/time.h>
/* Return the current wall-clock time in microseconds. */
double getusec_() {
	struct timeval now;
	gettimeofday(&now, NULL);
	return 1e6 * (double)now.tv_sec + (double)now.tv_usec;
}
/* Timing helpers: START_COUNT_TIME records a timestamp (microseconds)
 * in a caller-declared `double stamp`; STOP_COUNT_TIME(_m) converts the
 * elapsed time to seconds and prints it with label _m. */
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6fs\n",(_m), stamp);
/* Default values for things. */
#define GS 4 /* granularity of the taskloop */
#define NT 400 /* number of threads in the taskloop */
#define N 2 /* size of problem space (x, y from -N to N) */
#define NPIXELS 800 /* size of display window in pixels */
int row, col; // variables used to traverse the problem space
/* Structure definition for complex numbers */
typedef struct {
double real, imag;
} complex;
#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h" /* has setup(), interact() */
#endif
/* Compute the Mandelbrot iteration count for every pixel of a
 * height x width image, parallelized with an OpenMP taskloop over rows
 * (GS rows per task).  With _DISPLAY_ the points are drawn to an X11
 * window; otherwise counts are stored in output[row][col]. */
void mandelbrot(int height,
int width,
double real_min,
double imag_min,
double scale_real,
double scale_imag,
int maxiter,
#if _DISPLAY_
int setup_return,
Display *display,
Window win,
GC gc,
double scale_color,
double min_color)
#else
int ** output)
#endif
{
  /* Calculate points and save/display */
  #pragma omp parallel
  #pragma omp single
  //#pragma omp taskloop num_tasks(NT)
  #pragma omp taskloop grainsize(GS)
  /* BUG FIX: loop variables are now locals.  The original used the
   * file-scope globals `row` and `col`; while a taskloop privatizes its
   * own iteration variable, the inner `col` remained shared across all
   * tasks -- a data race that corrupts the computed image. */
  for (int row = 0; row < height; ++row) {
    for (int col = 0; col < width; ++col) {
      complex z, c;
      z.real = z.imag = 0;
      /* Scale display coordinates to actual region; height-1-row flips
       * the y axis so larger imaginary values appear at the top. */
      c.real = real_min + ((double) col * scale_real);
      c.imag = imag_min + ((double) (height-1-row) * scale_imag);
      /* Iterate z -> z^2 + c until divergence (|z|^2 >= N*N) or maxiter. */
      int k = 0;
      double lengthsq, temp;
      do {
        temp = z.real*z.real - z.imag*z.imag + c.real;
        z.imag = 2*z.real*z.imag + c.imag;
        z.real = temp;
        lengthsq = z.real*z.real + z.imag*z.imag;
        ++k;
      } while (lengthsq < (N*N) && k < maxiter);
#if _DISPLAY_
      /* Scale color and display point; X11 is not thread-safe, so the
       * drawing calls are serialized. */
      long color = (long) ((k-1) * scale_color) + min_color;
      if (setup_return == EXIT_SUCCESS) {
        #pragma omp critical
        {
          XSetForeground (display, gc, color);
          XDrawPoint (display, win, gc, col, row);
        }
      }
#else
      output[row][col]=k;
#endif
    }
  }
}
/* Driver: parse command-line options, set up either the X11 display or
 * an in-memory output matrix, time the mandelbrot() computation, and
 * write/flush the result. */
int main(int argc, char *argv[]) {
int maxiter = 1000;
double real_min;
double real_max;
double imag_min;
double imag_max;
int width = NPIXELS; /* dimensions of display window */
int height = NPIXELS;
double size=N, x0 = 0, y0 = 0;
#if _DISPLAY_
Display *display;
Window win;
GC gc;
int setup_return;
long min_color = 0, max_color = 0;
double scale_color;
#else
int ** output;
FILE *fp = NULL;
#endif
double scale_real, scale_imag;
/* Process command-line arguments.  Options taking a value consume the
 * next argv entry (++i). */
for (int i=1; i<argc; i++) {
if (strcmp(argv[i], "-i")==0) {
maxiter = atoi(argv[++i]);
}
else if (strcmp(argv[i], "-w")==0) {
width = atoi(argv[++i]);
height = width;
}
else if (strcmp(argv[i], "-s")==0) {
size = atof(argv[++i]);
}
#if !_DISPLAY_
else if (strcmp(argv[i], "-o")==0) {
if((fp=fopen("mandel.out", "wb"))==NULL) {
fprintf(stderr, "Unable to open file\n");
return EXIT_FAILURE;
}
}
#endif
else if (strcmp(argv[i], "-c")==0) {
x0 = atof(argv[++i]);
y0 = atof(argv[++i]);
}
else {
/* Unknown option: print usage and bail out. */
#if _DISPLAY_
fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
fprintf(stderr, "      -o to write computed image to disk (default no file generated)\n");
#endif
fprintf(stderr, "      -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
fprintf(stderr, "      -w to specify the size of the display window (default 800x800 pixels)\n");
#else
fprintf(stderr, "      -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
fprintf(stderr, "      -c to specify the center x0+iy0 of the square to compute (default origin)\n");
fprintf(stderr, "      -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
return EXIT_FAILURE;
}
}
/* The computed region is the square [x0-size, x0+size] x [y0-size, y0+size]. */
real_min = x0 - size;
real_max = x0 + size;
imag_min = y0 - size;
imag_max = y0 + size;
/* Produce text output */
fprintf(stdout, "\n");
fprintf(stdout, "Mandelbrot program\n");
fprintf(stdout, "center = (%g, %g), size = %g\n",
(real_max + real_min)/2, (imag_max + imag_min)/2,
(real_max - real_min)/2);
fprintf(stdout, "maximum iterations = %d\n", maxiter);
fprintf(stdout, "\n");
#if _DISPLAY_
/* Initialize for graphical display */
setup_return =
setup(width, height, &display, &win, &gc, &min_color, &max_color);
if (setup_return != EXIT_SUCCESS) {
fprintf(stderr, "Unable to initialize display, continuing\n");
return EXIT_FAILURE;
}
#else
/* One heap row per image line. */
output = malloc(height*sizeof(int *));
for (int row = 0; row < height; ++row)
output[row] = malloc(width*sizeof(int));
#endif
/* Compute factors to scale computational region to window */
scale_real = (double) (real_max - real_min) / (double) width;
scale_imag = (double) (imag_max - imag_min) / (double) height;
#if _DISPLAY_
/* Compute factor for color scaling */
scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif
/* Start timing */
double stamp;
START_COUNT_TIME;
#if _DISPLAY_
mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
setup_return, display, win, gc, scale_color, min_color);
#else
mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
output);
#endif
/* End timing */
STOP_COUNT_TIME("Total execution time");
/* Be sure all output is written */
#if _DISPLAY_
if (setup_return == EXIT_SUCCESS) {
XFlush (display);
}
#else
/* Dump the image row by row as raw ints (only if -o opened the file). */
if (fp != NULL)
{
for (int row = 0; row < height; ++row)
if(fwrite(output[row], sizeof(int), width, fp) != width) {
fprintf(stderr, "Output file not written correctly\n");
}
}
#endif
#if _DISPLAY_
/* Wait for user response, then exit program */
if (setup_return == EXIT_SUCCESS) {
interact(display, &win, width, height,
real_min, real_max, imag_min, imag_max);
}
return EXIT_SUCCESS;
#endif
}
|
FGT_fmt_plug.c | /*
* Fortigate (FortiOS) Password cracker
*
* This software is Copyright (c) 2012 Mat G. <mat.jtr at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
*
* Passwords are located in "config system admin" part of the configuration file :
*
* config system admin
* edit "<username>"
* set password ENC AK1wTiFOMv7mZOTvQNmKQBAY98hZZjSRLxAY8vZp8NlDWU=
*
* Password is : AK1|base64encode(salt|hashed_password)
* where hashed_password is SHA1(salt|password|fortinet_magic)
*
* salt is 12 bytes long
* hashed_password is 20 bytes long (SHA1 salt)
* encoded password is 47 bytes long (3 bytes for AK1 and 44 bytes of base64encode(salt|hashed_password))
*
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_FGT;
#elif FMT_REGISTERS_H
john_register_one(&fmt_FGT);
#else
#include <string.h>
#include "common.h"
#include "formats.h"
#include "misc.h"
#include "sha.h"
#include "base64.h"
#include "sse-intrinsics.h"
#include "memdbg.h"
#define FORMAT_LABEL "Fortigate"
#define FORMAT_NAME "FortiOS"
#define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 32
#define CIPHERTEXT_LENGTH 44
#define HASH_LENGTH CIPHERTEXT_LENGTH + 3
#define BINARY_SIZE 20
#define BINARY_ALIGN 4
#define SALT_SIZE 12
#define SALT_ALIGN 4
#define FORTINET_MAGIC "\xa3\x88\xba\x2e\x42\x4c\xb0\x4a\x53\x79\x30\xc1\x31\x07\xcc\x3f\xa1\x32\x90\x29\xa9\x81\x5b\x70"
#define FORTINET_MAGIC_LENGTH 24
#define MIN_KEYS_PER_CRYPT 1
#ifdef _OPENMP
#define MAX_KEYS_PER_CRYPT (0x200 * 3)
#else
#define MAX_KEYS_PER_CRYPT 0x100
#endif
static struct fmt_tests fgt_tests[] =
{
{"AK1wTiFOMv7mZOTvQNmKQBAY98hZZjSRLxAY8vZp8NlDWU=", "fortigate"},
{"AK1Vd1SCGVtAAT931II/U22WTppAISQkITHOlz0ukIg4nA=", "admin"},
{"AK1DZLDpqz335ElPtuiNTpguiozY7xVaHjHYnxw6sNlI6A=", "ftnt"},
{NULL}
};
static SHA_CTX ctx_salt;
static char saved_key[MAX_KEYS_PER_CRYPT][PLAINTEXT_LENGTH + 1];
static int saved_key_len[MAX_KEYS_PER_CRYPT];
static ARCH_WORD_32 crypt_key[MAX_KEYS_PER_CRYPT][BINARY_SIZE / sizeof(ARCH_WORD_32)];
/* A valid hash is the literal tag "AK1" followed by 44 base64
 * characters, i.e. HASH_LENGTH characters in total. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	if (strncmp(ciphertext, "AK1", 3) != 0)
		return 0;
	return strlen(ciphertext) == HASH_LENGTH;
}
/* Base64-decode the payload after "AK1" and return the first
 * SALT_SIZE (12) bytes -- the salt.  Returns static storage, valid
 * only until the next call. */
static void * get_salt(char *ciphertext)
{
static union {
char b[SALT_SIZE];
ARCH_WORD_32 dummy; /* forces 4-byte alignment (SALT_ALIGN) */
} out;
char buf[SALT_SIZE+BINARY_SIZE+1];
base64_decode(ciphertext+3, CIPHERTEXT_LENGTH, buf);
memcpy(out.b, buf, SALT_SIZE);
return out.b;
}
/* Pre-hash the salt once into ctx_salt; crypt_all() clones this
 * context for every candidate instead of re-hashing the salt. */
static void set_salt(void *salt)
{
SHA1_Init(&ctx_salt);
SHA1_Update(&ctx_salt, salt, SALT_SIZE);
}
/* Store candidate `key` (truncated to PLAINTEXT_LENGTH) and record the
 * length of what was actually stored.
 * BUG FIX: the original recorded strlen(key), so for keys longer than
 * PLAINTEXT_LENGTH, crypt_all() hashed saved_key_len[index] bytes and
 * read past the end of the fixed-size saved_key[index] buffer. */
static void set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH+1);
	saved_key_len[index] = strlen(saved_key[index]);
}
/* Return the stored candidate; strnzcpy in set_key() guarantees it is
 * NUL-terminated. */
static char * get_key(int index)
{
return saved_key[index];
}
/* Base64-decode the payload after "AK1" and return the 20-byte SHA-1
 * digest that follows the 12-byte salt.  Returns static storage. */
static void * binary(char *ciphertext)
{
static union {
char b[BINARY_SIZE];
ARCH_WORD_32 dummy; /* forces 4-byte alignment (BINARY_ALIGN) */
} bin;
char buf[SALT_SIZE+BINARY_SIZE+1];
memset(buf, 0, sizeof(buf));
base64_decode(ciphertext+3, CIPHERTEXT_LENGTH, buf);
// skip over the 12 bytes of salt and get only the hashed password
memcpy(bin.b, buf+SALT_SIZE, BINARY_SIZE);
return bin.b;
}
/* Does any computed hash match `binary`?  Compare the first 32-bit
 * word as a cheap reject before the full-length memcmp. */
static int cmp_all(void *binary, int count)
{
	ARCH_WORD_32 first_word = *(ARCH_WORD_32 *)binary;
	int i;

	for (i = 0; i < count; i++) {
		if (first_word == *(ARCH_WORD_32 *)crypt_key[i] &&
		    !memcmp(binary, crypt_key[i], BINARY_SIZE))
			return 1;
	}
	return 0;
}
/* Full 20-byte comparison for a single candidate. */
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
}
/* cmp_one() already compares the entire digest, so nothing more to do. */
static int cmp_exact(char *source, int index)
{
return 1;
}
/* For each candidate, clone the pre-salted SHA-1 context and finish the
 * hash as SHA1(salt | password | FORTINET_MAGIC) into crypt_key.
 * NOTE(review): `cp` exists so the magic constant can be named in the
 * shared() clause; some compilers reject listing the file-scope globals
 * (predetermined shared) in shared() with default(none) -- confirm on
 * the supported toolchains. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int i;
char *cp=FORTINET_MAGIC;
#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(ctx_salt, count, saved_key, saved_key_len, crypt_key, cp)
#endif
for (i = 0; i < count; i++) {
SHA_CTX ctx;
/* Copying the context resumes hashing after the salt bytes. */
memcpy(&ctx, &ctx_salt, sizeof(ctx));
SHA1_Update(&ctx, saved_key[i], saved_key_len[i]);
SHA1_Update(&ctx, cp, FORTINET_MAGIC_LENGTH);
SHA1_Final((unsigned char*)crypt_key[i], &ctx);
}
return count;
}
/* Hash-table lookup helpers: low 4..27 bits of the first 32-bit word
 * of the computed digest for candidate `index`. */
static int get_hash_0(int index) { return ((ARCH_WORD_32 *)(crypt_key[index]))[0] & 0xf; }
static int get_hash_1(int index) { return ((ARCH_WORD_32 *)(crypt_key[index]))[0] & 0xff; }
static int get_hash_2(int index) { return ((ARCH_WORD_32 *)(crypt_key[index]))[0] & 0xfff; }
static int get_hash_3(int index) { return ((ARCH_WORD_32 *)(crypt_key[index]))[0] & 0xffff; }
static int get_hash_4(int index) { return ((ARCH_WORD_32 *)(crypt_key[index]))[0] & 0xfffff; }
static int get_hash_5(int index) { return ((ARCH_WORD_32 *)(crypt_key[index]))[0] & 0xffffff; }
static int get_hash_6(int index) { return ((ARCH_WORD_32 *)(crypt_key[index]))[0] & 0x7ffffff; }
/* Bucket a salt by the low bits of its first 32-bit word. */
static int salt_hash(void *salt)
{
	return (int)(*(ARCH_WORD_32 *)salt & (SALT_HASH_SIZE - 1));
}
/* Format registration: parameters block, then the method table wiring
 * this plugin's functions into the John the Ripper core. */
struct fmt_main fmt_FGT = {
{
/* fmt_params */
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP ,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fgt_tests
}, {
/* fmt_methods */
fmt_default_init,
fmt_default_done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
binary,
get_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
salt_hash,
set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
ompcompress.c | #ifdef _OPENMP
#if defined(WITH_IPP)
/*
* This source code file was modified with Intel(R) Integrated Performance Primitives library content
*/
#endif
/* compress 1d contiguous array in parallel */
/* zfp OpenMP backend: compress a 1d contiguous array.  The array is cut
 * into 4-value blocks; blocks are grouped into `chunks`, one chunk per
 * parallel iteration; each chunk compresses into its own bitstream,
 * and the streams are concatenated afterward.  _t2 is zfp's template
 * macro that mangles the name with the Scalar type and dimension. */
static void
_t2(compress_omp, Scalar, 1)(zfp_stream* stream, const zfp_field* field)
{
/* array metadata */
const Scalar* data = (const Scalar*)field->data;
uint nx = field->nx;
/* number of omp threads, blocks, and chunks */
uint threads = thread_count_omp(stream);
uint blocks = (nx + 3) / 4;
uint chunks = chunk_count_omp(stream, blocks, threads);
/* allocate per-thread streams */
bitstream** bs = compress_init_par(stream, field, chunks, blocks);
/* compress chunks of blocks in parallel */
int chunk;
#pragma omp parallel for num_threads(threads)
for (chunk = 0; chunk < (int)chunks; chunk++) {
/* determine range of block indices assigned to this thread */
uint bmin = chunk_offset(blocks, chunks, chunk + 0);
uint bmax = chunk_offset(blocks, chunks, chunk + 1);
uint block;
/* set up thread-local bit stream: a private copy of the zfp stream
   pointing at this chunk's bitstream */
zfp_stream s = *stream;
zfp_stream_set_bit_stream(&s, bs[chunk]);
/* compress sequence of blocks */
for (block = bmin; block < bmax; block++) {
/* determine block origin x within array */
const Scalar* p = data;
uint x = 4 * block;
p += x;
/* compress partial or full block (the last block may hold < 4 values) */
if (nx - x < 4)
_t2(zfp_encode_partial_block_strided, Scalar, 1)(&s, p, MIN(nx - x, 4u), 1);
else
_t2(zfp_encode_block, Scalar, 1)(&s, p);
}
}
/* concatenate per-thread streams */
compress_finish_par(stream, bs, chunks);
}
/* compress 1d strided array in parallel */
static void
_t2(compress_strided_omp, Scalar, 1)(zfp_stream* stream, const zfp_field* field)
{
  /* array metadata */
  const Scalar* src = (const Scalar*)field->data;
  uint nx = field->nx;
  int sx = field->sx ? field->sx : 1;
  /* number of omp threads, 4-sample blocks, and chunks of blocks */
  uint threads = thread_count_omp(stream);
  uint block_count = (nx + 3) / 4;
  uint chunk_count = chunk_count_omp(stream, block_count, threads);
  /* one private bit stream per chunk */
  bitstream** streams = compress_init_par(stream, field, chunk_count, block_count);
  int c;
  /* chunks of blocks are compressed independently in parallel */
  #pragma omp parallel for num_threads(threads)
  for (c = 0; c < (int)chunk_count; c++) {
    /* this chunk owns blocks [first, last) */
    uint first = chunk_offset(block_count, chunk_count, c + 0);
    uint last = chunk_offset(block_count, chunk_count, c + 1);
    uint b;
    /* thread-local codec state bound to this chunk's private bit stream */
    zfp_stream codec = *stream;
    zfp_stream_set_bit_stream(&codec, streams[c]);
    for (b = first; b < last; b++) {
      /* origin x of block b; advance by the element stride */
      uint x = 4 * b;
      const Scalar* ptr = src + sx * (ptrdiff_t)x;
      /* encode a full block, or a padded partial block at the array edge */
      if (nx - x < 4)
        _t2(zfp_encode_partial_block_strided, Scalar, 1)(&codec, ptr, MIN(nx - x, 4u), sx);
      else
        _t2(zfp_encode_block_strided, Scalar, 1)(&codec, ptr, sx);
    }
  }
  /* splice the per-chunk streams back into the caller's stream */
  compress_finish_par(stream, streams, chunk_count);
}
/* compress 2d strided array in parallel */
static void
_t2(compress_strided_omp, Scalar, 2)(zfp_stream* stream, const zfp_field* field)
{
  /* array metadata */
  const Scalar* src = (const Scalar*)field->data;
  uint nx = field->nx;
  uint ny = field->ny;
  int sx = field->sx ? field->sx : 1;
  int sy = field->sy ? field->sy : nx;
  /* number of omp threads, 4x4 blocks, and chunks of blocks */
  uint threads = thread_count_omp(stream);
  uint bx = (nx + 3) / 4;
  uint by = (ny + 3) / 4;
  uint block_count = bx * by;
  uint chunk_count = chunk_count_omp(stream, block_count, threads);
  /* one private bit stream per chunk */
  bitstream** streams = compress_init_par(stream, field, chunk_count, block_count);
  int c;
  /* chunks of blocks are compressed independently in parallel */
  #pragma omp parallel for num_threads(threads)
  for (c = 0; c < (int)chunk_count; c++) {
    /* this chunk owns blocks [first, last) */
    uint first = chunk_offset(block_count, chunk_count, c + 0);
    uint last = chunk_offset(block_count, chunk_count, c + 1);
    uint b;
    /* thread-local codec state bound to this chunk's private bit stream */
    zfp_stream codec = *stream;
    zfp_stream_set_bit_stream(&codec, streams[c]);
    for (b = first; b < last; b++) {
      /* block index b maps to origin (x, y); blocks are x-fastest ordered */
      uint x = 4 * (b % bx);
      uint y = 4 * (b / bx);
      const Scalar* ptr = src + sx * (ptrdiff_t)x + sy * (ptrdiff_t)y;
      /* encode a full block, or a padded partial block at the array edge */
      if (nx - x < 4 || ny - y < 4)
        _t2(zfp_encode_partial_block_strided, Scalar, 2)(&codec, ptr, MIN(nx - x, 4u), MIN(ny - y, 4u), sx, sy);
      else
        _t2(zfp_encode_block_strided, Scalar, 2)(&codec, ptr, sx, sy);
    }
  }
  /* splice the per-chunk streams back into the caller's stream */
  compress_finish_par(stream, streams, chunk_count);
}
#if defined(IPP_OPTIMIZATION_ENABLED) && !defined(_SET_TMP_BLOCK_FROM_)
#define _SET_TMP_BLOCK_FROM_
/* Copy partial data to 4x4x4 block */
/* Replicate a partial (sizeX x sizeY x sizeZ) region of the source array
 * into a full 4x4x4 scratch block, padding by repeating the last valid
 * sample along each dimension (edge replication).
 *
 * pSrc      - first sample of the partial region (read-only)
 * stepY     - stride, in elements, between consecutive rows of pSrc
 * stepZ     - stride, in elements, between consecutive planes of pSrc
 * sizeX/Y/Z - extents of the valid region, each expected in [1, 4]
 * pTmpBlock - destination: 64 contiguous elements in x-fastest order
 *
 * Fix: the row cursor is now const-qualified; the original cast away
 * the const of pSrc even though the cursor is only ever read through. */
static void CopyFromPartialBlock(const Ipp32f *pSrc, int stepY, int stepZ, int sizeX, int sizeY, int sizeZ, Ipp32f *pTmpBlock)
{
    const Ipp32f *pTmp;
    int x, y, z, serIdx;
    int copyX, copyY, copyZ;
    for (serIdx = z = 0; z < 4; z++) {
        /* clamp to the last valid plane once past the region's extent */
        copyZ = (z < sizeZ) ? z : sizeZ - 1;
        for (y = 0; y < 4; y++) {
            copyY = (y < sizeY) ? y : sizeY - 1;
            pTmp = pSrc + copyZ * stepZ + copyY * stepY;
            for (x = 0; x < 4; x++) {
                copyX = (x < sizeX) ? x : sizeX - 1;
                pTmpBlock[serIdx++] = pTmp[copyX];
            }
        }
    }
}
#endif
/* compress 3d strided array in parallel */
static void
_t2(compress_strided_omp, Scalar, 3)(zfp_stream* stream, const zfp_field* field)
{
  /* array metadata */
  const Scalar* data = (const Scalar*)field->data;
  uint nx = field->nx;
  uint ny = field->ny;
  uint nz = field->nz;
  /* strides default to a contiguous x-fastest layout when unset */
  int sx = field->sx ? field->sx : 1;
  int sy = field->sy ? field->sy : nx;
  /* NOTE(review): the plane stride is computed in ptrdiff_t but stored in
     an int, truncating for very large nx*ny -- confirm intended stride type */
  int sz = field->sz ? field->sz : (ptrdiff_t)nx * ny;
  /* number of omp threads, blocks, and chunks */
  uint threads = thread_count_omp(stream);
  uint bx = (nx + 3) / 4;
  uint by = (ny + 3) / 4;
  uint bz = (nz + 3) / 4;
  uint blocks = bx * by * bz;
  uint chunks = chunk_count_omp(stream, blocks, threads);
  /* allocate per-thread streams */
  bitstream** bs = compress_init_par(stream, field, chunks, blocks);
#if defined (IPP_OPTIMIZATION_ENABLED)
  /* IPP path: per-thread encoder states plus per-chunk bit lengths so the
     variable-length chunk outputs can be concatenated afterwards */
  IppEncodeZfpState_32f* pStates = NULL;
  /* NOTE(review): malloc result is not checked for NULL before use */
  Ipp64u* chunk_bit_lengths = (Ipp64u*)malloc(sizeof(Ipp64u)* chunks);
  /* source strides in bytes, for full (non-partial) 4x4x4 blocks */
  int srcBlockLineStep = nx * sizeof(Ipp32f);
  int srcBlockPlaneStep = ny * srcBlockLineStep;
  uint min_bits, max_bits, max_prec;
  int min_exp;
  int sizeState = 0;
  /**/
  /* mirror the zfp compression parameters into each IPP encoder state */
  zfp_stream_params(stream, &min_bits, &max_bits, &max_prec, &min_exp);
  /* allocate per-thread IppEncodeZfpState_32f */
  ippsEncodeZfpGetStateSize_32f(&sizeState);
  pStates = (IppEncodeZfpState_32f*)ippsMalloc_8u(sizeState * threads);
#endif
  /* compress chunks of blocks in parallel */
  int chunk;
#if !defined (IPP_OPTIMIZATION_ENABLED)
  #pragma omp parallel for num_threads(threads)
#else
  //firstprivate (min_bits, max_bits, max_prec, min_exp)
  /* IPP path opens an explicit parallel region so each thread can bind
     its own encoder state before the work-shared loop below */
  #pragma omp parallel \
  num_threads(threads)
  {
  bitstream *pBitStream = NULL;
  IppEncodeZfpState_32f* pState = NULL;
  Ipp32f pTmpBlock[64];
  /* each thread indexes its slice of the contiguous state allocation */
  pState = (IppEncodeZfpState_32f*)((Ipp8u*)pStates + omp_get_thread_num() * sizeState);
  #pragma omp for
#endif
  for (chunk = 0; chunk < (int)chunks; chunk++) {
    /* determine range of block indices assigned to this thread */
    uint bmin = chunk_offset(blocks, chunks, chunk + 0);
    uint bmax = chunk_offset(blocks, chunks, chunk + 1);
    uint block;
    /* set up thread-local bit stream */
    zfp_stream s = *stream;
    zfp_stream_set_bit_stream(&s, bs[chunk]);
#if defined (IPP_OPTIMIZATION_ENABLED)
    /* point the IPP encoder at this chunk's private buffer */
    pBitStream = bs[chunk];
    ippsEncodeZfpInitLong_32f((Ipp8u*)stream_data(pBitStream), stream_capacity(pBitStream), pState);
    ippsEncodeZfpSet_32f(min_bits, max_bits, max_prec, min_exp, pState);
#endif
    /* compress sequence of blocks */
    for (block = bmin; block < bmax; block++) {
      const Scalar* p = data;
      uint b = block;
      uint x, y, z;
      /* determine block origin (x, y, z) within array */
      x = 4 * (b % bx); b /= bx;
      y = 4 * (b % by); b /= by;
      z = 4 * b;
      p += sx * (ptrdiff_t)x + sy * (ptrdiff_t)y + sz * (ptrdiff_t)z;
      // compress partial or full block
      if (nx - x < 4 || ny - y < 4 || nz - z < 4)
      {
#if !defined(IPP_OPTIMIZATION_ENABLED)
        _t2(zfp_encode_partial_block_strided, Scalar, 3)(&s, p, MIN(nx - x, 4u), MIN(ny - y, 4u), MIN(nz - z, 4u), sx, sy, sz);
#else
        /* IPP has no partial-block entry point: pad into a full 4x4x4
           scratch block by edge replication, then encode that */
        CopyFromPartialBlock((const Ipp32f *)p, sy, sz, MIN(nx - x, 4u), MIN(ny - y, 4u), MIN(nz - z, 4u), pTmpBlock);
        ippsEncodeZfp444_32f(pTmpBlock, 4 * sizeof(Ipp32f), 4 * 4 * sizeof(Ipp32f), pState);
#endif
      }
      else
      {
#if !defined(IPP_OPTIMIZATION_ENABLED)
        _t2(zfp_encode_block_strided, Scalar, 3)(&s, p, sx, sy, sz);
#else
        ippsEncodeZfp444_32f((const Ipp32f *)p, srcBlockLineStep, srcBlockPlaneStep, pState);
#endif
      }
    }
#if defined (IPP_OPTIMIZATION_ENABLED)
    /* record how many bits this chunk produced, flush the encoder, and
       mark end-of-stream so the chunks can be concatenated in order */
    if (pState != NULL)
    {
      Ipp64u chunk_compr_length;
      ippsEncodeZfpGetCompressedBitSize_32f(pState, &chunk_bit_lengths[chunk]);
      ippsEncodeZfpFlush_32f(pState);
      chunk_compr_length = (size_t)((chunk_bit_lengths[chunk] + 7) >> 3);
      stream_set_eos(pBitStream, chunk_compr_length);
    }
#endif
  }
#if defined (IPP_OPTIMIZATION_ENABLED)
  }//The end of pragma omp parallel block
  /* concatenate per-thread streams */
  if (pStates != NULL)
  {
    compress_finish_par_opt(stream, bs, chunks, chunk_bit_lengths);
    free(chunk_bit_lengths);
    ippsFree(pStates);
    return;
  }
#endif
  /* concatenate per-thread streams */
  compress_finish_par(stream, bs, chunks);
}
/* compress 4d strided array in parallel */
static void
_t2(compress_strided_omp, Scalar, 4)(zfp_stream* stream, const zfp_field* field)
{
  /* array metadata */
  const Scalar* src = (const Scalar*)field->data;
  uint nx = field->nx;
  uint ny = field->ny;
  uint nz = field->nz;
  uint nw = field->nw;
  int sx = field->sx ? field->sx : 1;
  int sy = field->sy ? field->sy : nx;
  int sz = field->sz ? field->sz : (ptrdiff_t)nx * ny;
  int sw = field->sw ? field->sw : (ptrdiff_t)nx * ny * nz;
  /* number of omp threads, 4x4x4x4 blocks, and chunks of blocks */
  uint threads = thread_count_omp(stream);
  uint bx = (nx + 3) / 4;
  uint by = (ny + 3) / 4;
  uint bz = (nz + 3) / 4;
  uint bw = (nw + 3) / 4;
  uint block_count = bx * by * bz * bw;
  uint chunk_count = chunk_count_omp(stream, block_count, threads);
  /* one private bit stream per chunk */
  bitstream** streams = compress_init_par(stream, field, chunk_count, block_count);
  int c;
  /* chunks of blocks are compressed independently in parallel */
  #pragma omp parallel for num_threads(threads)
  for (c = 0; c < (int)chunk_count; c++) {
    /* this chunk owns blocks [first, last) */
    uint first = chunk_offset(block_count, chunk_count, c + 0);
    uint last = chunk_offset(block_count, chunk_count, c + 1);
    uint b;
    /* thread-local codec state bound to this chunk's private bit stream */
    zfp_stream codec = *stream;
    zfp_stream_set_bit_stream(&codec, streams[c]);
    for (b = first; b < last; b++) {
      /* decompose block index into origin (x, y, z, w), x-fastest order */
      uint rem = b;
      uint x = 4 * (rem % bx); rem /= bx;
      uint y = 4 * (rem % by); rem /= by;
      uint z = 4 * (rem % bz); rem /= bz;
      uint w = 4 * rem;
      const Scalar* ptr = src + sx * (ptrdiff_t)x + sy * (ptrdiff_t)y + sz * (ptrdiff_t)z + sw * (ptrdiff_t)w;
      /* encode a full block, or a padded partial block at the array edge */
      if (nx - x < 4 || ny - y < 4 || nz - z < 4 || nw - w < 4)
        _t2(zfp_encode_partial_block_strided, Scalar, 4)(&codec, ptr, MIN(nx - x, 4u), MIN(ny - y, 4u), MIN(nz - z, 4u), MIN(nw - w, 4u), sx, sy, sz, sw);
      else
        _t2(zfp_encode_block_strided, Scalar, 4)(&codec, ptr, sx, sy, sz, sw);
    }
  }
  /* splice the per-chunk streams back into the caller's stream */
  compress_finish_par(stream, streams, chunk_count);
}
#endif
|
copy-meet.c | int main() {
int X = 42;
int Y = 10;
int Z = 20;
#pragma omp parallel
{
int t1;
while (1) {
Z = Y;
if (X < 10) {
Y = Y + 1;
#pragma omp barrier
break;
}
t1 = 10;
#pragma omp barrier
X = Z + Y + 3;
#pragma omp barrier
}
}
}
|
pdf_fmt_plug.c | /* PDF cracker patch for JtR. Hacked together during Monsoon of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>
*
* Uses code from Sumatra PDF and MuPDF which are under GPL.
*
* Edited by Shane Quigley in 2013.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_pdf;
#elif FMT_REGISTERS_H
john_register_one(&fmt_pdf);
#else
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#include "misc.h"
#include "md5.h"
#include "aes.h"
#include "sha2.h"
#include "rc4.h"
#include "pdfcrack_md5.h"
#include "loader.h"
#include "memdbg.h"
#define FORMAT_LABEL "PDF"
#define FORMAT_NAME ""
#define FORMAT_TAG "$pdf$"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#define FORMAT_TAG_OLD "$pdf$Standard*"
#define FORMAT_TAG_OLD_LEN (sizeof(FORMAT_TAG_OLD)-1)
#define ALGORITHM_NAME "MD5 SHA2 RC4/AES 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1000
#define PLAINTEXT_LENGTH 32
#define BINARY_SIZE 0
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN 1
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#if defined (_OPENMP)
static int omp_t = 1;
#endif
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static int *cracked;
static int any_cracked;
static size_t cracked_size;
/* Parsed contents of one "$pdf$..." hash: the PDF encryption-dictionary
 * values needed to derive and test the user password. */
static struct custom_salt {
	int V;                 /* encryption version field */
	int R;                 /* security handler revision (2..6; selects algorithm) */
	int P;                 /* permission flags, stored signed as in the hash */
	char encrypt_metadata; /* nonzero if document metadata is encrypted (used when R >= 4) */
	unsigned char u[127];  /* /U user password validation string */
	unsigned char o[127];  /* /O owner password string */
	unsigned char ue[32];  /* /UE blob -- zeroed; not parsed by get_salt() in this file */
	unsigned char oe[32];  /* /OE blob -- zeroed; not parsed by get_salt() in this file */
	unsigned char id[128]; /* first element of the document /ID array */
	int length;            /* encryption key length in bits */
	int length_id;         /* bytes used in id[] */
	int length_u;          /* bytes used in u[] */
	int length_o;          /* bytes used in o[] */
	int length_ue;         /* bytes used in ue[] (unused here) */
	int length_oe;         /* bytes used in oe[] (unused here) */
} *crypt_out;                  /* active salt, selected by set_salt() */
static struct fmt_tests pdf_tests[] = {
{"$pdf$4*4*128*-1028*1*16*e03460febe17a048b0adc7f7631bcc56*32*3456205208ad52066d5604018d498a6400000000000000000000000000000000*32*6d598152b22f8fa8085b19a866dce1317f645788a065a74831588a739a579ac4", "openwall"},
{"$pdf$2*3*128*-4*1*16*34b1b6e593787af681a9b63fa8bf563b*32*289ece9b5ce451a5d7064693dab3badf101112131415161718191a1b1c1d1e1f*32*badad1e86442699427116d3e5d5271bc80a27814fc5e80f815efeef839354c5f", "test"},
{"$pdf$4*4*128*-1028*1*16*c015cff8dbf99345ac91c84a45667784*32*0231a4c9cae29b53892874e168cfae9600000000000000000000000000000000*32*137ad7063db5114a66ce1900d47e5cab9c5d7053487d92ac978f54db86eca393", "testpassword"},
{"$pdf$5*6*256*-1028*1*16*05e5abeb21ad2e47adac1c2b2c7b7a31*127*51d3a6a09a675503383e5bc0b53da77ec5d5ea1d1998fb94e00a02a1c2e49313c177905272a4e8e68b382254ec8ed74800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000*127*dc38f01ef129aae2fca847396465ed518f9c7cf4f2c8cb4399a849d0fe9110227739ab88ddc9a6cf388ae11941270af500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000*32*b8e137baf316e0789ffa73f888d26495c14d31f2cfff3799e339e2fa078649f5*32*835a9e07461992791914c3d62d37493e07d140937529ab43e26ac2a657152c3c", "testpassword"},
{"$pdf$5*5*256*-1028*1*16*762896ef582ca042a15f380c63ab9f2c*127*8713e2afdb65df1d3801f77a4c4da4905c49495e7103afc2deb06d9fba7949a565143288823871270d9d882075a75da600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000*127*15d0b992974ff80529e4b616b8c4c79d787705b6c8a9e0f85446498ae2432e0027d8406b57f78b60b11341a0757d7c4a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000*32*a7a0f3891b469ba7261ce04752dad9c6de0db9c4155c4180e721938a7d9666c7*32*2fa9a0c52badebae2c19dfa7b0005a9cfc909b92babbe7db66a794e96a9f91e3", "openwall"},
/* following are old-style hashes */
{"$pdf$Standard*badad1e86442699427116d3e5d5271bc80a27814fc5e80f815efeef839354c5f*289ece9b5ce451a5d7064693dab3badf101112131415161718191a1b1c1d1e1f*16*34b1b6e593787af681a9b63fa8bf563b*1*1*0*1*4*128*-4*3*2", "test"},
{"$pdf$Standard*9a1156c38ab8177598d1608df7d7e340ae639679bd66bc4cda9bc9a4eedeb170*1f300cd939dd5cf0920c787f12d16be22205e55a5bec5c9c6d563ab4fd0770d7*16*c015cff8dbf99345ac91c84a45667784*1*1*0*1*6*40*-4*2*1", "testpassword"},
{"$pdf$Standard*7303809eaf677bdb5ca64b9d8cb0ccdd47d09a7b28ad5aa522c62685c6d9e499*bf38d7a59daaf38365a338e1fc07976102f1dfd6bdb52072032f57920109b43a*16*c56bbc4145d25b468a873618cd71c2d3*1*1*0*1*6*40*-4*2*1", "test"},
{"$pdf$Standard*137ad7063db5114a66ce1900d47e5cab9c5d7053487d92ac978f54db86eca393*0231a4c9cae29b53892874e168cfae9600000000000000000000000000000000*16*c015cff8dbf99345ac91c84a45667784*1*1*0*1*6*128*-1028*3*2", "testpassword"},
{"$pdf$Standard*d83a8ab680f144dfb2ff2334c206a6060779e007701ab881767f961aecda7984*a5ed4de7e078cb75dfdcd63e8da7a25800000000000000000000000000000000*16*06a7f710cf8dfafbd394540d40984ae2*1*1*0*1*4*128*-1028*3*2", "July2099"},
{"$pdf$Standard*6a80a547b8b8b7636fcc5b322f1c63ce4b670c9b01f2aace09e48d85e1f19f83*e64eb62fc46be66e33571d50a29b464100000000000000000000000000000000*16*14a8c53ffa4a79b3ed9421ef15618420*1*1*0*1*4*128*-1028*3*2", "38r285a9"},
{"$pdf$Standard*2446dd5ed2e18b3ce1ac9b56733226018e3f5c2639051eb1c9b2b215b30bc820*fa3af175d761963c8449ee7015b7770800000000000000000000000000000000*16*12a4da1abe6b7a1ceb84610bad87236d*1*1*0*1*4*128*-1028*3*2", "WHATwhatWHERE?"},
{"$pdf$Standard*e600ecc20288ad8b0d64a929c6a83ee2517679aa0218beceea8b7986726a8cdb*38aca54678d67c003a8193381b0fa1cc101112131415161718191a1b1c1d1e1f*16*1521fbe61419fcad51878cc5d478d5ff*1*1*0*1*4*128*-3904*3*2", ""},
/* CMIYC 2013 "pro" hashes */
{"$pdf$4*4*128*-4*1*16*f7bc2744e1652cf61ca83cac8fccb535*32*f55cc5032f04b985c5aeacde5ec4270f0122456a91bae5134273a6db134c87c4*32*785d891cdcb5efa59893c78f37e7b75acef8924951039b4fa13f62d92bb3b660", "L4sV3g4z"},
{"$pdf$4*4*128*-4*1*16*ec8ea2af2977db1faa4a955904dc956f*32*fc413edb049720b1f8eac87a358faa740122456a91bae5134273a6db134c87c4*32*1ba7aed2f19c77ac6b5061230b62e80b48fc42918f92aef689ceb07d26204991", "ZZt0pr0x"},
{"$pdf$4*4*128*-4*1*16*56761d6da774d8d47387dccf1a84428c*32*640782cab5b7c8f6cf5eab82c38016540122456a91bae5134273a6db134c87c4*32*b5720d5f3d9675a280c6bb8050cbb169e039b578b2de4a42a40dc14765e064cf", "24Le`m0ns"},
/* This hash exposed a problem with our length_id check */
{"$pdf$1*2*40*-4*1*36*65623237393831382d636439372d343130332d613835372d343164303037316639386134*32*c7230519f7db63ab1676fa30686428f0f997932bf831f1c1dcfa48cfb3b7fe99*32*161cd2f7c95283ca9db930b36aad3571ee6f5fb5632f30dc790e19c5069c86b8", "vision"},
{NULL}
};
/* One-time format setup: scale key counts for OpenMP and allocate the
 * per-candidate key and result buffers. */
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
	/* min gets one key per thread; max is further multiplied by
	   OMP_SCALE so each thread receives several keys per crypt_all() */
	int threads = omp_get_max_threads();
	self->params.min_keys_per_crypt *= threads;
	omp_t = threads * OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	/* candidate plaintexts and per-candidate "cracked" flags */
	saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
	any_cracked = 0;
	cracked_size = sizeof(*cracked) * self->params.max_keys_per_crypt;
	cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt);
}
/* Release the buffers allocated by init(). */
static void done(void)
{
	MEM_FREE(saved_key);
	MEM_FREE(cracked);
}
/*
 * Validate a new-style hash:
 *   $pdf$V*R*length*P*encrypt_metadata*length_id*id*length_u*u*length_o*o
 * Returns 1 if well-formed, 0 otherwise.  Tokenizes a strdup'd copy since
 * strtokm() modifies its input; each length field is range-checked against
 * the corresponding fixed-size buffer in struct custom_salt before the hex
 * payload's length and character set are verified.
 */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *keeptr;
	char *p;
	int res;
	if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;	/* original pointer, kept for MEM_FREE */
	ctcopy += FORMAT_TAG_LEN;
	if ((p = strtokm(ctcopy, "*")) == NULL)	/* V */
		goto err;
	if (!isdec(p)) goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* R */
		goto err;
	if (!isdec(p)) goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* length: key size in bits */
		goto err;
	if (!isdec(p)) goto err;
	res = atoi(p);
	if (res > 256)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* P: may be negative */
		goto err;
	if (!isdec_negok(p)) goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* encrypt_metadata */
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* length_id: id[] holds 128 */
		goto err;
	if (!isdec(p)) goto err;
	res = atoi(p);
	if (res > 128)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* id: lowercase hex, 2 chars/byte */
		goto err;
	if (strlen(p) != res * 2)
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* length_u: u[] holds 127 */
		goto err;
	if (!isdec(p)) goto err;
	res = atoi(p);
	if (res > 127)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* u */
		goto err;
	if (strlen(p) != res * 2)
		goto err;
	if (!ishexlc(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* length_o: o[] holds 127 */
		goto err;
	if (!isdec(p)) goto err;
	res = atoi(p);
	if (res > 127)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* o */
		goto err;
	if (strlen(p) != res * 2)
		goto err;
	if (!ishexlc(p))
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}
/*
 * Validate an old-style hash:
 *   $pdf$Standard*o*u*16*fileID*encryptMetaData*work_with_user*
 *   have_userpassword*version_major*version_minor*length*permissions*
 *   revision*version
 * Returns 1 if well-formed, 0 otherwise.  A hash accepted here is safe
 * to feed to convert_old_to_new(), which relies on all 13 fields existing.
 */
static int old_valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy, *ptr, *keeptr;
	int res;
	if (strncmp(ciphertext, FORMAT_TAG_OLD, FORMAT_TAG_OLD_LEN))
		return 0;
	if (!(ctcopy = strdup(ciphertext)))
		return 0;
	keeptr = ctcopy;	/* original pointer, kept for MEM_FREE */
	ctcopy += FORMAT_TAG_OLD_LEN;
	if (!(ptr = strtokm(ctcopy, "*"))) /* o_string */
		goto error;
	if (!ishexlc(ptr))
		goto error;
	if (!(ptr = strtokm(NULL, "*"))) /* u_string */
		goto error;
	if (!ishexlc(ptr))
		goto error;
	if (!(ptr = strtokm(NULL, "*"))) /* fileIDLen: only 16 is supported */
		goto error;
	if (strncmp(ptr, "16", 2))
		goto error;
	if (!(ptr = strtokm(NULL, "*"))) /* fileID */
		goto error;
	if (!ishexlc(ptr))
		goto error;
	if (!(ptr = strtokm(NULL, "*"))) /* encryptMetaData: boolean */
		goto error;
	res = atoi(ptr);
	if (res != 0 && res != 1)
		goto error;
	if (!(ptr = strtokm(NULL, "*"))) /* work_with_user: boolean */
		goto error;
	res = atoi(ptr);
	if (res != 0 && res != 1)
		goto error;
	if (!(ptr = strtokm(NULL, "*"))) /* have_userpassword: boolean */
		goto error;
	res = atoi(ptr);
	if (res != 0 && res != 1)
		goto error;
	if (!(ptr = strtokm(NULL, "*"))) /* version_major */
		goto error;
	if (!(ptr = strtokm(NULL, "*"))) /* version_minor */
		goto error;
	if (!(ptr = strtokm(NULL, "*"))) /* length: key size in bits */
		goto error;
	res = atoi(ptr);
	if (res < 0 || res > 256)
		goto error;
	if (!(ptr = strtokm(NULL, "*"))) /* permissions */
		goto error;
	if (!(ptr = strtokm(NULL, "*"))) /* revision */
		goto error;
	if (!(ptr = strtokm(NULL, "*"))) /* version */
		goto error;
	MEM_FREE(keeptr);
	return 1;
error:
	MEM_FREE(keeptr);
	return 0;
}
/*
 * Rewrite an old-style "$pdf$Standard*..." hash into the new
 * "$pdf$V*R*length*P*..." layout.  The caller must have validated the
 * input with old_valid() first, which guarantees all 13 data fields exist.
 * Returns a mem_alloc_tiny() string (never freed, by design of that pool).
 *
 * Fix: allocate strlen + 1 so the NUL terminator has an explicit byte.
 * The converted string is shorter than the original (the "Standard" tag
 * and four fields are dropped, only two "32" length fields are added),
 * but the previous strlen(ctcopy) bound relied on that accounting rather
 * than reserving the terminator outright.
 */
char * convert_old_to_new(char ciphertext[])
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *out = mem_alloc_tiny(strlen(ctcopy) + 1, MEM_ALIGN_NONE);
	const char *fields[14];
	char *p;
	int c = 0;
	/* fields[0] is the "$pdf$Standard" tag itself; fields[1..13] are the
	   data fields in old_valid() order */
	p = strtokm(ctcopy, "*");
	for (c = 0; c < 14; c++) {
		fields[c] = p;
		p = strtokm(NULL, "*");
	}
	/* new order: V*R*length*P*encrypt_metadata*length_id*id*32*u*32*o */
	strcpy(out, FORMAT_TAG);
	strcat(out, fields[13]);	/* version -> V */
	strcat(out, "*");
	strcat(out, fields[12]);	/* revision -> R */
	strcat(out, "*");
	strcat(out, fields[10]);	/* length */
	strcat(out, "*");
	strcat(out, fields[11]);	/* permissions -> P */
	strcat(out, "*");
	strcat(out, fields[5]);	/* encryptMetaData */
	strcat(out, "*");
	strcat(out, fields[3]);	/* fileIDLen -> length_id */
	strcat(out, "*");
	strcat(out, fields[4]);	/* fileID -> id */
	strcat(out, "*32*");	/* length_u is fixed at 32 */
	strcat(out, fields[2]);	/* u_string */
	strcat(out, "*32*");	/* length_o is fixed at 32 */
	strcat(out, fields[1]);	/* o_string */
	MEM_FREE(keeptr);
	return out;
}
/* Canonicalize a candidate hash: old-style "$pdf$Standard*" hashes are
 * rewritten into the current layout; everything else passes through. */
static char *prepare(char *split_fields[10], struct fmt_main *self)
{
	char *ciphertext = split_fields[1];
	if (!strncmp(ciphertext, FORMAT_TAG_OLD, FORMAT_TAG_OLD_LEN) &&
	    old_valid(ciphertext, self))
		return convert_old_to_new(ciphertext);
	return ciphertext;
}
/*
 * Parse a (new-style, already validated) ciphertext into a custom_salt.
 * Returns a pointer to a static buffer that the caller copies SALT_SIZE
 * bytes from; not reentrant, which is fine for the single-threaded loader.
 * Note: the ue/oe fields are left zeroed -- only V, R, length, P,
 * encrypt_metadata, id, u and o are read from the hash here.
 */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	int i;
	char *p;
	static struct custom_salt cs;
	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN;	/* skip over "$pdf$" marker */
	p = strtokm(ctcopy, "*");
	cs.V = atoi(p);
	p = strtokm(NULL, "*");
	cs.R = atoi(p);
	p = strtokm(NULL, "*");
	cs.length = atoi(p);	/* key length in bits */
	p = strtokm(NULL, "*");
	cs.P = atoi(p);
	p = strtokm(NULL, "*");
	cs.encrypt_metadata = atoi(p);
	p = strtokm(NULL, "*");
	cs.length_id = atoi(p);
	p = strtokm(NULL, "*");
	/* decode two lowercase hex digits per byte */
	for (i = 0; i < cs.length_id; i++)
		cs.id[i] =
			atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.length_u = atoi(p);
	p = strtokm(NULL, "*");
	for (i = 0; i < cs.length_u; i++)
		cs.u[i] =
			atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			atoi16[ARCH_INDEX(p[i * 2 + 1])];
	p = strtokm(NULL, "*");
	cs.length_o = atoi(p);
	p = strtokm(NULL, "*");
	for (i = 0; i < cs.length_o; i++)
		cs.o[i] =
			atoi16[ARCH_INDEX(p[i * 2])] * 16 +
			atoi16[ARCH_INDEX(p[i * 2 + 1])];
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/* Select the parsed salt that subsequent crypt_all() calls will test against. */
static void set_salt(void *salt)
{
	crypt_out = (struct custom_salt *)salt;
}
static void pdf_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored candidate password for slot `index`. */
static char *get_key(int index)
{
	return saved_key[index];
}
/* Fixed 32-byte padding string used by pdf_compute_encryption_key() and
 * pdf_compute_user_password() to pad/extend short passwords. */
static const unsigned char padding[32] =
{
	0x28, 0xbf, 0x4e, 0x5e, 0x4e, 0x75, 0x8a, 0x41,
	0x64, 0x00, 0x4e, 0x56, 0xff, 0xfa, 0x01, 0x08,
	0x2e, 0x2e, 0x00, 0xb6, 0xd0, 0x68, 0x3e, 0x80,
	0x2f, 0x0c, 0xa9, 0xfe, 0x64, 0x53, 0x69, 0x7a
};
/* Compute an encryption key (PDF 1.7 algorithm 3.2).
 * password/pwlen: candidate password bytes (truncated to 32 below)
 * key:            output; receives the first n = length/8 bytes of the
 *                 final MD5 digest (so at most 16 bytes are written)
 * Reads V/R/P/length/id/o/encrypt_metadata from the global crypt_out. */
static void
pdf_compute_encryption_key(unsigned char *password, int pwlen, unsigned char *key)
{
	unsigned char buf[32];
	unsigned int p;
	int n;
	MD5_CTX md5;
	n = crypt_out->length / 8;	/* key length in bytes */
	/* Step 1 - copy and pad password string */
	if (pwlen > 32)
		pwlen = 32;
	memcpy(buf, password, pwlen);
	memcpy(buf + pwlen, padding, 32 - pwlen);
	/* Step 2 - init md5 and pass value of step 1 */
	MD5_Init(&md5);
	MD5_Update(&md5, buf, 32);
	/* Step 3 - pass O value */
	MD5_Update(&md5, crypt_out->o, 32);
	/* Step 4 - pass P value as unsigned int, low-order byte first */
	p = (unsigned int) crypt_out->P;
	buf[0] = (p) & 0xFF;
	buf[1] = (p >> 8) & 0xFF;
	buf[2] = (p >> 16) & 0xFF;
	buf[3] = (p >> 24) & 0xFF;
	MD5_Update(&md5, buf, 4);
	/* Step 5 - pass first element of ID array */
	MD5_Update(&md5, crypt_out->id, crypt_out->length_id);
	/* Step 6 (revision 4 or greater) - if metadata is not encrypted pass 0xFFFFFFFF */
	if (crypt_out->R >= 4)
	{
		if (!crypt_out->encrypt_metadata)
		{
			buf[0] = 0xFF;
			buf[1] = 0xFF;
			buf[2] = 0xFF;
			buf[3] = 0xFF;
			MD5_Update(&md5, buf, 4);
		}
	}
	/* Step 7 - finish the hash */
	MD5_Final(buf, &md5);
	/* Step 8 (revision 3 or greater) - do some voodoo 50 times:
	 * re-hash the first n bytes of the digest, 50 iterations
	 * (md5_50() is an optimized equivalent of the loop below) */
	if (crypt_out->R >= 3)
	{
		/* for (i = 0; i < 50; i++)
		{
			MD5_Init(&md5);
			MD5_Update(&md5, buf, n);
			MD5_Final(buf, &md5);
		} */
		md5_50(buf);
	}
	/* Step 9 - the key is the first 'n' bytes of the result */
	memcpy(key, buf, n);
}
/* Compute an encryption key (PDF 1.7 ExtensionLevel 3 algorithm 3.2a).
 * SHA-256 over password || 8-byte validation salt [|| 48-byte /U blob when
 * checking the owner password].  The salts come from crypt_out->o / ->u.
 * buffer is sized 128+8+48, enough for the 127-byte password maximum. */
static void
pdf_compute_encryption_key_r5(unsigned char *password, int pwlen, int ownerkey, unsigned char *validationkey)
{
	unsigned char buffer[128 + 8 + 48];
	SHA256_CTX sha256;
	/* Step 2 - truncate UTF-8 password to 127 characters */
	if (pwlen > 127)
		pwlen = 127;
	/* Step 3/4 - test password against owner/user key and compute encryption key */
	memcpy(buffer, password, pwlen);
	if (ownerkey)
	{
		/* owner check: validation salt from /O, plus the whole /U string */
		memcpy(buffer + pwlen, crypt_out->o + 32, 8);
		memcpy(buffer + pwlen + 8, crypt_out->u, 48);
	}
	else
		/* user check: validation salt is bytes 32..39 of /U */
		memcpy(buffer + pwlen, crypt_out->u + 32, 8);
	SHA256_Init(&sha256);
	SHA256_Update(&sha256, buffer, pwlen + 8 + (ownerkey ? 48 : 0));
	SHA256_Final(validationkey, &sha256);
}
/* SumatraPDF: support crypt version 5 revision 6 */
/*
* Compute an encryption key (PDF 1.7 ExtensionLevel 8 algorithm 3.2b)
* http://esec-lab.sogeti.com/post/The-undocumented-password-validation-algorithm-of-Adobe-Reader-X
*/
static void
pdf_compute_hardened_hash_r6(unsigned char *password, int pwlen, unsigned char salt[8],
	unsigned char *ownerkey, unsigned char hash[32])
{
	/* Iterated SHA-2/AES-CBC mixing; the round count is data-dependent:
	 * at least 64 rounds, then continue until the last byte of the
	 * encrypted data is < round - 32. */
	unsigned char data[(128 + 64 + 48) * 64];
	unsigned char block[64];
	int block_size = 32;	/* size of current data block; 32/48/64 per round */
	int data_len = 0;
	int i, j, sum;
	SHA256_CTX sha256;
	SHA512_CTX sha384;	/* SHA-384 shares the SHA-512 context type */
	SHA512_CTX sha512;
	AES_KEY aes;
	/* Step 1: calculate initial data block */
	SHA256_Init(&sha256);
	SHA256_Update(&sha256, password, pwlen);
	SHA256_Update(&sha256, salt, 8);
	if (ownerkey)
		SHA256_Update(&sha256, ownerkey, 48);
	SHA256_Final(block, &sha256);
	/* second operand of || is only evaluated once i >= 64, by which time
	 * data[] and data_len have been filled in by the loop body */
	for (i = 0; i < 64 || i < data[data_len * 64 - 1] + 32; i++)
	{
		/* Step 2: repeat password and data block 64 times */
		memcpy(data, password, pwlen);
		memcpy(data + pwlen, block, block_size);
		// ownerkey is always NULL
		// memcpy(data + pwlen + block_size, ownerkey, ownerkey ? 48 : 0);
		data_len = pwlen + block_size + (ownerkey ? 48 : 0);
		for (j = 1; j < 64; j++)
			memcpy(data + j * data_len, data, data_len);
		/* Step 3: encrypt data using data block as key and iv */
		AES_set_encrypt_key(block, 128, &aes);
		// aes_crypt_cbc(&aes, AES_ENCRYPT, data_len * 64, block + 16, data, data);
		AES_cbc_encrypt(data, data, data_len * 64, &aes, block + 16, AES_ENCRYPT);
		/* Step 4: determine SHA-2 hash size for this round
		 * (sum of first 16 ciphertext bytes, mod 3) */
		for (j = 0, sum = 0; j < 16; j++)
			sum += data[j];
		/* Step 5: calculate data block for next round */
		block_size = 32 + (sum % 3) * 16;
		switch (block_size)
		{
		case 32:
			SHA256_Init(&sha256);
			SHA256_Update(&sha256, data, data_len * 64);
			SHA256_Final(block, &sha256);
			break;
		case 48:
			SHA384_Init(&sha384);
			SHA384_Update(&sha384, data, data_len * 64);
			SHA384_Final(block, &sha384);
			break;
		case 64:
			SHA512_Init(&sha512);
			SHA512_Update(&sha512, data, data_len * 64);
			SHA512_Final(block, &sha512);
			break;
		}
	}
	/* scrub the password-derived scratch buffer before returning */
	memset(data, 0, sizeof(data));
	memcpy(hash, block, 32);
}
/* Computing the user password (PDF 1.7 algorithm 3.4 and 3.5).
 * Derives the /U validation value for `password` into `output` (32 bytes),
 * dispatching on the handler revision crypt_out->R:
 *   R2:   RC4 of the padding string (alg 3.4)
 *   R3/4: iterated RC4 of MD5(padding||id); only the first 16 bytes are
 *         meaningful, the rest is filled with padding (alg 3.5)
 *   R5/6: SHA-2 based validation hashes */
static void pdf_compute_user_password(unsigned char *password, unsigned char *output)
{
	int pwlen = strlen((char*)password);
	unsigned char key[128];
	if (crypt_out->R == 2) {
		RC4_KEY arc4;
		int n;
		n = crypt_out->length / 8;	/* RC4 key bytes */
		pdf_compute_encryption_key(password, pwlen, key);
		RC4_set_key(&arc4, n, key);
		RC4(&arc4, 32, padding, output);
	}
	if (crypt_out->R == 3 || crypt_out->R == 4) {
		unsigned char xor[32];
		unsigned char digest[16];
		MD5_CTX md5;
		RC4_KEY arc4;
		int i, x, n;
		n = crypt_out->length / 8;
		pdf_compute_encryption_key(password, pwlen, key);
		MD5_Init(&md5);
		MD5_Update(&md5, (char*)padding, 32);
		MD5_Update(&md5, crypt_out->id, crypt_out->length_id);
		MD5_Final(digest, &md5);
		RC4_set_key(&arc4, n, key);
		RC4(&arc4, 16, digest, output);
		/* 19 extra RC4 passes, each keyed with key XOR pass-number */
		for (x = 1; x <= 19; x++) {
			for (i = 0; i < n; i++)
				xor[i] = key[i] ^ x;
			RC4_set_key(&arc4, n, xor);
			RC4(&arc4, 16, output, output);
		}
		/* pad out to 32 bytes; crypt_all() only compares the first 16 */
		memcpy(output + 16, padding, 16);
	}
	if (crypt_out->R == 5) {
		pdf_compute_encryption_key_r5(password, pwlen, 0, output);
	}
	/* SumatraPDF: support crypt version 5 revision 6 */
	if (crypt_out->R == 6)
		pdf_compute_hardened_hash_r6(password, pwlen, crypt_out->u + 32, NULL, output);
}
/* Test `count` candidate passwords against the current salt.
 * Sets cracked[index] for each hit and any_cracked if any hit occurred.
 * Without _OPENMP the body runs once with index = 0, which is consistent
 * with MAX_KEYS_PER_CRYPT being 1 and only scaled up under OpenMP. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index = 0;
	/* reset per-batch results from the previous salt/batch */
	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}
#ifdef _OPENMP
#pragma omp parallel for
	for (index = 0; index < count; index++)
#endif
	{
#if !defined(_OPENMP) && defined (__CYGWIN32__) && defined (MEMDBG_ON)
		static /* work around for some 'unknown' bug in cygwin gcc when using memdbg.h code. I have NO explanation, JimF. */
#endif
		unsigned char output[32];
		pdf_compute_user_password((unsigned char*)saved_key[index], output);
		/* R2/5/6 produce a full 32-byte comparable value */
		if (crypt_out->R == 2 || crypt_out->R == 5 || crypt_out->R == 6)
			if (memcmp(output, crypt_out->u, 32) == 0) {
				cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
				any_cracked |= 1;
			}
		/* R3/4: only the first 16 bytes of /U are significant */
		if (crypt_out->R == 3 || crypt_out->R == 4)
			if (memcmp(output, crypt_out->u, 16) == 0) {
				cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
				any_cracked |= 1;
			}
	}
	return count;
}
/* Quick batch check: did the last crypt_all() crack anything at all? */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}
/* Was candidate `index` cracked in the last crypt_all()? */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}
/* Final confirmation; crypt_all() already did the full comparison. */
static int cmp_exact(char *source, int index)
{
	return cracked[index];
}
/*
* Report revision as tunable cost, since between revisions 2 and 6,
* only revisions 3 and 4 seem to have a similar c/s rate.
*/
/* Tunable-cost callback: expose the handler revision R of a salt. */
static unsigned int pdf_revision(void *salt)
{
	return (unsigned int)((struct custom_salt *)salt)->R;
}
/* Format descriptor registering the PDF cracker with the JtR core. */
struct fmt_main fmt_pdf = {
	{ /* params */
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,	/* plaintext min length */
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP,
		{ /* tunable cost names */
			"revision",
		},
		{ FORMAT_TAG, FORMAT_TAG_OLD },	/* accepted signatures */
		pdf_tests
	},
	{ /* methods */
		init,
		done,
		fmt_default_reset,
		prepare,	/* converts old-style hashes */
		valid,
		fmt_default_split,
		fmt_default_binary,	/* BINARY_SIZE is 0; comparison is via cracked[] */
		get_salt,
		{ /* tunable cost callbacks, matching the names above */
			pdf_revision,
		},
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,	/* salt_compare */
		set_salt,
		pdf_set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
|
GB_binop__plus_uint64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_uint64)
// A.*B function (eWiseMult): GB (_AemultB_01__plus_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_uint64)
// A.*B function (eWiseMult): GB (_AemultB_03__plus_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_uint64)
// A*D function (colscale): GB (_AxD__plus_uint64)
// D*A function (rowscale): GB (_DxB__plus_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_uint64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_uint64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_uint64)
// C=scalar+B GB (_bind1st__plus_uint64)
// C=scalar+B' GB (_bind1st_tran__plus_uint64)
// C=A+scalar GB (_bind2nd__plus_uint64)
// C=A'+scalar GB (_bind2nd_tran__plus_uint64)
// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = (aij + bij)
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x + y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_UINT64 || GxB_NO_PLUS_UINT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B, with C, A, B all dense; the PLUS_UINT64 loop body comes from the
// included template, driven by the GB_* macros defined earlier in this file.
void GB (_Cdense_ewise3_accum__plus_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// NOTE: no GB_DISABLE guard here (unlike the siblings below): the
// generator emits this accum variant unconditionally.
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B, with C, A, B all dense (no accumulation).
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_ewise3_noaccum__plus_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into the dense matrix C,
// using the task slicing of B computed by the caller.
GrB_Info GB (_Cdense_accumB__plus_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed as an opaque GB_void pointer,
// known here to hold a uint64_t) into every entry of the dense matrix C.
GrB_Info GB (_Cdense_accumb__plus_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE(review): this second return is unreachable (the block above always
// returns); harmless artifact of the code generator.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
// Cx is bound here so the included template can write results directly.
GrB_Info GB (_AxD__plus_uint64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__plus_uint64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B (set union of patterns).
// The GB_WERK workspaces declared here are consumed and freed by the
// included template / GB_FREE_WORK pair.
GrB_Info GB (_AaddB__plus_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B (set intersection of patterns),
// general method 01; the real work is in the included meta file.
GrB_Info GB (_AemultB_01__plus_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 02: C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. GB_BINOP_FLIP is 0 for PLUS (commutative), so only the
// unflipped branch below is compiled for this operator.
GrB_Info GB (_AemultB_02__plus_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult method 03: C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full; iterates over M using the caller-provided slicing.
GrB_Info GB (_AemultB_03__plus_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult producing a bitmap C: C=A.*B, C<M>=A.*B, or C<!M>=A.*B.
GrB_Info GB (_AemultB_bitmap__plus_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x + Bx [p] for all bnz entries: apply the binary op with the
// scalar bound to the first argument.
GrB_Info GB (_bind1st__plus_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// skip entries not present in B's bitmap (GBB is always true if Bb is NULL)
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = (x + bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] + y for all anz entries: apply the binary op with the
// scalar bound to the second argument.
GrB_Info GB (_bind2nd__plus_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in A's bitmap (GBB is always true if Ab is NULL)
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij + y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x + aij) ; \
}
// C = op (x, A'): transpose A while applying cij = x + aij via the
// GB_CAST_OP macro defined just above this function.
GrB_Info GB (_bind1st_tran__plus_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the file (same value here since
// A and B types coincide for PLUS_UINT64; the generator always emits this)
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij + y) ; \
}
// C = op (A', y): transpose A while applying cij = aij + y via the
// GB_CAST_OP macro defined just above this function.
GrB_Info GB (_bind2nd_tran__plus_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
SE_fg_int_extended_split_mex.c | #include "mex.h"
#include "../SE_fgg.h"
void SE_FGG_MEX_params(SE_FGG_params*, const mxArray*, int);
#define X prhs[0] // this arg is unused
#define HH prhs[1]
#define OPT prhs[2]
#define ZS prhs[3]
#define ZX prhs[4]
#define ZY prhs[5]
#define ZZ prhs[6]
#define IDX prhs[7]
#define PHI_OUT plhs[0] // Output
#ifndef VERBOSE
#define VERBOSE 0
#endif
// MEX gateway: gather (interpolate) values at N target points using
// precomputed FGG (fast Gaussian gridding) factors, dispatching to the
// AVX or SSE split-interpolation kernel.
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[] )
{
// number of targets = number of rows in the index array
const int N = mxGetM(IDX);
SE_FGG_params params;
SE_FGG_MEX_params(&params, OPT, N);
SE_FGG_work work;
// attach pre-computed quantities (borrowed from MATLAB; not freed here)
work.zs = mxGetPr(ZS);
work.zx = mxGetPr(ZX);
work.zy = mxGetPr(ZY);
work.zz = mxGetPr(ZZ);
// NOTE(review): assumes IDX was created as an int32 array by the caller
work.idx = (int*)mxGetData(IDX);
work.H = mxGetPr(HH);
// output vector
PHI_OUT = mxCreateDoubleMatrix(N,1,mxREAL);
double* phi = mxGetPr(PHI_OUT);
if(VERBOSE)
mexPrintf("[SE%s FG(i)] N=%d, P=%d\n",PER_STR,N,params.P);
if(N==1)
{
// Don't thread for single target
#ifdef __AVX__
SE_FGG_int_split_AVX_dispatch(phi, &work, &params);
#else
SE_FGG_int_split_SSE_dispatch(phi, &work, &params);
#endif
}
else
{
#ifdef _OPENMP
#pragma omp parallel default(shared)
#endif
{
// now do the work (dispatch partitions targets across threads internally)
#ifdef __AVX__
SE_FGG_int_split_AVX_dispatch(phi, &work, &params);
#else
SE_FGG_int_split_SSE_dispatch(phi, &work, &params);
#endif
}
}
}
|
test.c |
#include <stdio.h>
#include <omp.h>
#pragma omp requires unified_shared_memory
#include "../utilities/check.h"
#include "../utilities/utilities.h"
#define TRIALS (1)
#define N (992)
#define INIT() INIT_LOOP(N, {C[i] = 1; D[i] = i; E[i] = -i;})
#define ZERO(X) ZERO_ARRAY(N, X)
/* Verify that every element of A equals TRIALS.
   Prints a diagnostic and returns 0 at the first mismatch; returns 1
   when all N elements match. */
int check_results(double* A){
  int i;
  for (i = 0; i < N; i++) {
    if (A[i] == TRIALS)
      continue;
    printf("Error at %d, h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
    return 0;
  }
  return 1;
}
/* Verify the private-variable test: A must hold TRIALS*3 and B must hold
   TRIALS*7 in every element. Returns 1 on success, 0 at the first mismatch.
   Fix: the diagnostics previously printed the wrong expected values
   (TRIALS*2 and TRIALS*3) while comparing against TRIALS*3 and TRIALS*7. */
int check_results_priv(double *A, double *B){
  for(int i = 0 ; i < N ; i++) {
    if (A[i] != TRIALS*3) {
      printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) TRIALS*3, A[i]);
      return 0;
    }
    if (B[i] != TRIALS*7) {
      printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) TRIALS*7, B[i]);
      return 0;
    }
  }
  return 1;
}
#define CODE() \
ZERO(A); \
success = 0; \
for (int t = 0 ; t < TRIALS ; t++) { \
_Pragma("omp target teams distribute simd CLAUSES") \
for (int i = 0 ; i < N ; i++){ \
A[i] += C[i]; \
} \
} \
success += check_results(A); \
if (success == expected) \
printf("Succeeded\n");
#define CODE_PRIV() \
ZERO(A); \
ZERO(B); \
p = 2.0; \
q = 4.0; \
success = 0; \
for (int t = 0 ; t < TRIALS ; t++) { \
_Pragma("omp target teams distribute simd CLAUSES") \
for (int i = 0 ; i < N ; i++){ \
p = 3; \
q = 7; \
A[i] += p; \
B[i] += q; \
} \
} \
success += check_results_priv(A, B); \
if (success == expected) \
printf("Succeeded\n");
/* Driver: exercises "omp target teams distribute simd" with a range of
   num_teams / dist_schedule / collapse combinations, checking results on
   the host after each offloaded loop.
   Fixes in this revision:
   - Series 4, second test: the clamp used 512 instead of the loop bound
     510, so two indices past the intended range were processed.
   - Diagnostics printed expected values left over from a different test
     ((2.0+3.0)*TRIALS instead of TRIALS; collapse(3) message lacked k and
     the correct expected value 2.0+TRIALS*3.0).
   - The heap buffers S, T, U, V, Z are now freed before returning. */
int main(void) {
check_offloading();
double A[N], B[N], C[N], D[N], E[N];
int fail = 0;
int expected = 1;
int success = 0;
int chunkSize;
double p = 2.0, q = 4.0;
int nte, tl, blockSize;
INIT();
// **************************
// Series 1: no dist_schedule
// **************************
//
// Test: #iterations == #teams
//
printf("iterations = teams\n");
#define CLAUSES num_teams(992)
CODE()
#undef CLAUSES
printf("iterations > teams\n");
#define CLAUSES num_teams(256)
CODE()
#undef CLAUSES
printf("iterations < teams\n");
#define CLAUSES num_teams(1024)
CODE()
#undef CLAUSES
printf("num_teams(512) dist_schedule(static,1)\n");
#define CLAUSES num_teams(512) dist_schedule(static, 1)
CODE()
#undef CLAUSES
printf("num_teams(512) dist_schedule(static,512)\n");
#define CLAUSES num_teams(512) dist_schedule(static, 512)
CODE()
#undef CLAUSES
printf("num_teams(512) dist_schedule(static, chunkSize)\n");
chunkSize = N / 10;
#define CLAUSES num_teams(512) dist_schedule(static, chunkSize)
CODE()
#undef CLAUSES
printf("num_teams(1024) dist_schedule(static, chunkSize)\n");
chunkSize = N / 10;
#define CLAUSES num_teams(1024) dist_schedule(static, chunkSize)
CODE()
#undef CLAUSES
printf("num_teams(1024) dist_schedule(static, 1)\n");
#define CLAUSES num_teams(1024) dist_schedule(static, 1)
CODE()
#undef CLAUSES
printf("num_teams(3) dist_schedule(static, 1)\n");
#define CLAUSES num_teams(3) dist_schedule(static, 1)
CODE()
#undef CLAUSES
printf("num_teams(3) dist_schedule(static, 3)\n");
#define CLAUSES num_teams(3) dist_schedule(static, 3)
CODE()
#undef CLAUSES
printf("num_teams(10) dist_schedule(static, 99)\n");
#define CLAUSES num_teams(10) dist_schedule(static, 99)
CODE()
#undef CLAUSES
printf("num_teams(256) dist_schedule(static, 992)\n");
#define CLAUSES num_teams(256) dist_schedule(static, 992)
CODE()
#undef CLAUSES
#if 0
printf("num_teams(256) private(p,q)\n");
#define CLAUSES num_teams(256) private(p,q)
CODE_PRIV()
#undef CLAUSES
#endif
//
// Test: firstprivate
//
#if 0
printf("num_teams(64) firstprivate(p, q)\n");
ZERO(A); ZERO(B);
p = 2.0, q = 4.0;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target // implicit firstprivate for p and q, their initial values being 2 and 4 for each target invocation
#pragma omp teams distribute num_teams(64) firstprivate(p, q)
for(int i = 0 ; i < 128 ; i++) { // 2 iterations for each team
p += 3.0; // p and q are firstprivate to the team, and as such incremented twice (2 iterations per team)
q += 7.0;
A[i] += p;
B[i] += q;
}
}
for(int i = 0 ; i < 128 ; i++) {
if (i % 2 == 0) {
if (A[i] != (2.0+3.0)*TRIALS) {
printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0)*TRIALS, A[i]);
fail = 1;
}
if (B[i] != (4.0+7.0)*TRIALS) {
printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0)*TRIALS, B[i]);
fail = 1;
}
} else {
if (A[i] != (2.0+3.0*2)*TRIALS) {
printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) (2.0+3.0*2)*TRIALS, A[i]);
fail = 1;
}
if (B[i] != (4.0+7.0*2)*TRIALS) {
printf("Error at B[%d], h = %lf, d = %lf\n", i, (double) (4.0+7.0*2)*TRIALS, B[i]);
fail = 1;
}
}
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
#endif
//
// Test: lastprivate
//
#if 0
printf("num_teams(10) lastprivate(lastpriv)\n");
success = 0;
int lastpriv = -1;
#pragma omp target data map(tofrom:lastpriv)
#pragma omp target teams distribute simd num_teams(10) lastprivate(lastpriv)
for(int i = 0 ; i < omp_get_num_teams() ; i++)
lastpriv = omp_get_team_num();
if(lastpriv != 9) {
printf("lastpriv value is %d and should have been %d\n", lastpriv, 9);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
#endif
// // ***************************
// // Series 4: with parallel for
// // ***************************
//
// Test: simple blocking loop
//
printf("num_teams(nte) thread_limit(tl) with parallel for innermost\n");
success = 0;
ZERO(A); ZERO(B);
nte = 32;
tl = 64;
blockSize = tl;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams distribute simd num_teams(nte) thread_limit(tl)
for(int j = 0 ; j < 256 ; j += blockSize) {
for(int i = j ; i < j+blockSize; i++) {
A[i] += B[i] + C[i];
}
}
}
for(int i = 0 ; i < 256 ; i++) {
if (A[i] != TRIALS) {
// expected value is TRIALS: A starts at 0, B at 0, C at 1
printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: blocking loop where upper bound is not a multiple of tl*nte
//
printf("num_teams(nte) thread_limit(tl) with parallel for innermost\n");
success = 0;
ZERO(A); ZERO(B);
nte = 32;
tl = 64;
blockSize = tl;
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target teams distribute simd num_teams(nte) thread_limit(tl)
for(int j = 0 ; j < 510 ; j += blockSize) {
// clamp the last block to the loop bound 510 (was 512: overran the range)
int ub = (j+blockSize < 510) ? (j+blockSize) : 510;
for(int i = j ; i < ub; i++) {
A[i] += B[i] + C[i];
}
}
}
for(int i = 0 ; i < 256 ; i++) {
if (A[i] != TRIALS) {
printf("Error at A[%d], h = %lf, d = %lf\n", i, (double) TRIALS, A[i]);
fail = 1;
}
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
// **************************
// Series 5: collapse
// **************************
//
// Test: 2 loops
//
printf("num_teams(512) collapse(2)\n");
success = 0;
double * S = (double *) malloc(N*N*sizeof(double));
double * T = (double *) malloc(N*N*sizeof(double));
double * U = (double *) malloc(N*N*sizeof(double));
for (int i = 0 ; i < N ; i++)
for (int j = 0 ; j < N ; j++)
{
S[i*N+j] = 0.0;
T[i*N+j] = 1.0;
U[i*N+j] = 2.0;
}
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target data map(tofrom:S[:N*N]), map(to:T[:N*N],U[:N*N])
#pragma omp target teams distribute simd num_teams(512) collapse(2)
for (int i = 0 ; i < N ; i++)
for (int j = 0 ; j < N ; j++)
S[i*N+j] += T[i*N+j] + U[i*N+j]; // += 3 at each t
}
for (int i = 0 ; i < N ; i++)
for (int j = 0 ; j < N ; j++)
if (S[i*N+j] != TRIALS*3.0) {
printf("Error at (%d,%d), h = %lf, d = %lf\n", i, j, (double) TRIALS*3.0, S[i*N+j]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
//
// Test: 3 loops
//
printf("num_teams(512) collapse(3)\n");
success = 0;
int M = N/8;
double * V = (double *) malloc(M*M*M*sizeof(double));
double * Z = (double *) malloc(M*M*M*sizeof(double));
for (int i = 0 ; i < M ; i++)
for (int j = 0 ; j < M ; j++)
for (int k = 0 ; k < M ; k++)
{
V[i*M*M+j*M+k] = 2.0;
Z[i*M*M+j*M+k] = 3.0;
}
for (int t = 0 ; t < TRIALS ; t++) {
#pragma omp target data map(tofrom:V[:M*M*M]), map(to:Z[:M*M*M])
#pragma omp target teams distribute simd num_teams(512) collapse(3)
for (int i = 0 ; i < M ; i++)
for (int j = 0 ; j < M ; j++)
for (int k = 0 ; k < M ; k++)
V[i*M*M+j*M+k] += Z[i*M*M+j*M+k]; // += 3 at each t
}
for (int i = 0 ; i < M ; i++)
for (int j = 0 ; j < M ; j++)
for (int k = 0 ; k < M ; k++)
if (V[i*M*M+j*M+k] != 2.0+TRIALS*3.0) {
// report all three indices and the value actually checked against
printf("Error at (%d,%d,%d), h = %lf, d = %lf\n", i, j, k, (double) (2.0+TRIALS*3.0), V[i*M*M+j*M+k]);
fail = 1;
}
if(fail) printf("Failed\n");
else printf("Succeeded\n");
// release heap buffers (previously leaked)
free(S); free(T); free(U);
free(V); free(Z);
return 0;
}
|
operators.h | /*
Project Name : OpenMEEG
© INRIA and ENPC (contributors: Geoffray ADDE, Maureen CLERC, Alexandre
GRAMFORT, Renaud KERIVEN, Jan KYBIC, Perrine LANDREAU, Théodore PAPADOPOULO,
Emmanuel OLIVI
Maureen.Clerc.AT.inria.fr, keriven.AT.certis.enpc.fr,
kybic.AT.fel.cvut.cz, papadop.AT.inria.fr)
The OpenMEEG software is a C++ package for solving the forward/inverse
problems of electroencephalography and magnetoencephalography.
This software is governed by the CeCILL-B license under French law and
abiding by the rules of distribution of free software. You can use,
modify and/ or redistribute the software under the terms of the CeCILL-B
license as circulated by CEA, CNRS and INRIA at the following URL
"http://www.cecill.info".
As a counterpart to the access to the source code and rights to copy,
modify and redistribute granted by the license, users are provided only
with a limited warranty and the software's authors, the holders of the
economic rights, and the successive licensors have only limited
liability.
In this respect, the user's attention is drawn to the risks associated
with loading, using, modifying and/or developing or reproducing the
software by the user in light of its specific status of free software,
that may mean that it is complicated to manipulate, and that also
therefore means that it is reserved for developers and experienced
professionals having in-depth computer knowledge. Users are therefore
encouraged to load and test the software's suitability as regards their
requirements in conditions enabling the security of their systems and/or
data to be ensured and, more generally, to use and operate it in the
same conditions as regards security.
The fact that you are presently reading this means that you have had
knowledge of the CeCILL-B license and that you accept its terms.
*/
/// \file
/// \brief File containing the integral operators.
#pragma once
#include <iostream>
#include <vector.h>
#include <matrix.h>
#include <symmatrix.h>
#include <sparse_matrix.h>
#include <geometry.h>
#include <integrator.h>
#include <analytics.h>
#include <progressbar.h>
namespace OpenMEEG {
// TODO: Use overloading and remove the internal suffix.
void operatorSinternal(const Mesh&,Matrix&,const Vertices&,const double&);
void operatorDinternal(const Mesh&,Matrix&,const Vertices&,const double&);
void operatorFerguson(const Vect3&,const Mesh&,Matrix&,const unsigned&,const double&);
void operatorDipolePotDer(const Vect3&,const Vect3&,const Mesh&,Vector&,const double&,const unsigned,const bool);
void operatorDipolePot(const Vect3&,const Vect3&,const Mesh&,Vector&,const double&,const unsigned,const bool);
namespace Details {
// #define ADAPT_LHS
template <template <typename,typename> class Integrator>
void operatorDipolePot(const Vect3& r0,const Vect3& q,const Mesh& m,Vector& rhs,const double& coeff,const unsigned gauss_order) {
static analyticDipPot anaDP;
anaDP.init(q,r0);
Integrator<double,analyticDipPot> gauss(0.001);
gauss->setOrder(gauss_order);
#pragma omp parallel for
#if defined NO_OPENMP || defined OPENMP_RANGEFOR
for (const auto& triangle : m.triangles()) {
#elif defined OPENMP_ITERATOR
for (Triangles::const_iterator tit=m.triangles().begin();tit<m.triangles().end();++tit) {
const Triangle& triangle = *tit;
#else
for (int i=0;i<m.triangles().size();++i) {
const Triangle& triangle = *(m.triangles().begin()+i);
#endif
const double d = gauss->integrate(anaDP,triangle);
#pragma omp critical
rhs(triangle.index()) += d*coeff;
}
}
// T can be a Matrix or SymMatrix
template <typename T>
inline void operatorD(const Triangle& T1,const Triangle& T2,T& mat,const double& coeff,const unsigned gauss_order) {
//this version of operatorD add in the Matrix the contribution of T2 on T1
// for all the P1 functions it gets involved
// consider varying order of quadrature with the distance between T1 and T2
analyticD3 analyD(T2);
#ifdef ADAPT_LHS
AdaptiveIntegrator<Vect3, analyticD3> gauss(0.005);
gauss.setOrder(gauss_order);
#else
STATIC_OMP Integrator<Vect3, analyticD3> gauss(gauss_order);
#endif
const Vect3 total = gauss.integrate(analyD,T1);
for (unsigned i=0; i<3; ++i)
mat(T1.index(),T2.vertex(i).index()) += total(i)*coeff;
}
// Integrate the analytic single-layer kernel analyS over triangle T2,
// using adaptive quadrature when ADAPT_LHS is defined, otherwise a fixed
// Gauss rule of the requested order.
inline double operatorS(const analyticS& analyS,const Triangle& T2,const unsigned gauss_order) {
#ifdef ADAPT_LHS
AdaptiveIntegrator<double,analyticS> gauss(0.005);
#else
STATIC_OMP Integrator<double,analyticS> gauss;
#endif
gauss.setOrder(gauss_order);
return gauss.integrate(analyS,T2);
}
template <typename T>
inline double operatorN(const Vertex& V1,const Vertex& V2,const Mesh& m1,const Mesh& m2,const T& mat) {
const bool same_shared_vertex = ((&m1!=&m2) && (V1==V2));
const double factor = (same_shared_vertex) ? 0.5 : 0.25;
double result = 0.0;
for (const auto& tp1 : m1.triangles(V1)) {
const Edge& edge1 = tp1->edge(V1);
const Vect3& CB1 = edge1.vertex(0)-edge1.vertex(1);
const unsigned ind1 = tp1->index()-m1.triangles().front().index();
for (const auto& tp2 : m2.triangles(V2)) {
const unsigned ind2 = tp2->index()-m2.triangles().front().index();
// In the second case, we here divided (precalculated) operatorS by the product of areas.
const double Iqr = (m1.current_barrier() || m2.current_barrier()) ? mat(ind1,ind2) : mat(tp1->index(),tp2->index())/(tp1->area()*tp2->area());
const Edge& edge2 = tp2->edge(V2);
const Vect3& CB2 = edge2.vertex(0)-edge2.vertex(1);
result -= factor*Iqr*dotprod(CB1,CB2);
}
}
return result;
}
// Ferguson operator at field point x for vertex V of mesh m: sum, over
// every triangle containing V, the opposite-edge vector scaled by the
// analytic single-layer value at x.
inline Vect3 operatorFerguson(const Vect3& x,const Vertex& V,const Mesh& m) {
Vect3 acc;
acc = 0.0;
// Loop over triangles of which V is a vertex
for (const auto& tp : m.triangles(V)) {
const Triangle& tri = *tp;
const Edge& opposite = tri.edge(V);
// P, Q are the two vertices opposite to V (triangle P, Q, V)
const Vertex& P = opposite.vertex(0);
const Vertex& Q = opposite.vertex(1);
analyticS analyS(V,P,Q);
acc += ((P-Q)*(0.5/tri.area()))*analyS.f(x);
}
return acc;
}
// P1-P0 mass-matrix entry: area(T2)/3 when V1 is a vertex of T2, else 0.
inline double operatorP1P0(const Triangle& T2,const Vertex& V1) {
return T2.contains(V1) ? T2.area()/3.0 : 0.;
}
// Assemble the full double-layer operator D between meshes m1 and m2.
template <typename T>
void operatorD(const Mesh& m1,const Mesh& m2,T& mat,const double& coeff,const unsigned gauss_order) {
// This function (OPTIMIZED VERSION) has the following arguments:
// the 2 interacting meshes
// the storage Matrix for the result
// the coefficient to be appleid to each matrix element (depending on conductivities, ...)
// the gauss order parameter (for adaptive integration)
// In this version of the function, in order to skip multiple computations of the same quantities
// loops are run over the triangles but the Matrix cannot be filled in this function anymore
// That's why the filling is done is function Details::operatorD
//
ProgressBar pb(m1.triangles().size());
const Triangles& m1_triangles = m1.triangles();
// The three #if branches are alternate loop headers for the same body,
// selected by the build's OpenMP support level.
#pragma omp parallel for
#if defined NO_OPENMP || defined OPENMP_RANGEFOR
for (const auto& triangle1 : m1_triangles) {
#elif defined OPENMP_ITERATOR
for (Triangles::const_iterator tit1=m1_triangles.begin();tit1<m1_triangles.end();++tit1) {
const Triangle& triangle1 = *tit1;
#else
for (int i1=0; i1 < m1_triangles.size(); ++i1) {
const Triangle& triangle1 = *(m1_triangles.begin()+i1);
#endif
for (const auto& triangle2 : m2.triangles())
Details::operatorD(triangle1,triangle2,mat,coeff,gauss_order);
++pb;
}
}
}
// Assemble the operator N between meshes m1 and m2, precomputing the
// area-normalized operator S when a current barrier is present.
template <typename T>
void operatorN(const Mesh& m1,const Mesh& m2,T& mat,const double& coeff,const unsigned gauss_order) {
// This function has the following arguments:
// the 2 interacting meshes
// the storage Matrix for the result
// the coefficient to be applied to each matrix element (depending on conductivities, ...)
// the gauss order parameter (for adaptive integration)
std::cout << "OPERATOR N ... (arg : mesh " << m1.name() << " , mesh " << m2.name() << " )" << std::endl;
if (&m1==&m2) {
// Same-mesh case: exploit symmetry, only the lower/upper triangle is visited.
auto NUpdate = [&](const Mesh& m,const auto& M) {
ProgressBar pb(m1.vertices().size());
for (auto vit1=m.vertices().begin();vit1!=m.vertices().end();++vit1) {
#pragma omp parallel for
#if defined NO_OPENMP || defined OPENMP_ITERATOR
for (auto vit2=vit1;vit2<m.vertices().end();++vit2) {
#else
// NOTE(review): this branch indexes m1 directly; equivalent here since
// the lambda is only ever invoked with m == m1.
for (int i2=0;i2<=vit1-m1.vertices().begin();++i2) {
const auto vit2 = m1.vertices().begin()+i2;
#endif
mat((*vit1)->index(),(*vit2)->index()) += Details::operatorN(**vit1,**vit2,m,m,M)*coeff;
}
++pb;
}
};
if (m1.current_barrier()) {
// Precompute operator S divided by the product of triangles area.
ProgressBar pb(m1.triangles().size());
SymMatrix matS(m1.triangles().size());
for (Triangles::const_iterator tit1=m1.triangles().begin();tit1!=m1.triangles().end();++tit1) {
const analyticS analyS(*tit1);
const unsigned ind1 = tit1->index()-m1.triangles().front().index();
#pragma omp parallel for
#if defined NO_OPENMP || defined OPENMP_ITERATOR
for (Triangles::const_iterator tit2=tit1;tit2<m1.triangles().end();++tit2) {
#else
for (int i2=tit1-m1.triangles().begin();i2<m1.triangles().size();++i2) {
const Triangles::const_iterator tit2 = m1.triangles().begin()+i2;
#endif
const unsigned ind2 = tit2->index()-m2.triangles().front().index();
matS(ind1,ind2) = Details::operatorS(analyS,*tit2,gauss_order)/(tit1->area()*tit2->area());
}
++pb;
}
NUpdate(m1,matS);
} else {
NUpdate(m1,mat);
}
} else {
// Distinct meshes: full rectangular traversal.
auto NUpdate = [&](const Mesh& m1,const Mesh& m2,const auto& M) {
ProgressBar pb(m1.vertices().size());
const VerticesRefs& v2 = m2.vertices();
for (const auto& vertex1 : m1.vertices()) {
#pragma omp parallel for
#if defined NO_OPENMP || defined OPENMP_RANGEFOR
for (const auto& vertex2 : v2) {
#elif defined OPENMP_ITERATOR
for (auto vit2=v2.begin();vit2<v2.end();++vit2) {
const Vertex* vertex2 = *vit2;
#else
for (int i2=0;i2<v2.size();++i2) {
const Vertex* vertex2 = *(v2.begin()+i2);
#endif
mat(vertex1->index(),vertex2->index()) += Details::operatorN(*vertex1,*vertex2,m1,m2,M)*coeff;
}
++pb;
}
};
if (m1.current_barrier() || m2.current_barrier()) {
// Precompute operator S divided by the product of triangles area.
Matrix matS(m1.triangles().size(),m2.triangles().size());
ProgressBar pb(m1.triangles().size());
// NOTE(review): 'i' below is never used; left in place (doc-only edit).
unsigned i = 0;
for (const auto& triangle1 : m1.triangles()) {
const analyticS analyS(triangle1);
const unsigned ind1 = triangle1.index()-m1.triangles().front().index();
const Triangles& m2_triangles = m2.triangles();
#pragma omp parallel for
#if defined NO_OPENMP || defined OPENMP_RANGEFOR
for (const auto& triangle2 : m2_triangles) {
#elif defined OPENMP_ITERATOR
for (Triangles::const_iterator tit2=m2_triangles.begin();tit2<m2_triangles.end();++tit2) {
const Triangle& triangle2 = *tit2;
#else
for (int i2=0;i2<m2_triangles.size();++i2) {
const Triangle& triangle2 = *(m2_triangles.begin()+i2);
#endif
const unsigned ind2 = triangle2.index()-m2_triangles.front().index();
matS(ind1,ind2) = Details::operatorS(analyS,triangle2,gauss_order)/(triangle1.area()*triangle2.area());
}
++pb;
}
NUpdate(m1,m2,matS);
} else {
NUpdate(m1,m2,mat);
}
}
}
// Assemble the Galerkin single-layer operator S between two meshes:
//   S(i,j) = \Int G * PSI(I,i) * PSI(J,j)
// where PSI(A,a) is the P0 test function on layer A and triangle a.
template <typename T>
void operatorS(const Mesh& m1,const Mesh& m2,T& mat,const double& coeff,const unsigned gauss_order) {
    // This function has the following arguments:
    //    the 2 interacting meshes
    //    the storage Matrix for the result
    //    the coefficient to be applied to each matrix element (depending on conductivities, ...)
    //    the gauss order parameter (for adaptive integration)
    std::cout << "OPERATOR S ... (arg : mesh " << m1.name() << " , mesh " << m2.name() << " )" << std::endl;
    // The operator S is given by Sij=\Int G*PSI(I, i)*Psi(J, j) with
    // PSI(A, a) is a P0 test function on layer A and triangle a
    if (&m1==&m2) {
        // Same mesh on both sides: only the triangles tit2 >= tit1 are
        // visited here (the inner loop starts at tit1), so only that
        // triangular part of mat is written by this branch.
        ProgressBar pb(m1.triangles().size());
        for (Triangles::const_iterator tit1=m1.triangles().begin(); tit1!=m1.triangles().end(); ++tit1,++pb) {
            const analyticS analyS(*tit1);
            // Loop shape depends on OpenMP support: iterator loops in a
            // `parallel for` need OpenMP >= 3.0 (OPENMP_ITERATOR);
            // otherwise an integer index is used for the pragma.
#pragma omp parallel for
#if defined OPENMP_ITERATOR
            for (Triangles::const_iterator tit2=tit1;tit2<m1.triangles().end();++tit2) {
#else
            for (int i2=tit1-m1.triangles().begin();i2<m1.triangles().size();++i2) {
                const Triangles::const_iterator tit2 = m1.triangles().begin()+i2;
#endif
                mat(tit1->index(),tit2->index()) = Details::operatorS(analyS,*tit2,gauss_order)*coeff;
            }
        }
    } else {
        // TODO check the symmetry of Details::operatorS.
        // if we invert tit1 with tit2: results in HeadMat differs at 4.e-5 which is too big.
        // using ADAPT_LHS with tolerance at 0.000005 (for Details::opS) drops this at 6.e-6. (but increase the computation time)
        ProgressBar pb(m1.triangles().size());
        for (const auto& triangle1 : m1.triangles()) {
            const analyticS analyS(triangle1);
            const Triangles& m2_triangles = m2.triangles();
#pragma omp parallel for
#if defined NO_OPENMP || defined OPENMP_RANGEFOR
            for (const auto& triangle2 : m2_triangles) {
#elif defined OPENMP_ITERATOR
            for (Triangles::const_iterator tit2=m2_triangles.begin();tit2<m2_triangles.end();++tit2) {
                const Triangle& triangle2 = *tit2;
#else
            for (int i2=0;i2<m2_triangles.size();++i2) {
                const Triangle& triangle2 = *(m2_triangles.begin()+i2);
#endif
                mat(triangle1.index(),triangle2.index()) = Details::operatorS(analyS,triangle2,gauss_order)*coeff;
            }
            ++pb;
        }
    }
}
// Assemble the double-layer operator D between meshes m1 and m2 by
// delegating to the optimized Details::operatorD implementation.
template <typename T>
void operatorD(const Mesh& m1,const Mesh& m2,T& mat,const double& coeff,const unsigned gauss_order) {
    // This function (OPTIMIZED VERSION) has the following arguments:
    //    the 2 interacting meshes
    //    the storage Matrix for the result
    //    the coefficient to be applied to each matrix element (depending on conductivities, ...)
    //    the gauss order parameter (for adaptive integration)
    std::cout << "OPERATOR D... (arg : mesh " << m1.name() << " , mesh " << m2.name() << " )" << std::endl;
    Details::operatorD(m1,m2,mat,coeff,gauss_order);
}
// Assemble the D* operator: same computation as operatorD but with the
// two meshes swapped in the call to Details::operatorD.
template <typename T>
void operatorDstar(const Mesh& m1,const Mesh& m2,T& mat,const double& coeff,const unsigned gauss_order) {
    // This function (OPTIMIZED VERSION) has the following arguments:
    //    the 2 interacting meshes
    //    the storage Matrix for the result
    //    the coefficient to be applied to each matrix element (depending on conductivities, ...)
    //    the gauss order parameter (for adaptive integration)
    std::cout << "OPERATOR D*... (arg : mesh " << m1.name() << " , mesh " << m2.name() << ')' << std::endl;
    Details::operatorD(m2,m1,mat,coeff,gauss_order);
}
template <typename T>
void operatorP1P0(const Mesh& m,T& mat,const double& coeff) {
// This time mat(i, j)+= ... the Matrix is incremented by the P1P0 operator
std::cout << "OPERATOR P1P0... (arg : mesh " << m.name() << " )" << std::endl;
for (const auto& triangle : m.triangles())
for (const auto& vertex : triangle)
mat(triangle.index(),vertex->index()) += Details::operatorP1P0(triangle,*vertex)*coeff;
}
}
|
GB_unaryop__lnot_int8_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_int8_uint64
// op(A') function: GB_tran__lnot_int8_uint64
// C type: int8_t
// A type: uint64_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = !(aij != 0)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
int8_t z = (int8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_INT8 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Entrywise apply: for p = 0..anz-1, Cx [p] = !(((int8_t) Ax [p]) != 0),
// i.e. cast uint64_t to int8_t and take the logical NOT (via GB_CAST_OP).
// Returns GrB_NO_VALUE when this operator/type combination is disabled
// (GB_DISABLE), GrB_SUCCESS otherwise.  NOTE: this file is auto-generated.
GrB_Info GB_unop__lnot_int8_uint64
(
    int8_t *Cx,         // Cx and Ax may be aliased
    uint64_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // static schedule: each thread handles one contiguous chunk of Cx/Ax
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose A, typecast uint64_t -> int8_t, and apply
// cij = !(aij != 0).  The actual work is performed by the generic template
// GB_unaryop_transpose.c, specialized via the GB_* macros defined earlier
// in this file.  Returns GrB_NO_VALUE when disabled (GB_DISABLE).
GrB_Info GB_tran__lnot_int8_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
dftcommon.c | // Copyright Naoki Shibata 2010 - 2017.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <ctype.h>
#include <inttypes.h>
#include <assert.h>
#if defined(POWER64_UNDEF_USE_EXTERN_INLINES)
// This is a workaround required to cross compile for PPC64 binaries
#include <features.h>
#ifdef __USE_EXTERN_INLINES
#undef __USE_EXTERN_INLINES
#endif
#endif
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "misc.h"
#include "sleef.h"
#define IMPORT_IS_EXPORT
#include "sleefdft.h"
#include "dispatchparam.h"
#include "dftcommon.h"
#include "common.h"
#include "arraymap.h"
#define MAGIC_FLOAT 0x31415926
#define MAGIC_DOUBLE 0x27182818
#define MAGIC_LONGDOUBLE 0x14142135
#define MAGIC_QUAD 0x33166247
#define MAGIC2D_FLOAT 0x22360679
#define MAGIC2D_DOUBLE 0x17320508
#define MAGIC2D_LONGDOUBLE 0x26457513
#define MAGIC2D_QUAD 0x36055512
const char *configStr[] = { "ST", "ST stream", "MT", "MT stream" };
/* Parse a human-readable DFT path specification such as "4(MT) 3 2(ST)".
 * Each token is a butterfly width (decimal), optionally followed by a
 * configuration name in parentheses (one of configStr[]).  Widths are
 * stored in path[] and configuration indices in config[]; the number of
 * tokens is returned on success.  Negative returns identify the error:
 *   -1 unexpected character, -2 too many tokens, -3 unknown config name,
 *   -4 missing ')', -5 widths do not sum to log2len, -6 width too large. */
static int parsePathStr(char *p, int *path, int *config, int pathLenMax, int log2len) {
  int pathLen = 0, l2l = 0;
  for(;;) {
    while(*p == ' ') p++;
    if (*p == '\0') break;
    /* Cast to unsigned char: passing a (possibly negative) plain char to
     * isdigit() is undefined behavior (CERT STR37-C). */
    if (!isdigit((unsigned char)*p)) return -1;
    pathLen++;
    if (pathLen >= pathLenMax) return -2;
    int n = 0;
    while(isdigit((unsigned char)*p)) n = n * 10 + *p++ - '0';
    if (n > MAXBUTWIDTH) return -6;
    path[pathLen-1] = n;
    l2l += n;
    config[pathLen-1] = 0;     /* default configuration when none given */
    if (*p != '(') continue;
    /* Scan config names longest-first so "MT stream" is not matched as "MT". */
    int c;
    for(c=3;c>=0;c--) if (strncmp(p+1, configStr[c], strlen(configStr[c])) == 0) break;
    if (c == -1) return -3;
    p += strlen(configStr[c]) + 1;
    if (*p != ')') return -4;
    p++;
    config[pathLen-1] = c;
  }
  if (l2l != log2len) return -5;
  return pathLen;
}
/* Override the butterfly decomposition ("path") of plan p from a
 * user-supplied string (syntax handled by parsePathStr, e.g. "4 3(MT) 2").
 * On a parse error a message is printed (verbose mode only) and the plan
 * is left unchanged. */
EXPORT void SleefDFT_setPath(SleefDFT *p, char *pathStr) {
  assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE || p->magic == MAGIC_LONGDOUBLE || p->magic == MAGIC_QUAD));
  int path[32], config[32];
  int pathLen = parsePathStr(pathStr, path, config, 31, p->log2len);
  if (pathLen < 0) {
    if ((p->mode & SLEEF_MODE_VERBOSE) != 0) printf("Error %d in parsing path string : %s\n", pathLen, pathStr);
    return;
  }
  /* bestPath[level] holds the butterfly width used at that level (0 = unused). */
  for(uint32_t j = 0;j <= p->log2len;j++) p->bestPath[j] = 0;
  /* Walk from the topmost level downward, consuming one parsed width per step.
   * NOTE(review): bestPathConfig[] is only written for the visited levels;
   * entries at unused levels keep whatever values they had before. */
  for(int level = p->log2len, j=0;level > 0 && j < pathLen;) {
    p->bestPath[level] = path[j];
    p->bestPathConfig[level] = config[j];
    level -= path[j];
    j++;
  }
  /* Recount the number of active (non-zero) path entries. */
  p->pathLen = 0;
  for(int j = p->log2len;j >= 0;j--) if (p->bestPath[j] != 0) p->pathLen++;
  if ((p->mode & SLEEF_MODE_VERBOSE) != 0) {
    printf("Set path : ");
    for(int j = p->log2len;j >= 0;j--) if (p->bestPath[j] != 0) printf("%d(%s) ", p->bestPath[j], configStr[p->bestPathConfig[j]]);
    printf("\n");
  }
}
/* Release every twiddle table owned by the plan.  For each butterfly
 * width w there is one table per level from w up to log2len; after the
 * tables are freed, the per-width pointer array is freed and cleared. */
void freeTables(SleefDFT *p) {
  for(int w = 1; w <= MAXBUTWIDTH; w++) {
    for(uint32_t lv = w; lv <= p->log2len; lv++) {
      Sleef_free(p->tbl[w][lv]);
    }
    free(p->tbl[w]);
    p->tbl[w] = NULL;
  }
}
/* Destroy a plan, releasing everything it owns.  Handles both 2D plans
 * (MAGIC2D_*: free the transpose buffer and recurse into the 1D sub-plans)
 * and 1D plans (MAGIC_*: free real-transform coefficients, permutation
 * tables and twiddle tables).  The magic field is zeroed before free() as
 * a guard against accidental reuse of a disposed plan. */
EXPORT void SleefDFT_dispose(SleefDFT *p) {
  if (p != NULL && (p->magic == MAGIC2D_FLOAT || p->magic == MAGIC2D_DOUBLE || p->magic == MAGIC2D_LONGDOUBLE || p->magic == MAGIC2D_QUAD)) {
    /* 2D plan: owns a transpose buffer and one or two 1D sub-plans. */
    Sleef_free(p->tBuf);
    SleefDFT_dispose(p->instH);
    /* When hlen == vlen the same sub-plan instance serves both axes,
       so it must not be disposed twice. */
    if (p->hlen != p->vlen) SleefDFT_dispose(p->instV);
    p->magic = 0;
    free(p);
    return;
  }
  assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE || p->magic == MAGIC_LONGDOUBLE || p->magic == MAGIC_QUAD));
  if (p->log2len <= 1) {
    /* Tiny transforms have no tables or permutations to free. */
    p->magic = 0;
    free(p);
    return;
  }
  if ((p->mode & SLEEF_MODE_REAL) != 0) {
    /* Real transforms carry two extra coefficient arrays. */
    Sleef_free(p->rtCoef1);
    Sleef_free(p->rtCoef0);
    p->rtCoef0 = p->rtCoef1 = NULL;
  }
  for(int level = p->log2len;level >= 1;level--) {
    Sleef_free(p->perm[level]);
  }
  free(p->perm);
  p->perm = NULL;
  freeTables(p);
  p->magic = 0;
  free(p);
}
/* floor(log2(q)) for q >= 1, computed without a loop: the argument is
 * first folded into the low 16 bits, then a 16-entry table locates the
 * 4-bit group containing the highest set bit and the bit within it.
 * (The result for q == 0 is not meaningful.) */
uint32_t ilog2(uint32_t q) {
  static const uint32_t tab[] = {0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4};
  const uint32_t shift = (q & 0xffff0000) ? 16 : 0;  /* fold top half down */
  const uint32_t v = q >> shift;
  uint32_t m = v | (v >> 1);
  m |= m >> 2;
  /* m now indexes the 4-bit group of v that holds the highest set bit */
  m = ((m & 0x10) >> 4) | ((m & 0x100) >> 7) | ((m & 0x1000) >> 10);
  const uint32_t group = tab[m] * 4;
  return shift + group + tab[v >> group] - 1;
}
//
char *dftPlanFilePath = NULL;
char *archID = NULL;
uint64_t planMode = SLEEF_PLAN_REFERTOENVVAR;
ArrayMap *planMap = NULL;
int planFilePathSet = 0, planFileLoaded = 0;
#ifdef _OPENMP
omp_lock_t planMapLock;
int planMapLockInitialized = 0;
#endif
/* Lazily initialize the global lock protecting planMap.  The omp critical
 * section guarantees the lock is created exactly once even if multiple
 * threads race here.  Without OpenMP this function is a no-op (no lock is
 * needed). */
static void initPlanMapLock() {
#ifdef _OPENMP
#pragma omp critical
  {
    if (!planMapLockInitialized) {
      planMapLockInitialized = 1;
      omp_init_lock(&planMapLock);
    }
  }
#endif
}
/* Drop the in-memory plan cache, if one exists. */
static void planMap_clear() {
  if (planMap) {
    ArrayMap_dispose(planMap);
  }
  planMap = NULL;
}
/* Configure where DFT execution plans are stored on disk.
 *   path : plan file path, or NULL to disable file-backed plans.
 *   arch : CPU identifier used to key the plans; NULL selects the string
 *          reported by Sleef_getCpuIdString().
 *   mode : SLEEF_PLAN_* flags; SLEEF_PLAN_RESET additionally discards any
 *          plans already loaded in memory.
 * NOTE(review): the malloc() results below are used without a NULL check,
 * and the "+10" slack beyond strlen()+1 looks like defensive padding —
 * confirm intent before changing. */
EXPORT void SleefDFT_setPlanFilePath(const char *path, const char *arch, uint64_t mode) {
  initPlanMapLock();
  if ((mode & SLEEF_PLAN_RESET) != 0) {
    planMap_clear();
    planFileLoaded = 0;
    planFilePathSet = 0;
  }
  if (dftPlanFilePath != NULL) free(dftPlanFilePath);
  if (path != NULL) {
    dftPlanFilePath = malloc(strlen(path)+10);
    strcpy(dftPlanFilePath, path);
  } else {
    dftPlanFilePath = NULL;
  }
  if (archID != NULL) free(archID);
  if (arch == NULL) arch = Sleef_getCpuIdString();
  archID = malloc(strlen(arch)+10);
  strcpy(archID, arch);
  planMode = mode;
  planFilePathSet = 1;
}
/* Load the plan cache from disk into planMap.  If no path has been set
 * explicitly and the mode allows it, the ENVVAR environment variable is
 * consulted first.  Falls back to a fresh empty map when no file is
 * available or a reset was requested.  Caller must hold planMapLock. */
static void loadPlanFromFile() {
  if (planFilePathSet == 0 && (planMode & SLEEF_PLAN_REFERTOENVVAR) != 0) {
    char *s = getenv(ENVVAR);
    if (s != NULL) SleefDFT_setPlanFilePath(s, NULL, planMode);
  }
  if (planMap != NULL) ArrayMap_dispose(planMap);
  if (dftPlanFilePath != NULL && (planMode & SLEEF_PLAN_RESET) == 0) {
    planMap = ArrayMap_load(dftPlanFilePath, archID, PLANFILEID, (planMode & SLEEF_PLAN_NOLOCK) == 0);
  }
  if (planMap == NULL) planMap = initArrayMap();
  planFileLoaded = 1;
}
/* Persist the in-memory plan cache to the configured plan file, unless
 * the plan mode is read-only or no path has been set.  Caller must hold
 * planMapLock, and the cache must already have been loaded. */
static void savePlanToFile() {
  assert(planFileLoaded);
  if ((planMode & SLEEF_PLAN_READONLY) != 0) return;
  if (dftPlanFilePath == NULL) return;
  ArrayMap_save(planMap, dftPlanFilePath, archID, PLANFILEID);
}
#define CATBIT 8
#define BASETYPEIDBIT 2
#define LOG2LENBIT 8
#define DIRBIT 1
#define BUTSTATBIT 16
/* Build the plan-cache key marking butterfly measurement status
 * (category 0).  Fields are packed from the most significant end down:
 * butStat | log2len | direction | base type ID | category. */
static uint64_t keyButStat(int baseTypeID, int log2len, int dir, int butStat) {
  dir = (dir & SLEEF_MODE_BACKWARD) == 0;   /* collapse mode flags to 0/1 */
  int cat = 0;
  uint64_t k = 0;
  k = (k << BUTSTATBIT) | (butStat & ~(~(uint64_t)0 << BUTSTATBIT));
  k = (k << LOG2LENBIT) | (log2len & ~(~(uint64_t)0 << LOG2LENBIT));
  /* Fix: the direction field is DIRBIT (1 bit) wide; the original code
   * masked it with LOG2LENBIT.  Harmless while dir is 0/1, but wrong in
   * intent and would corrupt the key if the field ever widened. */
  k = (k << DIRBIT) | (dir & ~(~(uint64_t)0 << DIRBIT));
  k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT));
  k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT));
  return k;
}
#define LEVELBIT LOG2LENBIT
#define BUTCONFIGBIT 8
#define TRANSCONFIGBIT 8
/* Build the plan-cache key for 2D transpose timings (category 2).
 * hlen and vlen are packed as (max, min), so the key is independent of
 * which dimension is horizontal and which is vertical. */
static uint64_t keyTrans(int baseTypeID, int hlen, int vlen, int transConfig) {
  int max = MAX(hlen, vlen), min = MIN(hlen, vlen);
  int cat = 2;
  uint64_t k = 0;
  /* Fields are packed from the most significant end downward. */
  k = (k << TRANSCONFIGBIT) | (transConfig & ~(~(uint64_t)0 << TRANSCONFIGBIT));
  k = (k << LOG2LENBIT) | (max & ~(~(uint64_t)0 << LOG2LENBIT));
  k = (k << LOG2LENBIT) | (min & ~(~(uint64_t)0 << LOG2LENBIT));
  k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT));
  k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT));
  return k;
}
/* Build the plan-cache key for the butterfly width stored at one level of
 * a plan (category 3).  dir is collapsed to a single forward/backward bit. */
static uint64_t keyPath(int baseTypeID, int log2len, int dir, int level, int config) {
  dir = (dir & SLEEF_MODE_BACKWARD) == 0;
  int cat = 3;
  uint64_t k = 0;
  k = (k << BUTCONFIGBIT) | (config & ~(~(uint64_t)0 << BUTCONFIGBIT));
  k = (k << LEVELBIT) | (level & ~(~(uint64_t)0 << LEVELBIT));
  k = (k << LOG2LENBIT) | (log2len & ~(~(uint64_t)0 << LOG2LENBIT));
  /* Fix: mask the 1-bit direction field with DIRBIT; the original used
   * LOG2LENBIT here (harmless while dir is 0/1, but wrong in intent). */
  k = (k << DIRBIT) | (dir & ~(~(uint64_t)0 << DIRBIT));
  k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT));
  k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT));
  return k;
}
/* Build the plan-cache key for the per-level butterfly configuration
 * (category 4).  Identical layout to keyPath except for the category. */
static uint64_t keyPathConfig(int baseTypeID, int log2len, int dir, int level, int config) {
  dir = (dir & SLEEF_MODE_BACKWARD) == 0;
  int cat = 4;
  uint64_t k = 0;
  k = (k << BUTCONFIGBIT) | (config & ~(~(uint64_t)0 << BUTCONFIGBIT));
  k = (k << LEVELBIT) | (level & ~(~(uint64_t)0 << LEVELBIT));
  k = (k << LOG2LENBIT) | (log2len & ~(~(uint64_t)0 << LOG2LENBIT));
  /* Fix: mask the 1-bit direction field with DIRBIT; the original used
   * LOG2LENBIT here (harmless while dir is 0/1, but wrong in intent). */
  k = (k << DIRBIT) | (dir & ~(~(uint64_t)0 << DIRBIT));
  k = (k << BASETYPEIDBIT) | (baseTypeID & ~(~(uint64_t)0 << BASETYPEIDBIT));
  k = (k << CATBIT) | (cat & ~(~(uint64_t)0 << CATBIT));
  return k;
}
/* Look up key in planMap and decode its value as a hexadecimal uint64.
 * Returns 0 when the key is absent or the stored string does not parse. */
static uint64_t planMap_getU64(uint64_t key) {
  const char *entry = ArrayMap_get(planMap, key);
  if (entry == NULL) return 0;
  uint64_t value;
  if (sscanf(entry, "%" SCNx64, &value) != 1) return 0;
  return value;
}
/* Store value under key in planMap, encoded as a hexadecimal string.
 * ArrayMap_put returns the previously stored string (or NULL), which is
 * freed here so entries can be overwritten without leaking. */
static void planMap_putU64(uint64_t key, uint64_t value) {
  char *s = malloc(100);
  if (s == NULL) return;                /* OOM: silently drop the entry
                                           (original would sprintf to NULL) */
  snprintf(s, 100, "%" PRIx64, value);  /* bounded, unlike sprintf */
  char *old = ArrayMap_put(planMap, key, s);
  free(old);                            /* free(NULL) is a no-op */
}
/* Load a previously measured butterfly path for 1D plan p from the plan
 * cache (category pathCat).  Returns 1 on success, 0 when no measurement
 * is recorded or a stored width exceeds MAXBUTWIDTH. */
int PlanManager_loadMeasurementResultsP(SleefDFT *p, int pathCat) {
  assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE || p->magic == MAGIC_LONGDOUBLE || p->magic == MAGIC_QUAD));
  initPlanMapLock();
#ifdef _OPENMP
  omp_set_lock(&planMapLock);
#endif
  if (!planFileLoaded) loadPlanFromFile();
  /* The keyButStat entry (pathCat+10) marks whether results exist at all. */
  int stat = planMap_getU64(keyButStat(p->baseTypeID, p->log2len, p->mode, pathCat+10));
  if (stat == 0) {
#ifdef _OPENMP
    omp_unset_lock(&planMapLock);
#endif
    return 0;
  }
  int ret = 1;
  for(int j = p->log2len;j >= 0;j--) {
    p->bestPath[j] = planMap_getU64(keyPath(p->baseTypeID, p->log2len, p->mode, j, pathCat));
    p->bestPathConfig[j] = planMap_getU64(keyPathConfig(p->baseTypeID, p->log2len, p->mode, j, pathCat));
    /* Sanity check: reject out-of-range widths from a corrupted file. */
    if (p->bestPath[j] > MAXBUTWIDTH) ret = 0;
  }
  /* Recount the number of active (non-zero) path entries. */
  p->pathLen = 0;
  for(int j = p->log2len;j >= 0;j--) if (p->bestPath[j] != 0) p->pathLen++;
#ifdef _OPENMP
  omp_unset_lock(&planMapLock);
#endif
  return ret;
}
/* Record the measured butterfly path of 1D plan p in the plan cache under
 * category pathCat, then persist the cache (unless read-only).  If results
 * for this key are already recorded, the existing entries are kept. */
void PlanManager_saveMeasurementResultsP(SleefDFT *p, int pathCat) {
  assert(p != NULL && (p->magic == MAGIC_FLOAT || p->magic == MAGIC_DOUBLE || p->magic == MAGIC_LONGDOUBLE || p->magic == MAGIC_QUAD));
  initPlanMapLock();
#ifdef _OPENMP
  omp_set_lock(&planMapLock);
#endif
  if (!planFileLoaded) loadPlanFromFile();
  /* Already recorded: nothing to do. */
  if (planMap_getU64(keyButStat(p->baseTypeID, p->log2len, p->mode, pathCat+10)) != 0) {
#ifdef _OPENMP
    omp_unset_lock(&planMapLock);
#endif
    return;
  }
  for(int j = p->log2len;j >= 0;j--) {
    planMap_putU64(keyPath(p->baseTypeID, p->log2len, p->mode, j, pathCat), p->bestPath[j]);
    planMap_putU64(keyPathConfig(p->baseTypeID, p->log2len, p->mode, j, pathCat), p->bestPathConfig[j]);
  }
  /* Mark the results as present (matches the check above). */
  planMap_putU64(keyButStat(p->baseTypeID, p->log2len, p->mode, pathCat+10), 1);
  if ((planMode & SLEEF_PLAN_READONLY) == 0) savePlanToFile();
#ifdef _OPENMP
  omp_unset_lock(&planMapLock);
#endif
}
/* Load cached transpose timings for 2D plan p: single-threaded (tmNoMT)
 * and multi-threaded (tmMT).  Returns nonzero when a single-threaded
 * timing was found (a stored value of 0 means "not measured").
 * Also removes a local variable the original declared but never used. */
int PlanManager_loadMeasurementResultsT(SleefDFT *p) {
  assert(p != NULL && (p->magic == MAGIC2D_FLOAT || p->magic == MAGIC2D_DOUBLE || p->magic == MAGIC2D_LONGDOUBLE || p->magic == MAGIC2D_QUAD));
  initPlanMapLock();
#ifdef _OPENMP
  omp_set_lock(&planMapLock);
#endif
  if (!planFileLoaded) loadPlanFromFile();
  p->tmNoMT = planMap_getU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 0));
  p->tmMT   = planMap_getU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 1));
#ifdef _OPENMP
  omp_unset_lock(&planMapLock);
#endif
  return p->tmNoMT != 0;
}
/* Store the measured transpose timings of 2D plan p (single- and
 * multi-threaded) in the plan cache and persist it unless the plan mode
 * is read-only.  Also removes a local variable the original declared but
 * never used. */
void PlanManager_saveMeasurementResultsT(SleefDFT *p) {
  assert(p != NULL && (p->magic == MAGIC2D_FLOAT || p->magic == MAGIC2D_DOUBLE || p->magic == MAGIC2D_LONGDOUBLE || p->magic == MAGIC2D_QUAD));
  initPlanMapLock();
#ifdef _OPENMP
  omp_set_lock(&planMapLock);
#endif
  if (!planFileLoaded) loadPlanFromFile();
  planMap_putU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 0), p->tmNoMT);
  planMap_putU64(keyTrans(p->baseTypeID, p->log2hlen, p->log2vlen, 1), p->tmMT);
  if ((planMode & SLEEF_PLAN_READONLY) == 0) savePlanToFile();
#ifdef _OPENMP
  omp_unset_lock(&planMapLock);
#endif
}
|
dds.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD DDDD SSSSS %
% D D D D SS %
% D D D D SSS %
% D D D D SS %
% DDDD DDDD SSSSS %
% %
% %
% Read/Write Microsoft Direct Draw Surface Image Format %
% %
% Software Design %
% Bianca van Schaik %
% March 2008 %
% Dirk Lemstra %
% September 2013 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/module.h"
#include "MagickCore/transform.h"
/*
Definitions
*/
#define DDSD_CAPS 0x00000001
#define DDSD_HEIGHT 0x00000002
#define DDSD_WIDTH 0x00000004
#define DDSD_PITCH 0x00000008
#define DDSD_PIXELFORMAT 0x00001000
#define DDSD_MIPMAPCOUNT 0x00020000
#define DDSD_LINEARSIZE 0x00080000
#define DDSD_DEPTH 0x00800000
#define DDPF_ALPHAPIXELS 0x00000001
#define DDPF_FOURCC 0x00000004
#define DDPF_RGB 0x00000040
#define DDPF_LUMINANCE 0x00020000
#define FOURCC_DXT1 0x31545844
#define FOURCC_DXT3 0x33545844
#define FOURCC_DXT5 0x35545844
#define DDSCAPS_COMPLEX 0x00000008
#define DDSCAPS_TEXTURE 0x00001000
#define DDSCAPS_MIPMAP 0x00400000
#define DDSCAPS2_CUBEMAP 0x00000200
#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
#define DDSCAPS2_VOLUME 0x00200000
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
/*
Structure declarations.
*/
/* DDS pixel-format block: DDPF_* flags, FourCC compression code
   (FOURCC_DXT1/3/5), and bit layout for uncompressed RGB(A) data. */
typedef struct _DDSPixelFormat
{
  size_t
    flags,
    fourcc,
    rgb_bitcount,
    r_bitmask,
    g_bitmask,
    b_bitmask,
    alpha_bitmask;
} DDSPixelFormat;
/* Main DDS surface description: DDSD_* validity flags, image dimensions,
   pitch or compressed linear size, mipmap count, and the two DDSCAPS
   capability words (cubemap/volume bits live in ddscaps2). */
typedef struct _DDSInfo
{
  size_t
    flags,
    height,
    width,
    pitchOrLinearSize,
    depth,
    mipmapcount,
    ddscaps1,
    ddscaps2;
  DDSPixelFormat
    pixelformat;
} DDSInfo;
/* Four-entry color palette (per-channel arrays), as used when expanding a
   compressed block. */
typedef struct _DDSColors
{
  unsigned char
    r[4],
    g[4],
    b[4],
    a[4];
} DDSColors;
/* Small float vector types (4 and 3 components). */
typedef struct _DDSVector4
{
  float
    x,
    y,
    z,
    w;
} DDSVector4;
typedef struct _DDSVector3
{
  float
    x,
    y,
    z;
} DDSVector3;
/* One candidate encoding for a single color: palette start/end indices
   and the resulting error (see the DDSLookup_* tables below). */
typedef struct _DDSSourceBlock
{
  unsigned char
    start,
    end,
    error;
} DDSSourceBlock;
/* Lookup entry holding two candidate source blocks per color value. */
typedef struct _DDSSingleColorLookup
{
  DDSSourceBlock sources[2];
} DDSSingleColorLookup;
/* Decoder callback signatures: whole-image decoder and per-block pixel
   decoder, both reporting success via MagickBooleanType. */
typedef MagickBooleanType
  DDSDecoder(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
    ExceptionInfo *);
typedef MagickBooleanType
  DDSPixelDecoder(Image *,DDSInfo *,ExceptionInfo *);
static const DDSSingleColorLookup DDSLookup_5_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 1 } } },
{ { { 0, 0, 2 }, { 0, 1, 0 } } },
{ { { 0, 0, 3 }, { 0, 1, 1 } } },
{ { { 0, 0, 4 }, { 0, 2, 1 } } },
{ { { 1, 0, 3 }, { 0, 2, 0 } } },
{ { { 1, 0, 2 }, { 0, 2, 1 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 1, 2, 1 } } },
{ { { 1, 0, 2 }, { 1, 2, 0 } } },
{ { { 1, 0, 3 }, { 0, 4, 0 } } },
{ { { 1, 0, 4 }, { 0, 5, 1 } } },
{ { { 2, 0, 3 }, { 0, 5, 0 } } },
{ { { 2, 0, 2 }, { 0, 5, 1 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 2, 3, 1 } } },
{ { { 2, 0, 2 }, { 2, 3, 0 } } },
{ { { 2, 0, 3 }, { 0, 7, 0 } } },
{ { { 2, 0, 4 }, { 1, 6, 1 } } },
{ { { 3, 0, 3 }, { 1, 6, 0 } } },
{ { { 3, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 2 }, { 0, 10, 1 } } },
{ { { 3, 0, 3 }, { 0, 10, 0 } } },
{ { { 3, 0, 4 }, { 2, 7, 1 } } },
{ { { 4, 0, 4 }, { 2, 7, 0 } } },
{ { { 4, 0, 3 }, { 0, 11, 0 } } },
{ { { 4, 0, 2 }, { 1, 10, 1 } } },
{ { { 4, 0, 1 }, { 1, 10, 0 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 1 } } },
{ { { 4, 0, 2 }, { 0, 13, 0 } } },
{ { { 4, 0, 3 }, { 0, 13, 1 } } },
{ { { 4, 0, 4 }, { 0, 14, 1 } } },
{ { { 5, 0, 3 }, { 0, 14, 0 } } },
{ { { 5, 0, 2 }, { 2, 11, 1 } } },
{ { { 5, 0, 1 }, { 2, 11, 0 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 1, 14, 1 } } },
{ { { 5, 0, 2 }, { 1, 14, 0 } } },
{ { { 5, 0, 3 }, { 0, 16, 0 } } },
{ { { 5, 0, 4 }, { 0, 17, 1 } } },
{ { { 6, 0, 3 }, { 0, 17, 0 } } },
{ { { 6, 0, 2 }, { 0, 17, 1 } } },
{ { { 6, 0, 1 }, { 0, 18, 1 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 2, 15, 1 } } },
{ { { 6, 0, 2 }, { 2, 15, 0 } } },
{ { { 6, 0, 3 }, { 0, 19, 0 } } },
{ { { 6, 0, 4 }, { 1, 18, 1 } } },
{ { { 7, 0, 3 }, { 1, 18, 0 } } },
{ { { 7, 0, 2 }, { 0, 20, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 2 }, { 0, 22, 1 } } },
{ { { 7, 0, 3 }, { 0, 22, 0 } } },
{ { { 7, 0, 4 }, { 2, 19, 1 } } },
{ { { 8, 0, 4 }, { 2, 19, 0 } } },
{ { { 8, 0, 3 }, { 0, 23, 0 } } },
{ { { 8, 0, 2 }, { 1, 22, 1 } } },
{ { { 8, 0, 1 }, { 1, 22, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 1 } } },
{ { { 8, 0, 2 }, { 0, 25, 0 } } },
{ { { 8, 0, 3 }, { 0, 25, 1 } } },
{ { { 8, 0, 4 }, { 0, 26, 1 } } },
{ { { 9, 0, 3 }, { 0, 26, 0 } } },
{ { { 9, 0, 2 }, { 2, 23, 1 } } },
{ { { 9, 0, 1 }, { 2, 23, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 1, 26, 1 } } },
{ { { 9, 0, 2 }, { 1, 26, 0 } } },
{ { { 9, 0, 3 }, { 0, 28, 0 } } },
{ { { 9, 0, 4 }, { 0, 29, 1 } } },
{ { { 10, 0, 3 }, { 0, 29, 0 } } },
{ { { 10, 0, 2 }, { 0, 29, 1 } } },
{ { { 10, 0, 1 }, { 0, 30, 1 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 2, 27, 1 } } },
{ { { 10, 0, 2 }, { 2, 27, 0 } } },
{ { { 10, 0, 3 }, { 0, 31, 0 } } },
{ { { 10, 0, 4 }, { 1, 30, 1 } } },
{ { { 11, 0, 3 }, { 1, 30, 0 } } },
{ { { 11, 0, 2 }, { 4, 24, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 0 }, { 1, 31, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 2 }, { 2, 30, 1 } } },
{ { { 11, 0, 3 }, { 2, 30, 0 } } },
{ { { 11, 0, 4 }, { 2, 31, 1 } } },
{ { { 12, 0, 4 }, { 2, 31, 0 } } },
{ { { 12, 0, 3 }, { 4, 27, 0 } } },
{ { { 12, 0, 2 }, { 3, 30, 1 } } },
{ { { 12, 0, 1 }, { 3, 30, 0 } } },
{ { { 12, 0, 0 }, { 4, 28, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 1 } } },
{ { { 12, 0, 2 }, { 3, 31, 0 } } },
{ { { 12, 0, 3 }, { 3, 31, 1 } } },
{ { { 12, 0, 4 }, { 4, 30, 1 } } },
{ { { 13, 0, 3 }, { 4, 30, 0 } } },
{ { { 13, 0, 2 }, { 6, 27, 1 } } },
{ { { 13, 0, 1 }, { 6, 27, 0 } } },
{ { { 13, 0, 0 }, { 4, 31, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 1 } } },
{ { { 13, 0, 2 }, { 5, 30, 0 } } },
{ { { 13, 0, 3 }, { 8, 24, 0 } } },
{ { { 13, 0, 4 }, { 5, 31, 1 } } },
{ { { 14, 0, 3 }, { 5, 31, 0 } } },
{ { { 14, 0, 2 }, { 5, 31, 1 } } },
{ { { 14, 0, 1 }, { 6, 30, 1 } } },
{ { { 14, 0, 0 }, { 6, 30, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 1 } } },
{ { { 14, 0, 2 }, { 6, 31, 0 } } },
{ { { 14, 0, 3 }, { 8, 27, 0 } } },
{ { { 14, 0, 4 }, { 7, 30, 1 } } },
{ { { 15, 0, 3 }, { 7, 30, 0 } } },
{ { { 15, 0, 2 }, { 8, 28, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 0 }, { 7, 31, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 2 }, { 8, 30, 1 } } },
{ { { 15, 0, 3 }, { 8, 30, 0 } } },
{ { { 15, 0, 4 }, { 10, 27, 1 } } },
{ { { 16, 0, 4 }, { 10, 27, 0 } } },
{ { { 16, 0, 3 }, { 8, 31, 0 } } },
{ { { 16, 0, 2 }, { 9, 30, 1 } } },
{ { { 16, 0, 1 }, { 9, 30, 0 } } },
{ { { 16, 0, 0 }, { 12, 24, 0 } } },
{ { { 16, 0, 1 }, { 9, 31, 1 } } },
{ { { 16, 0, 2 }, { 9, 31, 0 } } },
{ { { 16, 0, 3 }, { 9, 31, 1 } } },
{ { { 16, 0, 4 }, { 10, 30, 1 } } },
{ { { 17, 0, 3 }, { 10, 30, 0 } } },
{ { { 17, 0, 2 }, { 10, 31, 1 } } },
{ { { 17, 0, 1 }, { 10, 31, 0 } } },
{ { { 17, 0, 0 }, { 12, 27, 0 } } },
{ { { 17, 0, 1 }, { 11, 30, 1 } } },
{ { { 17, 0, 2 }, { 11, 30, 0 } } },
{ { { 17, 0, 3 }, { 12, 28, 0 } } },
{ { { 17, 0, 4 }, { 11, 31, 1 } } },
{ { { 18, 0, 3 }, { 11, 31, 0 } } },
{ { { 18, 0, 2 }, { 11, 31, 1 } } },
{ { { 18, 0, 1 }, { 12, 30, 1 } } },
{ { { 18, 0, 0 }, { 12, 30, 0 } } },
{ { { 18, 0, 1 }, { 14, 27, 1 } } },
{ { { 18, 0, 2 }, { 14, 27, 0 } } },
{ { { 18, 0, 3 }, { 12, 31, 0 } } },
{ { { 18, 0, 4 }, { 13, 30, 1 } } },
{ { { 19, 0, 3 }, { 13, 30, 0 } } },
{ { { 19, 0, 2 }, { 16, 24, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 0 }, { 13, 31, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 2 }, { 14, 30, 1 } } },
{ { { 19, 0, 3 }, { 14, 30, 0 } } },
{ { { 19, 0, 4 }, { 14, 31, 1 } } },
{ { { 20, 0, 4 }, { 14, 31, 0 } } },
{ { { 20, 0, 3 }, { 16, 27, 0 } } },
{ { { 20, 0, 2 }, { 15, 30, 1 } } },
{ { { 20, 0, 1 }, { 15, 30, 0 } } },
{ { { 20, 0, 0 }, { 16, 28, 0 } } },
{ { { 20, 0, 1 }, { 15, 31, 1 } } },
{ { { 20, 0, 2 }, { 15, 31, 0 } } },
{ { { 20, 0, 3 }, { 15, 31, 1 } } },
{ { { 20, 0, 4 }, { 16, 30, 1 } } },
{ { { 21, 0, 3 }, { 16, 30, 0 } } },
{ { { 21, 0, 2 }, { 18, 27, 1 } } },
{ { { 21, 0, 1 }, { 18, 27, 0 } } },
{ { { 21, 0, 0 }, { 16, 31, 0 } } },
{ { { 21, 0, 1 }, { 17, 30, 1 } } },
{ { { 21, 0, 2 }, { 17, 30, 0 } } },
{ { { 21, 0, 3 }, { 20, 24, 0 } } },
{ { { 21, 0, 4 }, { 17, 31, 1 } } },
{ { { 22, 0, 3 }, { 17, 31, 0 } } },
{ { { 22, 0, 2 }, { 17, 31, 1 } } },
{ { { 22, 0, 1 }, { 18, 30, 1 } } },
{ { { 22, 0, 0 }, { 18, 30, 0 } } },
{ { { 22, 0, 1 }, { 18, 31, 1 } } },
{ { { 22, 0, 2 }, { 18, 31, 0 } } },
{ { { 22, 0, 3 }, { 20, 27, 0 } } },
{ { { 22, 0, 4 }, { 19, 30, 1 } } },
{ { { 23, 0, 3 }, { 19, 30, 0 } } },
{ { { 23, 0, 2 }, { 20, 28, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 0 }, { 19, 31, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 2 }, { 20, 30, 1 } } },
{ { { 23, 0, 3 }, { 20, 30, 0 } } },
{ { { 23, 0, 4 }, { 22, 27, 1 } } },
{ { { 24, 0, 4 }, { 22, 27, 0 } } },
{ { { 24, 0, 3 }, { 20, 31, 0 } } },
{ { { 24, 0, 2 }, { 21, 30, 1 } } },
{ { { 24, 0, 1 }, { 21, 30, 0 } } },
{ { { 24, 0, 0 }, { 24, 24, 0 } } },
{ { { 24, 0, 1 }, { 21, 31, 1 } } },
{ { { 24, 0, 2 }, { 21, 31, 0 } } },
{ { { 24, 0, 3 }, { 21, 31, 1 } } },
{ { { 24, 0, 4 }, { 22, 30, 1 } } },
{ { { 25, 0, 3 }, { 22, 30, 0 } } },
{ { { 25, 0, 2 }, { 22, 31, 1 } } },
{ { { 25, 0, 1 }, { 22, 31, 0 } } },
{ { { 25, 0, 0 }, { 24, 27, 0 } } },
{ { { 25, 0, 1 }, { 23, 30, 1 } } },
{ { { 25, 0, 2 }, { 23, 30, 0 } } },
{ { { 25, 0, 3 }, { 24, 28, 0 } } },
{ { { 25, 0, 4 }, { 23, 31, 1 } } },
{ { { 26, 0, 3 }, { 23, 31, 0 } } },
{ { { 26, 0, 2 }, { 23, 31, 1 } } },
{ { { 26, 0, 1 }, { 24, 30, 1 } } },
{ { { 26, 0, 0 }, { 24, 30, 0 } } },
{ { { 26, 0, 1 }, { 26, 27, 1 } } },
{ { { 26, 0, 2 }, { 26, 27, 0 } } },
{ { { 26, 0, 3 }, { 24, 31, 0 } } },
{ { { 26, 0, 4 }, { 25, 30, 1 } } },
{ { { 27, 0, 3 }, { 25, 30, 0 } } },
{ { { 27, 0, 2 }, { 28, 24, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 0 }, { 25, 31, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 2 }, { 26, 30, 1 } } },
{ { { 27, 0, 3 }, { 26, 30, 0 } } },
{ { { 27, 0, 4 }, { 26, 31, 1 } } },
{ { { 28, 0, 4 }, { 26, 31, 0 } } },
{ { { 28, 0, 3 }, { 28, 27, 0 } } },
{ { { 28, 0, 2 }, { 27, 30, 1 } } },
{ { { 28, 0, 1 }, { 27, 30, 0 } } },
{ { { 28, 0, 0 }, { 28, 28, 0 } } },
{ { { 28, 0, 1 }, { 27, 31, 1 } } },
{ { { 28, 0, 2 }, { 27, 31, 0 } } },
{ { { 28, 0, 3 }, { 27, 31, 1 } } },
{ { { 28, 0, 4 }, { 28, 30, 1 } } },
{ { { 29, 0, 3 }, { 28, 30, 0 } } },
{ { { 29, 0, 2 }, { 30, 27, 1 } } },
{ { { 29, 0, 1 }, { 30, 27, 0 } } },
{ { { 29, 0, 0 }, { 28, 31, 0 } } },
{ { { 29, 0, 1 }, { 29, 30, 1 } } },
{ { { 29, 0, 2 }, { 29, 30, 0 } } },
{ { { 29, 0, 3 }, { 29, 30, 1 } } },
{ { { 29, 0, 4 }, { 29, 31, 1 } } },
{ { { 30, 0, 3 }, { 29, 31, 0 } } },
{ { { 30, 0, 2 }, { 29, 31, 1 } } },
{ { { 30, 0, 1 }, { 30, 30, 1 } } },
{ { { 30, 0, 0 }, { 30, 30, 0 } } },
{ { { 30, 0, 1 }, { 30, 31, 1 } } },
{ { { 30, 0, 2 }, { 30, 31, 0 } } },
{ { { 30, 0, 3 }, { 30, 31, 1 } } },
{ { { 30, 0, 4 }, { 31, 30, 1 } } },
{ { { 31, 0, 3 }, { 31, 30, 0 } } },
{ { { 31, 0, 2 }, { 31, 30, 1 } } },
{ { { 31, 0, 1 }, { 31, 31, 1 } } },
{ { { 31, 0, 0 }, { 31, 31, 0 } } }
};
/*
  Single-color lookup table for the 6-bit (green) channel.  Indexed by the
  8-bit target intensity; each entry holds two candidate source blocks
  ({ start, end, error } triples, consumed by ComputeEndPoints) whose DXT
  interpolants best reproduce that intensity.
*/
static const DDSSingleColorLookup DDSLookup_6_4[] =
{
  { { { 0, 0, 0 }, { 0, 0, 0 } } },
  { { { 0, 0, 1 }, { 0, 1, 0 } } },
  { { { 0, 0, 2 }, { 0, 2, 0 } } },
  { { { 1, 0, 1 }, { 0, 3, 1 } } },
  { { { 1, 0, 0 }, { 0, 3, 0 } } },
  { { { 1, 0, 1 }, { 0, 4, 0 } } },
  { { { 1, 0, 2 }, { 0, 5, 0 } } },
  { { { 2, 0, 1 }, { 0, 6, 1 } } },
  { { { 2, 0, 0 }, { 0, 6, 0 } } },
  { { { 2, 0, 1 }, { 0, 7, 0 } } },
  { { { 2, 0, 2 }, { 0, 8, 0 } } },
  { { { 3, 0, 1 }, { 0, 9, 1 } } },
  { { { 3, 0, 0 }, { 0, 9, 0 } } },
  { { { 3, 0, 1 }, { 0, 10, 0 } } },
  { { { 3, 0, 2 }, { 0, 11, 0 } } },
  { { { 4, 0, 1 }, { 0, 12, 1 } } },
  { { { 4, 0, 0 }, { 0, 12, 0 } } },
  { { { 4, 0, 1 }, { 0, 13, 0 } } },
  { { { 4, 0, 2 }, { 0, 14, 0 } } },
  { { { 5, 0, 1 }, { 0, 15, 1 } } },
  { { { 5, 0, 0 }, { 0, 15, 0 } } },
  { { { 5, 0, 1 }, { 0, 16, 0 } } },
  { { { 5, 0, 2 }, { 1, 15, 0 } } },
  { { { 6, 0, 1 }, { 0, 17, 0 } } },
  { { { 6, 0, 0 }, { 0, 18, 0 } } },
  { { { 6, 0, 1 }, { 0, 19, 0 } } },
  { { { 6, 0, 2 }, { 3, 14, 0 } } },
  { { { 7, 0, 1 }, { 0, 20, 0 } } },
  { { { 7, 0, 0 }, { 0, 21, 0 } } },
  { { { 7, 0, 1 }, { 0, 22, 0 } } },
  { { { 7, 0, 2 }, { 4, 15, 0 } } },
  { { { 8, 0, 1 }, { 0, 23, 0 } } },
  { { { 8, 0, 0 }, { 0, 24, 0 } } },
  { { { 8, 0, 1 }, { 0, 25, 0 } } },
  { { { 8, 0, 2 }, { 6, 14, 0 } } },
  { { { 9, 0, 1 }, { 0, 26, 0 } } },
  { { { 9, 0, 0 }, { 0, 27, 0 } } },
  { { { 9, 0, 1 }, { 0, 28, 0 } } },
  { { { 9, 0, 2 }, { 7, 15, 0 } } },
  { { { 10, 0, 1 }, { 0, 29, 0 } } },
  { { { 10, 0, 0 }, { 0, 30, 0 } } },
  { { { 10, 0, 1 }, { 0, 31, 0 } } },
  { { { 10, 0, 2 }, { 9, 14, 0 } } },
  { { { 11, 0, 1 }, { 0, 32, 0 } } },
  { { { 11, 0, 0 }, { 0, 33, 0 } } },
  { { { 11, 0, 1 }, { 2, 30, 0 } } },
  { { { 11, 0, 2 }, { 0, 34, 0 } } },
  { { { 12, 0, 1 }, { 0, 35, 0 } } },
  { { { 12, 0, 0 }, { 0, 36, 0 } } },
  { { { 12, 0, 1 }, { 3, 31, 0 } } },
  { { { 12, 0, 2 }, { 0, 37, 0 } } },
  { { { 13, 0, 1 }, { 0, 38, 0 } } },
  { { { 13, 0, 0 }, { 0, 39, 0 } } },
  { { { 13, 0, 1 }, { 5, 30, 0 } } },
  { { { 13, 0, 2 }, { 0, 40, 0 } } },
  { { { 14, 0, 1 }, { 0, 41, 0 } } },
  { { { 14, 0, 0 }, { 0, 42, 0 } } },
  { { { 14, 0, 1 }, { 6, 31, 0 } } },
  { { { 14, 0, 2 }, { 0, 43, 0 } } },
  { { { 15, 0, 1 }, { 0, 44, 0 } } },
  { { { 15, 0, 0 }, { 0, 45, 0 } } },
  { { { 15, 0, 1 }, { 8, 30, 0 } } },
  { { { 15, 0, 2 }, { 0, 46, 0 } } },
  { { { 16, 0, 2 }, { 0, 47, 0 } } },
  { { { 16, 0, 1 }, { 1, 46, 0 } } },
  { { { 16, 0, 0 }, { 0, 48, 0 } } },
  { { { 16, 0, 1 }, { 0, 49, 0 } } },
  { { { 16, 0, 2 }, { 0, 50, 0 } } },
  { { { 17, 0, 1 }, { 2, 47, 0 } } },
  { { { 17, 0, 0 }, { 0, 51, 0 } } },
  { { { 17, 0, 1 }, { 0, 52, 0 } } },
  { { { 17, 0, 2 }, { 0, 53, 0 } } },
  { { { 18, 0, 1 }, { 4, 46, 0 } } },
  { { { 18, 0, 0 }, { 0, 54, 0 } } },
  { { { 18, 0, 1 }, { 0, 55, 0 } } },
  { { { 18, 0, 2 }, { 0, 56, 0 } } },
  { { { 19, 0, 1 }, { 5, 47, 0 } } },
  { { { 19, 0, 0 }, { 0, 57, 0 } } },
  { { { 19, 0, 1 }, { 0, 58, 0 } } },
  { { { 19, 0, 2 }, { 0, 59, 0 } } },
  { { { 20, 0, 1 }, { 7, 46, 0 } } },
  { { { 20, 0, 0 }, { 0, 60, 0 } } },
  { { { 20, 0, 1 }, { 0, 61, 0 } } },
  { { { 20, 0, 2 }, { 0, 62, 0 } } },
  { { { 21, 0, 1 }, { 8, 47, 0 } } },
  { { { 21, 0, 0 }, { 0, 63, 0 } } },
  { { { 21, 0, 1 }, { 1, 62, 0 } } },
  { { { 21, 0, 2 }, { 1, 63, 0 } } },
  { { { 22, 0, 1 }, { 10, 46, 0 } } },
  { { { 22, 0, 0 }, { 2, 62, 0 } } },
  { { { 22, 0, 1 }, { 2, 63, 0 } } },
  { { { 22, 0, 2 }, { 3, 62, 0 } } },
  { { { 23, 0, 1 }, { 11, 47, 0 } } },
  { { { 23, 0, 0 }, { 3, 63, 0 } } },
  { { { 23, 0, 1 }, { 4, 62, 0 } } },
  { { { 23, 0, 2 }, { 4, 63, 0 } } },
  { { { 24, 0, 1 }, { 13, 46, 0 } } },
  { { { 24, 0, 0 }, { 5, 62, 0 } } },
  { { { 24, 0, 1 }, { 5, 63, 0 } } },
  { { { 24, 0, 2 }, { 6, 62, 0 } } },
  { { { 25, 0, 1 }, { 14, 47, 0 } } },
  { { { 25, 0, 0 }, { 6, 63, 0 } } },
  { { { 25, 0, 1 }, { 7, 62, 0 } } },
  { { { 25, 0, 2 }, { 7, 63, 0 } } },
  { { { 26, 0, 1 }, { 16, 45, 0 } } },
  { { { 26, 0, 0 }, { 8, 62, 0 } } },
  { { { 26, 0, 1 }, { 8, 63, 0 } } },
  { { { 26, 0, 2 }, { 9, 62, 0 } } },
  { { { 27, 0, 1 }, { 16, 48, 0 } } },
  { { { 27, 0, 0 }, { 9, 63, 0 } } },
  { { { 27, 0, 1 }, { 10, 62, 0 } } },
  { { { 27, 0, 2 }, { 10, 63, 0 } } },
  { { { 28, 0, 1 }, { 16, 51, 0 } } },
  { { { 28, 0, 0 }, { 11, 62, 0 } } },
  { { { 28, 0, 1 }, { 11, 63, 0 } } },
  { { { 28, 0, 2 }, { 12, 62, 0 } } },
  { { { 29, 0, 1 }, { 16, 54, 0 } } },
  { { { 29, 0, 0 }, { 12, 63, 0 } } },
  { { { 29, 0, 1 }, { 13, 62, 0 } } },
  { { { 29, 0, 2 }, { 13, 63, 0 } } },
  { { { 30, 0, 1 }, { 16, 57, 0 } } },
  { { { 30, 0, 0 }, { 14, 62, 0 } } },
  { { { 30, 0, 1 }, { 14, 63, 0 } } },
  { { { 30, 0, 2 }, { 15, 62, 0 } } },
  { { { 31, 0, 1 }, { 16, 60, 0 } } },
  { { { 31, 0, 0 }, { 15, 63, 0 } } },
  { { { 31, 0, 1 }, { 24, 46, 0 } } },
  { { { 31, 0, 2 }, { 16, 62, 0 } } },
  { { { 32, 0, 2 }, { 16, 63, 0 } } },
  { { { 32, 0, 1 }, { 17, 62, 0 } } },
  { { { 32, 0, 0 }, { 25, 47, 0 } } },
  { { { 32, 0, 1 }, { 17, 63, 0 } } },
  { { { 32, 0, 2 }, { 18, 62, 0 } } },
  { { { 33, 0, 1 }, { 18, 63, 0 } } },
  { { { 33, 0, 0 }, { 27, 46, 0 } } },
  { { { 33, 0, 1 }, { 19, 62, 0 } } },
  { { { 33, 0, 2 }, { 19, 63, 0 } } },
  { { { 34, 0, 1 }, { 20, 62, 0 } } },
  { { { 34, 0, 0 }, { 28, 47, 0 } } },
  { { { 34, 0, 1 }, { 20, 63, 0 } } },
  { { { 34, 0, 2 }, { 21, 62, 0 } } },
  { { { 35, 0, 1 }, { 21, 63, 0 } } },
  { { { 35, 0, 0 }, { 30, 46, 0 } } },
  { { { 35, 0, 1 }, { 22, 62, 0 } } },
  { { { 35, 0, 2 }, { 22, 63, 0 } } },
  { { { 36, 0, 1 }, { 23, 62, 0 } } },
  { { { 36, 0, 0 }, { 31, 47, 0 } } },
  { { { 36, 0, 1 }, { 23, 63, 0 } } },
  { { { 36, 0, 2 }, { 24, 62, 0 } } },
  { { { 37, 0, 1 }, { 24, 63, 0 } } },
  { { { 37, 0, 0 }, { 32, 47, 0 } } },
  { { { 37, 0, 1 }, { 25, 62, 0 } } },
  { { { 37, 0, 2 }, { 25, 63, 0 } } },
  { { { 38, 0, 1 }, { 26, 62, 0 } } },
  { { { 38, 0, 0 }, { 32, 50, 0 } } },
  { { { 38, 0, 1 }, { 26, 63, 0 } } },
  { { { 38, 0, 2 }, { 27, 62, 0 } } },
  { { { 39, 0, 1 }, { 27, 63, 0 } } },
  { { { 39, 0, 0 }, { 32, 53, 0 } } },
  { { { 39, 0, 1 }, { 28, 62, 0 } } },
  { { { 39, 0, 2 }, { 28, 63, 0 } } },
  { { { 40, 0, 1 }, { 29, 62, 0 } } },
  { { { 40, 0, 0 }, { 32, 56, 0 } } },
  { { { 40, 0, 1 }, { 29, 63, 0 } } },
  { { { 40, 0, 2 }, { 30, 62, 0 } } },
  { { { 41, 0, 1 }, { 30, 63, 0 } } },
  { { { 41, 0, 0 }, { 32, 59, 0 } } },
  { { { 41, 0, 1 }, { 31, 62, 0 } } },
  { { { 41, 0, 2 }, { 31, 63, 0 } } },
  { { { 42, 0, 1 }, { 32, 61, 0 } } },
  { { { 42, 0, 0 }, { 32, 62, 0 } } },
  { { { 42, 0, 1 }, { 32, 63, 0 } } },
  { { { 42, 0, 2 }, { 41, 46, 0 } } },
  { { { 43, 0, 1 }, { 33, 62, 0 } } },
  { { { 43, 0, 0 }, { 33, 63, 0 } } },
  { { { 43, 0, 1 }, { 34, 62, 0 } } },
  { { { 43, 0, 2 }, { 42, 47, 0 } } },
  { { { 44, 0, 1 }, { 34, 63, 0 } } },
  { { { 44, 0, 0 }, { 35, 62, 0 } } },
  { { { 44, 0, 1 }, { 35, 63, 0 } } },
  { { { 44, 0, 2 }, { 44, 46, 0 } } },
  { { { 45, 0, 1 }, { 36, 62, 0 } } },
  { { { 45, 0, 0 }, { 36, 63, 0 } } },
  { { { 45, 0, 1 }, { 37, 62, 0 } } },
  { { { 45, 0, 2 }, { 45, 47, 0 } } },
  { { { 46, 0, 1 }, { 37, 63, 0 } } },
  { { { 46, 0, 0 }, { 38, 62, 0 } } },
  { { { 46, 0, 1 }, { 38, 63, 0 } } },
  { { { 46, 0, 2 }, { 47, 46, 0 } } },
  { { { 47, 0, 1 }, { 39, 62, 0 } } },
  { { { 47, 0, 0 }, { 39, 63, 0 } } },
  { { { 47, 0, 1 }, { 40, 62, 0 } } },
  { { { 47, 0, 2 }, { 48, 46, 0 } } },
  { { { 48, 0, 2 }, { 40, 63, 0 } } },
  { { { 48, 0, 1 }, { 41, 62, 0 } } },
  { { { 48, 0, 0 }, { 41, 63, 0 } } },
  { { { 48, 0, 1 }, { 48, 49, 0 } } },
  { { { 48, 0, 2 }, { 42, 62, 0 } } },
  { { { 49, 0, 1 }, { 42, 63, 0 } } },
  { { { 49, 0, 0 }, { 43, 62, 0 } } },
  { { { 49, 0, 1 }, { 48, 52, 0 } } },
  { { { 49, 0, 2 }, { 43, 63, 0 } } },
  { { { 50, 0, 1 }, { 44, 62, 0 } } },
  { { { 50, 0, 0 }, { 44, 63, 0 } } },
  { { { 50, 0, 1 }, { 48, 55, 0 } } },
  { { { 50, 0, 2 }, { 45, 62, 0 } } },
  { { { 51, 0, 1 }, { 45, 63, 0 } } },
  { { { 51, 0, 0 }, { 46, 62, 0 } } },
  { { { 51, 0, 1 }, { 48, 58, 0 } } },
  { { { 51, 0, 2 }, { 46, 63, 0 } } },
  { { { 52, 0, 1 }, { 47, 62, 0 } } },
  { { { 52, 0, 0 }, { 47, 63, 0 } } },
  { { { 52, 0, 1 }, { 48, 61, 0 } } },
  { { { 52, 0, 2 }, { 48, 62, 0 } } },
  { { { 53, 0, 1 }, { 56, 47, 0 } } },
  { { { 53, 0, 0 }, { 48, 63, 0 } } },
  { { { 53, 0, 1 }, { 49, 62, 0 } } },
  { { { 53, 0, 2 }, { 49, 63, 0 } } },
  { { { 54, 0, 1 }, { 58, 46, 0 } } },
  { { { 54, 0, 0 }, { 50, 62, 0 } } },
  { { { 54, 0, 1 }, { 50, 63, 0 } } },
  { { { 54, 0, 2 }, { 51, 62, 0 } } },
  { { { 55, 0, 1 }, { 59, 47, 0 } } },
  { { { 55, 0, 0 }, { 51, 63, 0 } } },
  { { { 55, 0, 1 }, { 52, 62, 0 } } },
  { { { 55, 0, 2 }, { 52, 63, 0 } } },
  { { { 56, 0, 1 }, { 61, 46, 0 } } },
  { { { 56, 0, 0 }, { 53, 62, 0 } } },
  { { { 56, 0, 1 }, { 53, 63, 0 } } },
  { { { 56, 0, 2 }, { 54, 62, 0 } } },
  { { { 57, 0, 1 }, { 62, 47, 0 } } },
  { { { 57, 0, 0 }, { 54, 63, 0 } } },
  { { { 57, 0, 1 }, { 55, 62, 0 } } },
  { { { 57, 0, 2 }, { 55, 63, 0 } } },
  { { { 58, 0, 1 }, { 56, 62, 1 } } },
  { { { 58, 0, 0 }, { 56, 62, 0 } } },
  { { { 58, 0, 1 }, { 56, 63, 0 } } },
  { { { 58, 0, 2 }, { 57, 62, 0 } } },
  { { { 59, 0, 1 }, { 57, 63, 1 } } },
  { { { 59, 0, 0 }, { 57, 63, 0 } } },
  { { { 59, 0, 1 }, { 58, 62, 0 } } },
  { { { 59, 0, 2 }, { 58, 63, 0 } } },
  { { { 60, 0, 1 }, { 59, 62, 1 } } },
  { { { 60, 0, 0 }, { 59, 62, 0 } } },
  { { { 60, 0, 1 }, { 59, 63, 0 } } },
  { { { 60, 0, 2 }, { 60, 62, 0 } } },
  { { { 61, 0, 1 }, { 60, 63, 1 } } },
  { { { 61, 0, 0 }, { 60, 63, 0 } } },
  { { { 61, 0, 1 }, { 61, 62, 0 } } },
  { { { 61, 0, 2 }, { 61, 63, 0 } } },
  { { { 62, 0, 1 }, { 62, 62, 1 } } },
  { { { 62, 0, 0 }, { 62, 62, 0 } } },
  { { { 62, 0, 1 }, { 62, 63, 0 } } },
  { { { 62, 0, 2 }, { 63, 62, 0 } } },
  { { { 63, 0, 1 }, { 63, 63, 1 } } },
  { { { 63, 0, 0 }, { 63, 63, 0 } } }
};
/*
  Per-channel single-color lookup tables, indexed by channel:
  0 -> red (5-bit), 1 -> green (6-bit), 2 -> blue (5-bit).
  Consumed by ComputeEndPoints().
*/
static const DDSSingleColorLookup*
  DDS_LOOKUP[] =
{
  DDSLookup_5_4,
  DDSLookup_6_4,
  DDSLookup_5_4
};
/*
  Macros
*/
/* Extract the raw 5/6/5 bit fields from a packed 16-bit RGB565 color. */
#define C565_r(x) (((x) & 0xF800) >> 11)
#define C565_g(x) (((x) & 0x07E0) >> 5)
#define C565_b(x) ((x) & 0x001F)
/* Widen a 5- or 6-bit field to 8 bits by replicating its top bits. */
#define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2))
#define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4))
#define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2))
/* Halve a mipmap dimension, never going below 1. */
#define DIV2(x)  ((x) > 1 ? ((x) >> 1) : 1)
/*
  Widen an alpha [min,max] range so it spans at least 'steps' levels,
  clamped to [0,255].  NOTE: multi-statement and not do/while(0)-wrapped,
  so it must be used as a complete statement (as existing callers do).
*/
#define FixRange(min, max, steps) \
if (min > max) \
  min = max; \
if ((ssize_t) max - min < steps) \
  max = MagickMin(min + steps, 255); \
if ((ssize_t) max - min < steps) \
  min = MagickMax(0, (ssize_t) max - steps)
/*
  3-component dot product.  NOTE(review): the expansion is not wrapped in
  outer parentheses, so do not use Dot() inside a larger expression with
  higher-precedence neighbors.
*/
#define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z)
/* Broadcast a scalar into all four (or three) vector components. */
#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
  = value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value
/* True when a DDS pixel format's channel bitmasks match exactly. */
#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
  g && mask.b_bitmask == b && mask.alpha_bitmask == a)
/*
  Forward declarations.
*/
static MagickBooleanType
  ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3,
    DDSVector4 *, DDSVector4 *, unsigned char *, size_t),
  ReadDDSInfo(Image *,DDSInfo *),
  ReadDXT1(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
    ExceptionInfo *),
  ReadDXT3(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
    ExceptionInfo *),
  ReadDXT5(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType,
    ExceptionInfo *),
  ReadUncompressedRGB(const ImageInfo *,Image *,DDSInfo *,
    const MagickBooleanType,ExceptionInfo *),
  ReadUncompressedRGBA(const ImageInfo *,Image *,DDSInfo *,
    const MagickBooleanType,ExceptionInfo *),
  SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
  SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
  WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *),
  WriteMipmaps(Image *,const ImageInfo*,const size_t,const size_t,const size_t,
    const MagickBooleanType,const MagickBooleanType,const MagickBooleanType,
    ExceptionInfo *);

static void
  RemapIndices(const ssize_t *,const unsigned char *,unsigned char *),
  WriteDDSInfo(Image *,const size_t,const size_t,const size_t),
  WriteFourCC(Image *,const size_t,const MagickBooleanType,
    const MagickBooleanType,ExceptionInfo *),
  WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType,
    const MagickBooleanType,ExceptionInfo *),
  WriteIndices(Image *,const DDSVector3,const DDSVector3,unsigned char *),
  WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *),
  WriteUncompressed(Image *,ExceptionInfo *);
/* Component-wise 4-vector sum: *out = a + b. */
static inline void VectorAdd(const DDSVector4 a, const DDSVector4 b,
  DDSVector4 *out)
{
  out->x = a.x + b.x;
  out->y = a.y + b.y;
  out->z = a.z + b.z;
  out->w = a.w + b.w;
}
/* Clamp each of the four components of *v into [0,1] in place. */
static inline void VectorClamp(DDSVector4 *v)
{
  v->x = MagickMin(1.0f,MagickMax(0.0f,v->x));
  v->y = MagickMin(1.0f,MagickMax(0.0f,v->y));
  v->z = MagickMin(1.0f,MagickMax(0.0f,v->z));
  v->w = MagickMin(1.0f,MagickMax(0.0f,v->w));
}
/* Clamp each of the three components of *v into [0,1] in place. */
static inline void VectorClamp3(DDSVector3 *v)
{
  v->x = MagickMin(1.0f,MagickMax(0.0f,v->x));
  v->y = MagickMin(1.0f,MagickMax(0.0f,v->y));
  v->z = MagickMin(1.0f,MagickMax(0.0f,v->z));
}
/* Copy the x/y/z components of a 4-vector into a 3-vector (w dropped). */
static inline void VectorCopy43(const DDSVector4 src,
  DDSVector3 *dst)
{
  dst->x = src.x;
  dst->y = src.y;
  dst->z = src.z;
}
/* Copy all four components of a 4-vector: *dst = src. */
static inline void VectorCopy44(const DDSVector4 src,
  DDSVector4 *dst)
{
  dst->x = src.x;
  dst->y = src.y;
  dst->z = src.z;
  dst->w = src.w;
}
/* Fused negative multiply-subtract: *out = r - p*q (component-wise). */
static inline void VectorNegativeMultiplySubtract(const DDSVector4 p,
  const DDSVector4 q, const DDSVector4 r, DDSVector4 *out)
{
  out->x = r.x - (p.x * q.x);
  out->y = r.y - (p.y * q.y);
  out->z = r.z - (p.z * q.z);
  out->w = r.w - (p.w * q.w);
}
/* Component-wise (Hadamard) product: *out = a * b. */
static inline void VectorMultiply(const DDSVector4 a,
  const DDSVector4 b, DDSVector4 *out)
{
  out->x = a.x * b.x;
  out->y = a.y * b.y;
  out->z = a.z * b.z;
  out->w = a.w * b.w;
}
/* Component-wise 3-vector product: *out = a * b. */
static inline void VectorMultiply3(const DDSVector3 a,
  const DDSVector3 b, DDSVector3 *out)
{
  out->x = a.x * b.x;
  out->y = a.y * b.y;
  out->z = a.z * b.z;
}
/* Fused multiply-add: *out = p*q + r (component-wise). */
static inline void VectorMultiplyAdd(const DDSVector4 p, const DDSVector4 q,
  const DDSVector4 r, DDSVector4 *out)
{
  out->x = (p.x * q.x) + r.x;
  out->y = (p.y * q.y) + r.y;
  out->z = (p.z * q.z) + r.z;
  out->w = (p.w * q.w) + r.w;
}
/* Fused multiply-add for 3-vectors: *out = p*q + r (component-wise). */
static inline void VectorMultiplyAdd3(const DDSVector3 p, const DDSVector3 q,
  const DDSVector3 r, DDSVector3 *out)
{
  out->x = (p.x * q.x) + r.x;
  out->y = (p.y * q.y) + r.y;
  out->z = (p.z * q.z) + r.z;
}
/*
  Component-wise reciprocal: *out = 1/v.  No guard against zero
  components; a zero yields IEEE infinity.
*/
static inline void VectorReciprocal(const DDSVector4 v,
  DDSVector4 *out)
{
  out->x = 1.0f / v.x;
  out->y = 1.0f / v.y;
  out->z = 1.0f / v.z;
  out->w = 1.0f / v.w;
}
/* Component-wise difference: *out = a - b. */
static inline void VectorSubtract(const DDSVector4 a,
  const DDSVector4 b, DDSVector4 *out)
{
  out->x = a.x - b.x;
  out->y = a.y - b.y;
  out->z = a.z - b.z;
  out->w = a.w - b.w;
}
/* Component-wise 3-vector difference: *out = a - b. */
static inline void VectorSubtract3(const DDSVector3 a,
  const DDSVector3 b, DDSVector3 *out)
{
  out->x = a.x - b.x;
  out->y = a.y - b.y;
  out->z = a.z - b.z;
}
/* Truncate each component toward zero (floor for >0, ceil otherwise). */
static inline void VectorTruncate(DDSVector4 *v)
{
  v->x = v->x > 0.0f ? floor(v->x) : ceil(v->x);
  v->y = v->y > 0.0f ? floor(v->y) : ceil(v->y);
  v->z = v->z > 0.0f ? floor(v->z) : ceil(v->z);
  v->w = v->w > 0.0f ? floor(v->w) : ceil(v->w);
}
/* Truncate each 3-vector component toward zero. */
static inline void VectorTruncate3(DDSVector3 *v)
{
  v->x = v->x > 0.0f ? floor(v->x) : ceil(v->x);
  v->y = v->y > 0.0f ? floor(v->y) : ceil(v->y);
  v->z = v->z > 0.0f ? floor(v->z) : ceil(v->z);
}
/*
  Expand a DXT 5:6:5 endpoint pair (c0,c1) into the four-entry palette
  used to decode a 4x4 block.  In four-color mode (alpha ignored, or
  c0 > c1) the two extra entries are thirds interpolants; otherwise the
  third entry is the midpoint and the fourth is black with a[3]=255
  (presumably the transparent entry — confirm against the DXT decoders).
*/
static void CalculateColors(unsigned short c0, unsigned short c1,
  DDSColors *c, MagickBooleanType ignoreAlpha)
{
  c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;

  /* Promote the two packed 565 endpoints to 8-bit channels. */
  c->r[0] = (unsigned char) C565_red(c0);
  c->g[0] = (unsigned char) C565_green(c0);
  c->b[0] = (unsigned char) C565_blue(c0);

  c->r[1] = (unsigned char) C565_red(c1);
  c->g[1] = (unsigned char) C565_green(c1);
  c->b[1] = (unsigned char) C565_blue(c1);

  if ((ignoreAlpha == MagickFalse) && (c0 <= c1))
    {
      /* Three-color mode: midpoint plus the special fourth entry. */
      c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
      c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
      c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);

      c->r[3] = c->g[3] = c->b[3] = 0;
      c->a[3] = 255;
    }
  else
    {
      /* Four-color mode: 2/3 and 1/3 interpolants. */
      c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
      c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
      c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);

      c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
      c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
      c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
    }
}
/*
  Quantize 16 alpha samples against the DXT5 alpha palette implied by
  (min,max,steps): codes[0]=min, codes[1]=max, interpolants in between,
  and (when steps==5) the fixed entries 0 and 255.  Samples of -1 are
  skipped and mapped to index 0.  Writes the chosen palette index for
  each sample into indices[] and returns the total squared error.
*/
static size_t CompressAlpha(const size_t min, const size_t max,
  const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
  size_t
    i,
    j,
    total_error;

  unsigned char
    codes[8];

  /* Build the candidate palette; for steps==7 the interpolation loop
     overwrites the default 0/255 entries. */
  codes[0] = (unsigned char) min;
  codes[1] = (unsigned char) max;
  codes[6] = 0;
  codes[7] = 255;
  for (i = 1; i < steps; i++)
    codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps);

  total_error = 0;
  for (i = 0; i < 16; i++)
  {
    size_t
      best_dist,
      best_index,
      value;

    if (alphas[i] == -1)
      {
        /* Missing sample: pin to index 0 and contribute no error. */
        indices[i] = 0;
        continue;
      }
    value = (size_t) alphas[i];
    best_dist = SIZE_MAX;
    best_index = 0;
    for (j = 0; j < 8; j++)
    {
      /* Unsigned wrap-around cancels under squaring, so this equals
         the true squared distance |value-codes[j]|^2. */
      size_t dist = value - (size_t) codes[j];
      dist *= dist;
      if (dist < best_dist)
        {
          best_dist = dist;
          best_index = j;
        }
    }
    indices[i] = (unsigned char) best_index;
    total_error += best_dist;
  }
  return total_error;
}
/*
  Cluster-fit DXT color compressor: exhaustively partitions the (up to
  16) weighted points, in projection order, into the four DXT palette
  slots (start, 2/3, 1/3, end) and solves a least-squares system for
  the best start/end endpoints of each partition.  Up to 8 reordering
  iterations are tried, each re-sorting along the previous best axis.
  Outputs *start, *end, and per-texel palette indices.  Left unchanged
  as a doc-only pass: the float accumulation order and the OpenMP
  critical section make a restyle risky.
*/
static void CompressClusterFit(const size_t count,
  const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3* end,
  unsigned char *indices)
{
  DDSVector3
    axis;

  DDSVector4
    grid,
    gridrcp,
    half,
    onethird_onethird2,
    pointsWeights[16],
    two,
    twonineths,
    twothirds_twothirds2,
    xSumwSum;

  float
    bestError = 1e+37f;

  size_t
    bestIteration = 0,
    besti = 0,
    bestj = 0,
    bestk = 0,
    iterationIndex;

  ssize_t
    i;

  unsigned char
    *o,
    order[128],
    unordered[16];

  /* Constant vectors used by the least-squares solve below; the .w
     components carry the squared weights (1/9, 4/9). */
  VectorInit(half,0.5f);
  VectorInit(two,2.0f);
  VectorInit(onethird_onethird2,1.0f/3.0f);
  onethird_onethird2.w = 1.0f/9.0f;
  VectorInit(twothirds_twothirds2,2.0f/3.0f);
  twothirds_twothirds2.w = 4.0f/9.0f;
  VectorInit(twonineths,2.0f/9.0f);
  /* RGB565 quantization grid and its reciprocal. */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  grid.w = 0.0f;
  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  gridrcp.w = 0.0f;
  xSumwSum.x = 0.0f;
  xSumwSum.y = 0.0f;
  xSumwSum.z = 0.0f;
  xSumwSum.w = 0.0f;
  /* Initial ordering: project the points onto the principle axis. */
  ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);
  for (iterationIndex = 0;;)
  {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,1) \
    num_threads(GetMagickResourceLimit(ThreadResource))
#endif
    /* Split points (in sorted order) at i/j/k into the four clusters:
       [0,i) -> start, [i,j) -> 2/3, [j,k) -> 1/3, [k,count) -> end. */
    for (i=0; i < (ssize_t) count; i++)
    {
      DDSVector4
        part0,
        part1,
        part2;

      size_t
        ii,
        j,
        k,
        kmin;

      VectorInit(part0,0.0f);
      for(ii=0; ii < (size_t) i; ii++)
        VectorAdd(pointsWeights[ii],part0,&part0);
      VectorInit(part1,0.0f);
      for (j=(size_t) i;;)
      {
        if (j == 0)
          {
            VectorCopy44(pointsWeights[0],&part2);
            kmin = 1;
          }
        else
          {
            VectorInit(part2,0.0f);
            kmin = j;
          }
        for (k=kmin;;)
        {
          DDSVector4
            a,
            alpha2_sum,
            alphax_sum,
            alphabeta_sum,
            b,
            beta2_sum,
            betax_sum,
            e1,
            e2,
            factor,
            part3;

          float
            error;

          /* part3 = remaining (end) cluster sum. */
          VectorSubtract(xSumwSum,part2,&part3);
          VectorSubtract(part3,part1,&part3);
          VectorSubtract(part3,part0,&part3);
          /* Normal equations for the two endpoints a (start) and b
             (end) of this partition. */
          VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
          VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
          VectorInit(alpha2_sum,alphax_sum.w);
          VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
          VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
          VectorInit(beta2_sum,betax_sum.w);
          VectorAdd(part1,part2,&alphabeta_sum);
          VectorInit(alphabeta_sum,alphabeta_sum.w);
          VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);
          VectorMultiply(alpha2_sum,beta2_sum,&factor);
          VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
            &factor);
          VectorReciprocal(factor,&factor);
          VectorMultiply(alphax_sum,beta2_sum,&a);
          VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
          VectorMultiply(a,factor,&a);
          VectorMultiply(betax_sum,alpha2_sum,&b);
          VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
          VectorMultiply(b,factor,&b);
          /* Snap both endpoints onto the RGB565 grid. */
          VectorClamp(&a);
          VectorMultiplyAdd(grid,a,half,&a);
          VectorTruncate(&a);
          VectorMultiply(a,gridrcp,&a);
          VectorClamp(&b);
          VectorMultiplyAdd(grid,b,half,&b);
          VectorTruncate(&b);
          VectorMultiply(b,gridrcp,&b);
          /* Metric-weighted squared error of this candidate pair. */
          VectorMultiply(b,b,&e1);
          VectorMultiply(e1,beta2_sum,&e1);
          VectorMultiply(a,a,&e2);
          VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);
          VectorMultiply(a,b,&e2);
          VectorMultiply(e2,alphabeta_sum,&e2);
          VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
          VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
          VectorMultiplyAdd(two,e2,e1,&e2);
          VectorMultiply(e2,metric,&e2);
          error = e2.x + e2.y + e2.z;
          if (error < bestError)
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (DDS_CompressClusterFit)
#endif
              {
                /* Double-checked under the critical section: another
                   thread may have improved bestError meanwhile. */
                if (error < bestError)
                  {
                    VectorCopy43(a,start);
                    VectorCopy43(b,end);
                    bestError = error;
                    besti = i;
                    bestj = j;
                    bestk = k;
                    bestIteration = iterationIndex;
                  }
              }
            }
          if (k == count)
            break;
          VectorAdd(pointsWeights[k],part2,&part2);
          k++;
        }
        if (j == count)
          break;
        VectorAdd(pointsWeights[j],part1,&part1);
        j++;
      }
    }
    /* Stop when an iteration yields no improvement, after 8 rounds, or
       when the new ordering repeats an earlier one. */
    if (bestIteration != iterationIndex)
      break;
    iterationIndex++;
    if (iterationIndex == 8)
      break;
    VectorSubtract3(*end,*start,&axis);
    if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
      iterationIndex) == MagickFalse)
      break;
  }
  /* Translate the winning partition back to per-point palette indices
     (0=start, 2=2/3, 3=1/3, 1=end), then to per-texel indices. */
  o = order + (16*bestIteration);
  for (i=0; i < (ssize_t) besti; i++)
    unordered[o[i]] = 0;
  for (i=besti; i < (ssize_t) bestj; i++)
    unordered[o[i]] = 2;
  for (i=bestj; i < (ssize_t) bestk; i++)
    unordered[o[i]] = 3;
  for (i=bestk; i < (ssize_t) count; i++)
    unordered[o[i]] = 1;
  RemapIndices(map,unordered,indices);
}
/*
  Range-fit DXT color compressor: take the extreme points along the
  principle axis as endpoints, snap them to the RGB565 grid, build the
  four-entry palette, and assign each point to its nearest (metric-
  weighted) palette entry.  Cheaper but coarser than CompressClusterFit.
*/
static void CompressRangeFit(const size_t count,
  const DDSVector4* points, const ssize_t *map, const DDSVector3 principle,
  const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
  unsigned char *indices)
{
  float
    d,
    bestDist,
    max,
    min,
    val;

  DDSVector3
    codes[4],
    grid,
    gridrcp,
    half,
    dist;

  register ssize_t
    i;

  size_t
    bestj,
    j;

  unsigned char
    closest[16];

  VectorInit3(half,0.5f);
  /* RGB565 quantization grid and its reciprocal. */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  /* Find the points with minimum/maximum projection on the axis. */
  if (count > 0)
    {
      VectorCopy43(points[0],start);
      VectorCopy43(points[0],end);
      min = max = Dot(points[0],principle);
      for (i=1; i < (ssize_t) count; i++)
      {
        val = Dot(points[i],principle);
        if (val < min)
          {
            VectorCopy43(points[i],start);
            min = val;
          }
        else if (val > max)
          {
            VectorCopy43(points[i],end);
            max = val;
          }
      }
    }
  /* Snap both endpoints onto the RGB565 grid. */
  VectorClamp3(start);
  VectorMultiplyAdd3(grid,*start,half,start);
  VectorTruncate3(start);
  VectorMultiply3(*start,gridrcp,start);
  VectorClamp3(end);
  VectorMultiplyAdd3(grid,*end,half,end);
  VectorTruncate3(end);
  VectorMultiply3(*end,gridrcp,end);
  /* Four-entry palette: endpoints plus the two thirds interpolants. */
  codes[0] = *start;
  codes[1] = *end;
  codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
  codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
  codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
  codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
  codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
  codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));
  /* Assign each point to its nearest palette entry under the metric. */
  for (i=0; i < (ssize_t) count; i++)
  {
    bestDist = 1e+37f;
    bestj = 0;
    for (j=0; j < 4; j++)
    {
      dist.x = (points[i].x - codes[j].x) * metric.x;
      dist.y = (points[i].y - codes[j].y) * metric.y;
      dist.z = (points[i].z - codes[j].z) * metric.z;
      d = Dot(dist,dist);
      if (d < bestDist)
        {
          bestDist = d;
          bestj = j;
        }
    }
    closest[i] = (unsigned char) bestj;
  }
  RemapIndices(map, closest, indices);
}
/*
  Pick the best endpoint pair for a single flat color using the
  per-channel single-color lookup tables (DDS_LOOKUP order: R, G, B).
  Each table entry offers two candidate {start,end,error} source
  blocks; the candidate pair with the smallest summed squared
  per-channel error wins.  *index is set to 0 or 2 (the palette index
  of the winning interpolant).  NOTE(review): despite its name,
  maxError tracks the smallest error seen so far.
*/
static void ComputeEndPoints(const DDSSingleColorLookup *lookup[],
  const unsigned char *color, DDSVector3 *start, DDSVector3 *end,
  unsigned char *index)
{
  register ssize_t
    i;

  size_t
    c,
    maxError = SIZE_MAX;

  for (i=0; i < 2; i++)
  {
    const DDSSourceBlock*
      sources[3];

      size_t
        error = 0;

    for (c=0; c < 3; c++)
    {
      sources[c] = &lookup[c][color[c]].sources[i];
      error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
    }

    if (error > maxError)
      continue;

    /* Normalize the 5/6/5 endpoints back into [0,1]. */
    start->x = (float) sources[0]->start / 31.0f;
    start->y = (float) sources[1]->start / 63.0f;
    start->z = (float) sources[2]->start / 31.0f;

    end->x = (float) sources[0]->end / 31.0f;
    end->y = (float) sources[1]->end / 63.0f;
    end->z = (float) sources[2]->end / 31.0f;

    *index = (unsigned char) (2*i);
    maxError = error;
  }
}
/*
  Estimate the principal eigenvector of the symmetric 3x3 covariance
  matrix (given as its 6 upper-triangular entries) by eight rounds of
  power iteration, rescaling each round by the reciprocal of the
  largest x/y/z component.  The result is written to *principle.
*/
static void ComputePrincipleComponent(const float *covariance,
  DDSVector3 *principle)
{
  DDSVector4
    row0,
    row1,
    row2,
    v;

  register ssize_t
    i;

  /* Expand the packed upper triangle into full matrix rows. */
  row0.x = covariance[0];
  row0.y = covariance[1];
  row0.z = covariance[2];
  row0.w = 0.0f;

  row1.x = covariance[1];
  row1.y = covariance[3];
  row1.z = covariance[4];
  row1.w = 0.0f;

  row2.x = covariance[2];
  row2.y = covariance[4];
  row2.z = covariance[5];
  row2.w = 0.0f;

  VectorInit(v,1.0f);

  for (i=0; i < 8; i++)
  {
    DDSVector4
      w;

    float
      a;

    /* w = M * v */
    w.x = row0.x * v.x;
    w.y = row0.y * v.x;
    w.z = row0.z * v.x;
    w.w = row0.w * v.x;

    w.x = (row1.x * v.y) + w.x;
    w.y = (row1.y * v.y) + w.y;
    w.z = (row1.z * v.y) + w.z;
    w.w = (row1.w * v.y) + w.w;

    w.x = (row2.x * v.z) + w.x;
    w.y = (row2.y * v.z) + w.y;
    w.z = (row2.z * v.z) + w.z;
    w.w = (row2.w * v.z) + w.w;

    /* Renormalize by the largest component (guarded reciprocal). */
    a = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z)));

    v.x = w.x * a;
    v.y = w.y * a;
    v.z = w.z * a;
    v.w = w.w * a;
  }

  VectorCopy43(v,principle);
}
/*
  Compute the weighted 3x3 covariance of the point cloud (weights in
  the .w components), storing only the 6 upper-triangular entries in
  covariance[0..5] in the order xx, xy, xz, yy, yz, zz.
*/
static void ComputeWeightedCovariance(const size_t count,
  const DDSVector4 *points, float *covariance)
{
  DDSVector3
    centroid;

  float
    total;

  size_t
    i;

  /* Weighted centroid of the points. */
  total = 0.0f;
  VectorInit3(centroid,0.0f);

  for (i=0; i < count; i++)
  {
    total += points[i].w;
    centroid.x += (points[i].x * points[i].w);
    centroid.y += (points[i].y * points[i].w);
    centroid.z += (points[i].z * points[i].w);
  }

  /* Avoid dividing by a (near-)zero total weight. */
  if( total > 1.192092896e-07F)
    {
      centroid.x /= total;
      centroid.y /= total;
      centroid.z /= total;
    }

  for (i=0; i < 6; i++)
    covariance[i] = 0.0f;

  /* Accumulate the weighted outer products of centered points. */
  for (i = 0; i < count; i++)
  {
    DDSVector3
      a,
      b;

    a.x = points[i].x - centroid.x;
    a.y = points[i].y - centroid.y;
    a.z = points[i].z - centroid.z;

    b.x = points[i].w * a.x;
    b.y = points[i].w * a.y;
    b.z = points[i].w * a.z;

    covariance[0] += a.x*b.x;
    covariance[1] += a.x*b.y;
    covariance[2] += a.x*b.z;
    covariance[3] += a.y*b.y;
    covariance[4] += a.y*b.z;
    covariance[5] += a.z*b.z;
  }
}
/*
  Sort the points by their projection onto 'axis' (insertion sort),
  record the resulting permutation in order[16*iteration ..], and
  rebuild pointsWeights[] and the running *xSumwSum in that order
  (each entry pre-multiplied by its weight, with .w = weight).
  Returns MagickFalse when the new ordering duplicates one produced by
  an earlier iteration, signalling the caller to stop iterating.
*/
static MagickBooleanType ConstructOrdering(const size_t count,
  const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
  DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
  float
     dps[16],
     f;

  register ssize_t
    i;

  size_t
    j;

  unsigned char
    c,
    *o,
    *p;

  o = order + (16*iteration);

  /* Projection of each point onto the axis, with identity ordering. */
  for (i=0; i < (ssize_t) count; i++)
  {
    dps[i] = Dot(points[i],axis);
    o[i] = (unsigned char)i;
  }

  /* Insertion sort of dps[] and o[] in tandem. */
  for (i=0; i < (ssize_t) count; i++)
  {
    for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
    {
      f = dps[j];
      dps[j] = dps[j - 1];
      dps[j - 1] = f;

      c = o[j];
      o[j] = o[j - 1];
      o[j - 1] = c;
    }
  }

  /* Bail out if this ordering was already tried. */
  for (i=0; i < (ssize_t) iteration; i++)
  {
    MagickBooleanType
      same;

    p = order + (16*i);
    same = MagickTrue;

    for (j=0; j < count; j++)
    {
      if (o[j] != p[j])
        {
          same = MagickFalse;
          break;
        }
    }

    if (same != MagickFalse)
      return MagickFalse;
  }

  /* Rebuild the weighted points and their running sum in sorted order. */
  xSumwSum->x = 0;
  xSumwSum->y = 0;
  xSumwSum->z = 0;
  xSumwSum->w = 0;

  for (i=0; i < (ssize_t) count; i++)
  {
    DDSVector4
      v;

    j = (size_t) o[i];

    v.x = points[j].w * points[j].x;
    v.y = points[j].w * points[j].y;
    v.z = points[j].w * points[j].z;
    v.w = points[j].w * 1.0f;

    VectorCopy44(v,&pointsWeights[i]);
    VectorAdd(*xSumwSum,v,xSumwSum);
  }

  return MagickTrue;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s D D S %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsDDS() returns MagickTrue if the image format type, identified by the
% magick string, is DDS.
%
% The format of the IsDDS method is:
%
% MagickBooleanType IsDDS(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
/* True when the first four bytes match the DDS file magic "DDS ". */
static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length)
{
  if ((length >= 4) && (LocaleNCompare((char *) magick,"DDS ",4) == 0))
    return(MagickTrue);
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadDDSImage() reads a DirectDraw Surface image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadDDSImage method is:
%
% Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: The image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Read a DDS file: validate the header, select a decoder for the pixel
  format (uncompressed RGB/RGBA, luminance, or DXT1/3/5), then decode
  one image per cubemap face or volume slice into an image list.
  NOTE: ThrowReaderException is a macro that frees the image list and
  returns from this function.
*/
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  const char
    *option;

  CompressionType
    compression;

  DDSInfo
    dds_info;

  DDSDecoder
    *decoder;

  Image
    *image;

  MagickBooleanType
    status,
    cubemap,
    volume,
    read_mipmaps;

  PixelTrait
    alpha_trait;

  size_t
    n,
    num_images;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cubemap=MagickFalse,
  volume=MagickFalse,
  read_mipmaps=MagickFalse;
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
  /*
    Initialize image structure.
  */
  if (ReadDDSInfo(image, &dds_info) != MagickTrue)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
    cubemap = MagickTrue;
  if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
    volume = MagickTrue;
  /* Skip to the first byte past the 128-byte magic+header region. */
  (void) SeekBlob(image, 128, SEEK_SET);
  /*
    Determine pixel format
  */
  if (dds_info.pixelformat.flags & DDPF_RGB)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          alpha_trait = BlendPixelTrait;
          decoder = ReadUncompressedRGBA;
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
   {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          /* Not sure how to handle this */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      else
        {
          /* Luminance is decoded with the plain RGB reader. */
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_FOURCC)
    {
      switch (dds_info.pixelformat.fourcc)
      {
        case FOURCC_DXT1:
        {
          alpha_trait = UndefinedPixelTrait;
          compression = DXT1Compression;
          decoder = ReadDXT1;
          break;
        }
        case FOURCC_DXT3:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT3Compression;
          decoder = ReadDXT3;
          break;
        }
        case FOURCC_DXT5:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT5Compression;
          decoder = ReadDXT5;
          break;
        }
        default:
        {
          /* Unknown FOURCC */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      }
    }
  else
    {
      /* Neither compressed nor uncompressed... thus unsupported */
      ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
    }
  num_images = 1;
  if (cubemap)
    {
      /*
        Determine number of faces defined in the cubemap
      */
      num_images = 0;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
    }
  if (volume)
    num_images = dds_info.depth;
  /* Sanity-check the image count against the blob size. */
  if ((num_images == 0) || (num_images > GetBlobSize(image)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");
  if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse)
    ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit");
  option=GetImageOption(image_info,"dds:skip-mipmaps");
  if (IsStringFalse(option) != MagickFalse)
    read_mipmaps=MagickTrue;
  for (n = 0; n < num_images; n++)
  {
    if (n != 0)
      {
        /* Start a new image */
        if (EOFBlob(image) != MagickFalse)
          ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
        AcquireNextImage(image_info,image,exception);
        if (GetNextImageInList(image) == (Image *) NULL)
          return(DestroyImageList(image));
        image=SyncNextImageInList(image);
      }
    image->alpha_trait=alpha_trait;
    image->compression=compression;
    image->columns=dds_info.width;
    image->rows=dds_info.height;
    image->storage_class=DirectClass;
    image->endian=LSBEndian;
    image->depth=8;
    if (image_info->ping != MagickFalse)
      {
        /* Ping mode: header information only, no pixel decoding. */
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
    status=SetImageExtent(image,image->columns,image->rows,exception);
    if (status == MagickFalse)
      return(DestroyImageList(image));
    (void) SetImageBackgroundColor(image,exception);
    /* Decode the pixel payload with the format-specific decoder. */
    status=(decoder)(image_info,image,&dds_info,read_mipmaps,exception);
    if (status == MagickFalse)
      {
        /* Keep any frames decoded so far; fail only if none exist. */
        (void) CloseBlob(image);
        if (n == 0)
          return(DestroyImageList(image));
        return(GetFirstImageInList(image));
      }
  }
  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}
/*
  ReadDDSInfo() parses the fixed DDS file header (the 4-byte magic word has
  already been consumed by the caller) into *dds_info.  Returns MagickTrue on
  success, MagickFalse when a structure size or a required flag is wrong.
  NOTE(review): the ReadBlobLSBLong() calls walk the header sequentially and
  must stay in this exact order.
*/
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
  size_t
    hdr_size,
    required;

  /* Seek to start of header */
  (void) SeekBlob(image, 4, SEEK_SET);

  /* Check header field */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 124)  /* DDS_HEADER is always 124 bytes */
    return MagickFalse;

  /* Fill in DDS info struct */
  dds_info->flags = ReadBlobLSBLong(image);

  /* Check required flags */
  required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
  if ((dds_info->flags & required) != required)
    return MagickFalse;

  dds_info->height = ReadBlobLSBLong(image);
  dds_info->width = ReadBlobLSBLong(image);
  dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
  dds_info->depth = ReadBlobLSBLong(image);
  dds_info->mipmapcount = ReadBlobLSBLong(image);

  (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */

  /* Read pixel format structure (DDS_PIXELFORMAT, always 32 bytes) */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 32)
    return MagickFalse;

  dds_info->pixelformat.flags = ReadBlobLSBLong(image);
  dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
  dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
  dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);

  dds_info->ddscaps1 = ReadBlobLSBLong(image);
  dds_info->ddscaps2 = ReadBlobLSBLong(image);

  (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */

  return MagickTrue;
}
/*
  SetDXT1Pixels() writes one decoded 4x4 DXT1 block into the pixel patch at
  q, clipping against the image edges.  `bits` holds sixteen 2-bit palette
  indices in row-major order.  Returns MagickFalse when a pixel carries a
  non-zero alpha value while the image has no alpha channel; the caller
  treats that return as "alpha correction needed and retry", not as an error.
*/
static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y,
  DDSColors colors,size_t bits,Quantum *q)
{
  register ssize_t
    i;

  ssize_t
    j;

  unsigned char
    code;

  for (j = 0; j < 4; j++)
  {
    for (i = 0; i < 4; i++)
    {
      /* Skip texels that fall outside the image (partial edge blocks) */
      if ((x + i) < (ssize_t) image->columns &&
          (y + j) < (ssize_t) image->rows)
        {
          code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
          SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
          SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
          SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
          SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q);
          if ((colors.a[code] != 0) &&
              (image->alpha_trait == UndefinedPixelTrait))
            return(MagickFalse);
          q+=GetPixelChannels(image);
        }
    }
  }
  return(MagickTrue);
}
/*
  ReadMipmaps() decodes the trailing mipmap chain into additional images
  appended to the list, halving width and height at each level, using the
  supplied per-format pixel decoder.  Returns MagickFalse on EOF, list
  allocation failure, or decode failure.
*/
static MagickBooleanType ReadMipmaps(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,DDSPixelDecoder decoder,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  /*
    Only read mipmaps for textures and cube maps
  */
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
      || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      register ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);

      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        AcquireNextImage(image_info,image,exception);
        if (image->next == (Image *) NULL)
          return(MagickFalse);
        /* Levels inherit the alpha trait of the base image */
        image->next->alpha_trait=image->alpha_trait;
        image=SyncNextImageInList(image);
        status=SetImageExtent(image,w,h,exception);
        if (status == MagickFalse)
          break;
        status=decoder(image,dds_info,exception);
        if (status == MagickFalse)
          break;
        /* Stop once the 1x1 level has been read */
        if ((w == 1) && (h == 1))
          break;
        w=DIV2(w);
        h=DIV2(h);
      }
    }
  return(status);
}
/*
  ReadDXT1Pixels() decodes a DXT1 (BC1) compressed stream: one 8-byte block
  per 4x4 texel tile (two RGB565 endpoints plus 32 index bits).  When
  SetDXT1Pixels() reports a pixel that needs alpha while the image has none,
  the image alpha is reset and the tile is rewritten.  Returns MagickFalse
  on EOF or pixel-cache failure.
*/
static MagickBooleanType ReadDXT1Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  register Quantum
    *q;

  register ssize_t
    x;

  size_t
    bits;

  ssize_t
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
        MagickMin(4,image->rows-y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);

      /* Read 8 bytes of data from the image */
      c0=ReadBlobLSBShort(image);
      c1=ReadBlobLSBShort(image);
      bits=ReadBlobLSBLong(image);

      CalculateColors(c0,c1,&colors,MagickFalse);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);

      /* Write the pixels */
      if (SetDXT1Pixels(image,x,y,colors,bits,q) == MagickFalse)
        {
          /* Correct alpha, then rewrite the same tile */
          SetImageAlpha(image,QuantumRange,exception);
          q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
            MagickMin(4,image->rows-y),exception);
          if (q != (Quantum *) NULL)
            SetDXT1Pixels(image,x,y,colors,bits,q);
        }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  ReadDXT1() decodes the top-level DXT1 image, then either decodes or skips
  the trailing mipmap chain (8 bytes per 4x4 texel block).
*/
static MagickBooleanType ReadDXT1(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=ReadDXT1Pixels(image,dds_info,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,8,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT1Pixels,exception));
}
/*
  ReadDXT3Pixels() decodes a DXT3 (BC2) compressed stream: each 16-byte
  block holds 64 bits of explicit 4-bit-per-texel alpha followed by a DXT
  color block.  Returns MagickFalse on EOF or pixel-cache failure.
*/
static MagickBooleanType ReadDXT3Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  register Quantum
    *q;

  register ssize_t
    i,
    x;

  unsigned char
    alpha;

  size_t
    a0,
    a1,
    bits,
    code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);

      /* Read alpha values (8 bytes): a0 covers rows 0-1, a1 rows 2-3 */
      a0 = ReadBlobLSBLong(image);
      a1 = ReadBlobLSBLong(image);

      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);

      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);

      /* Write the pixels, clipping partial edge blocks */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) image->columns && (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);

              /*
                Extract alpha value: multiply 0..15 by 17 to get range 0..255
              */
              if (j < 2)
                alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
              else
                alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  ReadDXT3() decodes the top-level DXT3 image, then either decodes or skips
  the trailing mipmap chain (16 bytes per 4x4 texel block).
*/
static MagickBooleanType ReadDXT3(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=ReadDXT3Pixels(image,dds_info,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,16,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT3Pixels,exception));
}
/*
  ReadDXT5Pixels() decodes a DXT5 (BC3) compressed stream: each 16-byte
  block holds two 8-bit alpha endpoints, 48 bits of 3-bit alpha indices,
  and a DXT color block.  Returns MagickFalse on EOF or pixel-cache failure.
*/
static MagickBooleanType ReadDXT5Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  MagickSizeType
    alpha_bits;

  register Quantum
    *q;

  register ssize_t
    i,
    x;

  unsigned char
    a0,
    a1;

  size_t
    alpha,
    bits,
    code,
    alpha_code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);

      /* Read alpha values (8 bytes): 2 endpoints + 48 index bits */
      a0 = (unsigned char) ReadBlobByte(image);
      a1 = (unsigned char) ReadBlobByte(image);

      alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
      alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);

      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);

      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);

      /* Write the pixels, clipping partial edge blocks */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) image->columns &&
              (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);

              /*
                Extract alpha value: codes 0/1 are the endpoints; when
                a0 > a1 the remaining 6 codes interpolate between them,
                otherwise 4 codes interpolate and 6/7 mean 0 and 255.
              */
              alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
              if (alpha_code == 0)
                alpha = a0;
              else if (alpha_code == 1)
                alpha = a1;
              else if (a0 > a1)
                alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
              else if (alpha_code == 6)
                alpha = 0;
              else if (alpha_code == 7)
                alpha = 255;
              else
                alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  ReadDXT5() decodes the top-level DXT5 image, then either decodes or skips
  the trailing mipmap chain (16 bytes per 4x4 texel block).
*/
static MagickBooleanType ReadDXT5(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  status=ReadDXT5Pixels(image,dds_info,exception);
  if (status == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps == MagickFalse)
    return(SkipDXTMipmaps(image,dds_info,16,exception));
  return(ReadMipmaps(image_info,image,dds_info,ReadDXT5Pixels,exception));
}
/*
  ReadUncompressedRGBPixels() reads raw pixel data: 8-bit grayscale,
  16-bit RGB565, 24-bit BGR, or 32-bit BGRX (the fourth byte is discarded).
  Returns MagickFalse on EOF or pixel-cache failure.
*/
static MagickBooleanType ReadUncompressedRGBPixels(Image *image,
  DDSInfo *dds_info,ExceptionInfo *exception)
{
  register Quantum
    *q;

  ssize_t
    x, y;

  unsigned short
    color;

  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 8)
        SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q);
      else if (dds_info->pixelformat.rgb_bitcount == 16)
        {
          /* RGB565: expand each field to 0..255 */
          color=ReadBlobShort(image);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            (((color >> 11)/31.0)*255)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 5) >> 10)/63.0)*255)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
        }
      else
        {
          /* Bytes are stored B, G, R (plus a pad byte for 32-bit) */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          if (dds_info->pixelformat.rgb_bitcount == 32)
            (void) ReadBlobByte(image);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  ReadUncompressedRGB() decodes an uncompressed DDS image without alpha
  and then reads or skips the mipmap chain.  Only the RGB565 bit layout
  is supported for 16-bit data.

  Bug fix: the mipmap skip previously assumed 3 bytes per pixel for all
  layouts; the byte count now matches what ReadUncompressedRGBPixels()
  actually consumes (1 for 8-bit, 2 for 16-bit, 4 for 32-bit, else 3),
  so mipmap skipping no longer desynchronizes the blob for multi-image
  (cubemap/volume) files.
*/
static MagickBooleanType ReadUncompressedRGB(const ImageInfo *image_info,
  Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  int
    pixel_size;

  if (dds_info->pixelformat.rgb_bitcount == 8)
    (void) SetImageType(image,GrayscaleType,exception);
  else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
    dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
    ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
      image->filename);
  if (ReadUncompressedRGBPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBPixels,
      exception));
  /* Bytes consumed per pixel by ReadUncompressedRGBPixels() */
  switch (dds_info->pixelformat.rgb_bitcount)
  {
    case 8: pixel_size=1; break;
    case 16: pixel_size=2; break;
    case 32: pixel_size=4; break;
    default: pixel_size=3; break;
  }
  return(SkipRGBMipmaps(image,dds_info,pixel_size,exception));
}
/*
  ReadUncompressedRGBAPixels() reads raw pixel data with alpha.  16-bit
  layouts are identified by their bit masks: ARGB1555 (alphaBits=1),
  8-bit luminance + 8-bit alpha (alphaBits=2), or ARGB4444 (alphaBits=4);
  anything else 16-bit is rejected.  Other bit counts are read as BGRA8888.
  Returns MagickFalse on EOF or pixel-cache failure.
*/
static MagickBooleanType ReadUncompressedRGBAPixels(Image *image,
  DDSInfo *dds_info,ExceptionInfo *exception)
{
  register Quantum
    *q;

  ssize_t
    alphaBits,
    x,
    y;

  unsigned short
    color;

  alphaBits=0;
  if (dds_info->pixelformat.rgb_bitcount == 16)
    {
      if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
        alphaBits=1;
      else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
        {
          alphaBits=2;
          (void) SetImageType(image,GrayscaleAlphaType,exception);
        }
      else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
        alphaBits=4;
      else
        ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
          image->filename);
    }
  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);
    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 16)
        {
          color=ReadBlobShort(image);
          if (alphaBits == 1)
            {
              /* ARGB1555: 1-bit alpha, 5 bits per color channel */
              SetPixelAlpha(image,(color & (1 << 15)) ? QuantumRange : 0,q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 1) >> 11)/31.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 6) >> 11)/31.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
            }
          else if (alphaBits == 2)
            {
              /* Luminance + alpha: high byte alpha, low byte gray */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (color >> 8)),q);
              SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q);
            }
          else
            {
              /* ARGB4444: 4 bits per channel */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (((color >> 12)/15.0)*255)),q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 4) >> 12)/15.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 8) >> 12)/15.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 12) >> 12)/15.0)*255)),q);
            }
        }
      else
        {
          /* BGRA8888 */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}
/*
  ReadUncompressedRGBA() decodes uncompressed pixel data with alpha
  (ARGB1555, luminance+alpha, ARGB4444, or BGRA8888) and then reads or
  skips the mipmap chain.

  Bug fix: the mipmap skip previously assumed 4 bytes per pixel; the
  16-bit layouts consume only 2 bytes per pixel, so skipping used the
  wrong offsets for those files.
*/
static MagickBooleanType ReadUncompressedRGBA(const ImageInfo *image_info,
  Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  int
    pixel_size;

  if (ReadUncompressedRGBAPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);
  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBAPixels,
      exception));
  /* Bytes consumed per pixel by ReadUncompressedRGBAPixels() */
  pixel_size=(dds_info->pixelformat.rgb_bitcount == 16) ? 2 : 4;
  return(SkipRGBMipmaps(image,dds_info,pixel_size,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterDDSImage() adds attributes for the DDS image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterDDSImage method is:
%
% RegisterDDSImage(void)
%
*/
/*
  RegisterDDSImage() registers the DDS coder under its three format
  aliases (DDS, DXT1, DXT5), each with the same reader, writer, and
  magic-detection handler, and marks the decoder as requiring a
  seekable stream.
*/
ModuleExport size_t RegisterDDSImage(void)
{
  static const char
    *aliases[] = { "DDS", "DXT1", "DXT5" };

  MagickInfo
    *entry;

  size_t
    i;

  for (i=0; i < (sizeof(aliases)/sizeof(aliases[0])); i++)
  {
    entry=AcquireMagickInfo("DDS",aliases[i],"Microsoft DirectDraw Surface");
    entry->decoder=(DecodeImageHandler *) ReadDDSImage;
    entry->encoder=(EncodeImageHandler *) WriteDDSImage;
    entry->magick=(IsImageFormatHandler *) IsDDS;
    entry->flags|=CoderDecoderSeekableStreamFlag;
    (void) RegisterMagickInfo(entry);
  }
  return(MagickImageCoderSignature);
}
/*
  RemapIndices() translates 4x4 block color indices through the pixel map:
  slots the map marks unused (-1) become index 3; every other slot copies
  the palette index of its mapped source pixel.
*/
static void RemapIndices(const ssize_t *map, const unsigned char *source,
  unsigned char *target)
{
  register ssize_t
    slot;

  for (slot=0; slot < 16; slot++)
    target[slot]=(map[slot] == -1) ? (unsigned char) 3 : source[map[slot]];
}
/*
Skip the mipmap images for compressed (DXTn) dds files
*/
/*
  SkipDXTMipmaps() seeks past the mipmap chain of a compressed (DXTn)
  image; texel_size is the byte size of one 4x4 block (8 for DXT1,
  16 for DXT3/DXT5).  Returns MagickFalse only on a premature EOF.
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info,
  int texel_size,ExceptionInfo *exception)
{
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
      || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;

      register ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);

      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        /* Each level stores ceil(w/4)*ceil(h/4) compressed blocks */
        offset=(MagickOffsetType)((w+3)/4)*((h+3)/4)*texel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}
/*
Skip the mipmap images for uncompressed (RGB or RGBA) dds files
*/
/*
  SkipRGBMipmaps() seeks past the mipmap chain of an uncompressed (RGB or
  RGBA) image; pixel_size is the stored byte size of one pixel.  Returns
  MagickFalse only on a premature EOF.
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info,
  int pixel_size,ExceptionInfo *exception)
{
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
      || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;

      register ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);

      /*
        Mipmapcount includes the main image, so start from one
      */
      for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        offset=(MagickOffsetType)w*h*pixel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterDDSImage() removes format registrations made by the
% DDS module from the list of supported formats.
%
% The format of the UnregisterDDSImage method is:
%
% UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
(void) UnregisterMagickInfo("DDS");
(void) UnregisterMagickInfo("DXT1");
(void) UnregisterMagickInfo("DXT5");
}
/*
  WriteAlphas() emits the 8-byte DXT5 alpha block.  It compresses the
  sixteen alpha samples with both the 5-interpolant encoding (which has
  explicit 0/255 codes) and the 7-interpolant encoding, keeps whichever
  has the lower error, remaps indices into the 5-alpha code space when
  the 7-alpha palette wins, then packs sixteen 3-bit indices into 6 bytes
  after the two endpoint bytes.
*/
static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5,
  size_t max5, size_t min7, size_t max7)
{
  register ssize_t
    i;

  size_t
    err5,
    err7,
    j;

  unsigned char
    indices5[16],
    indices7[16];

  FixRange(min5,max5,5);
  err5 = CompressAlpha(min5,max5,5,alphas,indices5);

  FixRange(min7,max7,7);
  err7 = CompressAlpha(min7,max7,7,alphas,indices7);

  if (err7 < err5)
  {
    /* 7-alpha encoding won: translate its indices and swap endpoints */
    for (i=0; i < 16; i++)
    {
      unsigned char
        index;

      index = indices7[i];
      if( index == 0 )
        indices5[i] = 1;
      else if (index == 1)
        indices5[i] = 0;
      else
        indices5[i] = 9 - index;
    }

    min5 = max7;
    max5 = min7;
  }

  (void) WriteBlobByte(image,(unsigned char) min5);
  (void) WriteBlobByte(image,(unsigned char) max5);

  /* Pack 16 x 3-bit indices as two 24-bit little-endian groups */
  for(i=0; i < 2; i++)
  {
    size_t
      value = 0;

    for (j=0; j < 8; j++)
    {
      size_t index = (size_t) indices5[j + i*8];
      value |= ( index << 3*j );
    }

    for (j=0; j < 3; j++)
    {
      size_t byte = (value >> 8*j) & 0xff;
      (void) WriteBlobByte(image,(unsigned char) byte);
    }
  }
}
/*
  WriteCompressed() fits endpoints to the (weighted) unique colors of a
  4x4 block along their principal component and writes the resulting DXT
  color block.  Cluster fit is the slower/higher-quality fit and is used
  when requested; range fit is the fallback (and the only option for an
  empty block).
*/
static void WriteCompressed(Image *image, const size_t count,
  DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit)
{
  float
    covariance[16];

  DDSVector3
    end,
    principle,
    start;

  DDSVector4
    metric;

  unsigned char
    indices[16];

  VectorInit(metric,1.0f);
  VectorInit3(start,0.0f);
  VectorInit3(end,0.0f);

  ComputeWeightedCovariance(count,points,covariance);
  ComputePrincipleComponent(covariance,&principle);

  if ((clusterFit == MagickFalse) || (count == 0))
    CompressRangeFit(count,points,map,principle,metric,&start,&end,indices);
  else
    CompressClusterFit(count,points,map,principle,metric,&start,&end,indices);

  WriteIndices(image,start,end,indices);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format.
%
% The format of the WriteDDSImage method is:
%
% MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
/*
  WriteDDSImage() writes `image` as a DDS file: DXT5 by default, DXT1 when
  the image has no alpha or DXT1 is requested, or uncompressed RGB(A) when
  compression is disabled.  Honored image options: dds:compression
  (dxt1|none), dds:cluster-fit, dds:weight-by-alpha, dds:mipmaps (a count
  or "fromlist"), dds:raw.
*/
static MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
  Image *image, ExceptionInfo *exception)
{
  const char
    *option;

  size_t
    compression,
    columns,
    maxMipmaps,
    mipmaps,
    pixelFormat,
    rows;

  MagickBooleanType
    clusterFit,
    fromlist,
    status,
    weightByAlpha;

  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  (void) TransformImageColorspace(image,sRGBColorspace,exception);
  /*
    Select pixel format and compression.
  */
  pixelFormat=DDPF_FOURCC;
  compression=FOURCC_DXT5;

  if (image->alpha_trait == UndefinedPixelTrait)
    compression=FOURCC_DXT1;

  if (LocaleCompare(image_info->magick,"dxt1") == 0)
    compression=FOURCC_DXT1;

  if (image_info->compression == DXT1Compression)
    compression=FOURCC_DXT1;
  else if (image_info->compression == NoCompression)
    pixelFormat=DDPF_RGB;

  option=GetImageOption(image_info,"dds:compression");
  if (option != (char *) NULL)
    {
      if (LocaleCompare(option,"dxt1") == 0)
        compression=FOURCC_DXT1;
      if (LocaleCompare(option,"none") == 0)
        pixelFormat=DDPF_RGB;
    }

  /*
    Cluster fit (and alpha weighting) only apply to DXT output.
  */
  clusterFit=MagickFalse;
  weightByAlpha=MagickFalse;

  if (pixelFormat == DDPF_FOURCC)
    {
      option=GetImageOption(image_info,"dds:cluster-fit");
      if (IsStringTrue(option) != MagickFalse)
        {
          clusterFit=MagickTrue;
          if (compression != FOURCC_DXT1)
            {
              option=GetImageOption(image_info,"dds:weight-by-alpha");
              if (IsStringTrue(option) != MagickFalse)
                weightByAlpha=MagickTrue;
            }
        }
    }

  /*
    Determine the mipmap count: either from the image list ("fromlist"),
    or computed from the image dimensions when both are powers of two.
  */
  mipmaps=0;
  fromlist=MagickFalse;
  option=GetImageOption(image_info,"dds:mipmaps");
  if (option != (char *) NULL)
    {
      if (LocaleNCompare(option,"fromlist",8) == 0)
        {
          Image
            *next;

          fromlist=MagickTrue;
          next=image->next;
          while(next != (Image *) NULL)
          {
            mipmaps++;
            next=next->next;
          }
        }
    }

  if ((mipmaps == 0) &&
      ((image->columns & (image->columns - 1)) == 0) &&
      ((image->rows & (image->rows - 1)) == 0))
    {
      maxMipmaps=SIZE_MAX;
      if (option != (char *) NULL)
        maxMipmaps=StringToUnsignedLong(option);

      if (maxMipmaps != 0)
        {
          columns=image->columns;
          rows=image->rows;
          while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps)
          {
            columns=DIV2(columns);
            rows=DIV2(rows);
            mipmaps++;
          }
        }
    }

  /* dds:raw suppresses the header (and therefore mipmaps). */
  option=GetImageOption(image_info,"dds:raw");
  if (IsStringTrue(option) == MagickFalse)
    WriteDDSInfo(image,pixelFormat,compression,mipmaps);
  else
    mipmaps=0;

  WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha,
    exception);

  if ((mipmaps > 0) && (WriteMipmaps(image,image_info,pixelFormat,compression,
       mipmaps,fromlist,clusterFit,weightByAlpha,exception) == MagickFalse))
    return(MagickFalse);

  (void) CloseBlob(image);
  return(MagickTrue);
}
/*
  WriteDDSInfo() writes the 128-byte DDS header: magic word, DDS_HEADER
  (flags, dimensions, pitch/linear size, mipmap count, software tag), the
  DDS_PIXELFORMAT for either a FourCC-compressed or an uncompressed
  BGR(A) layout, and the capability words.  NOTE(review): field order
  follows the DDS_HEADER layout and must not be rearranged.
*/
static void WriteDDSInfo(Image *image, const size_t pixelFormat,
  const size_t compression, const size_t mipmaps)
{
  char
    software[MagickPathExtent];

  register ssize_t
    i;

  unsigned int
    format,
    caps,
    flags;

  flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT |
    DDSD_PIXELFORMAT);
  caps=(unsigned int) DDSCAPS_TEXTURE;
  format=(unsigned int) pixelFormat;

  if (format == DDPF_FOURCC)
      flags=flags | DDSD_LINEARSIZE;
  else
      flags=flags | DDSD_PITCH;

  if (mipmaps > 0)
    {
      flags=flags | (unsigned int) DDSD_MIPMAPCOUNT;
      caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX);
    }

  if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait)
    format=format | DDPF_ALPHAPIXELS;

  (void) WriteBlob(image,4,(unsigned char *) "DDS ");
  (void) WriteBlobLSBLong(image,124);  /* DDS_HEADER size */
  (void) WriteBlobLSBLong(image,flags);
  (void) WriteBlobLSBLong(image,(unsigned int) image->rows);
  (void) WriteBlobLSBLong(image,(unsigned int) image->columns);

  if (pixelFormat == DDPF_FOURCC)
    {
      /* Compressed DDS requires linear compressed size of first image */
      if (compression == FOURCC_DXT1)
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8));
      else /* DXT5 */
        (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1,
          (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16));
    }
  else
    {
      /* Uncompressed DDS requires byte pitch of first image */
      if (image->alpha_trait != UndefinedPixelTrait)
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4));
      else
        (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3));
    }

  (void) WriteBlobLSBLong(image,0x00);  /* depth (volume textures only) */
  (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1);

  /* The 44-byte reserved area doubles as a software tag */
  (void) memset(software,0,sizeof(software));
  (void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent);
  (void) WriteBlob(image,44,(unsigned char *) software);

  (void) WriteBlobLSBLong(image,32);  /* DDS_PIXELFORMAT size */
  (void) WriteBlobLSBLong(image,format);

  if (pixelFormat == DDPF_FOURCC)
    {
      (void) WriteBlobLSBLong(image,(unsigned int) compression);
      for(i=0;i < 5;i++)  /* bitcount / masks */
        (void) WriteBlobLSBLong(image,0x00);
    }
  else
    {
      (void) WriteBlobLSBLong(image,0x00);  /* no FourCC */
      if (image->alpha_trait != UndefinedPixelTrait)
        {
          /* 32-bit BGRA masks */
          (void) WriteBlobLSBLong(image,32);
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0xff000000);
        }
      else
        {
          /* 24-bit BGR masks */
          (void) WriteBlobLSBLong(image,24);
          (void) WriteBlobLSBLong(image,0xff0000);
          (void) WriteBlobLSBLong(image,0xff00);
          (void) WriteBlobLSBLong(image,0xff);
          (void) WriteBlobLSBLong(image,0x00);
        }
    }

  (void) WriteBlobLSBLong(image,caps);
  for(i=0;i < 4;i++)  /* ddscaps2 + reserved region */
    (void) WriteBlobLSBLong(image,0x00);
}
/*
  WriteFourCC() compresses the image as DXT blocks, one 4x4 tile at a
  time.  For each tile it collects the unique colors (accumulating a
  per-color weight, optionally derived from alpha), records per-pixel
  alpha extrema for the two DXT5 alpha encodings, writes the DXT5 alpha
  block when applicable, and then color-compresses the tile — a direct
  single-color fit when the tile has one unique color, otherwise a
  range/cluster fit.
*/
static void WriteFourCC(Image *image, const size_t compression,
  const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
  ExceptionInfo *exception)
{
  register ssize_t
    x;

  ssize_t
    i,
    y,
    bx,
    by;

  register const Quantum
    *p;

  for (y=0; y < (ssize_t) image->rows; y+=4)
  {
    for (x=0; x < (ssize_t) image->columns; x+=4)
    {
      MagickBooleanType
        match;

      DDSVector4
        point,
        points[16];

      size_t
        count = 0,
        max5 = 0,
        max7 = 0,
        min5 = 255,
        min7 = 255,
        columns = 4,
        rows = 4;

      ssize_t
        alphas[16],
        map[16];

      unsigned char
        alpha;

      /* Clip partial tiles at the right/bottom edges */
      if (x + columns >= image->columns)
        columns = image->columns - x;

      if (y + rows >= image->rows)
        rows = image->rows - y;

      p=GetVirtualPixels(image,x,y,columns,rows,exception);
      if (p == (const Quantum *) NULL)
        break;

      /* -1 marks tile slots with no pixel / no unique color yet */
      for (i=0; i<16; i++)
      {
        map[i] = -1;
        alphas[i] = -1;
      }

      for (by=0; by < (ssize_t) rows; by++)
      {
        for (bx=0; bx < (ssize_t) columns; bx++)
        {
          if (compression == FOURCC_DXT5)
            alpha = ScaleQuantumToChar(GetPixelAlpha(image,p));
          else
            alpha = 255;

          if (compression == FOURCC_DXT5)
            {
              /* Track extrema for both DXT5 alpha encodings */
              if (alpha < min7)
                min7 = alpha;
              if (alpha > max7)
                max7 = alpha;
              if (alpha != 0 && alpha < min5)
                min5 = alpha;
              if (alpha != 255 && alpha > max5)
                max5 = alpha;
            }

          alphas[4*by + bx] = (size_t)alpha;
          point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f;
          point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f;
          point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f;
          point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f;
          p+=GetPixelChannels(image);

          /* Merge with an existing unique color if possible */
          match = MagickFalse;
          for (i=0; i < (ssize_t) count; i++)
          {
            if ((points[i].x == point.x) &&
                (points[i].y == point.y) &&
                (points[i].z == point.z) &&
                (alpha >= 128 || compression == FOURCC_DXT5))
              {
                points[i].w += point.w;
                map[4*by + bx] = i;
                match = MagickTrue;
                break;
              }
          }

          if (match != MagickFalse)
            continue;

          points[count].x = point.x;
          points[count].y = point.y;
          points[count].z = point.z;
          points[count].w = point.w;
          map[4*by + bx] = count;
          count++;
        }
      }

      for (i=0; i < (ssize_t) count; i++)
        points[i].w = sqrt(points[i].w);

      if (compression == FOURCC_DXT5)
        WriteAlphas(image,alphas,min5,max5,min7,max7);

      if (count == 1)
        WriteSingleColorFit(image,points,map);
      else
        WriteCompressed(image,count,points,map,clusterFit);
    }
  }
}
/*
  WriteImageData() dispatches one image's pixel data to the proper
  encoder: DXT block compression for DDPF_FOURCC output, otherwise raw
  uncompressed BGR(A).
*/
static void WriteImageData(Image *image, const size_t pixelFormat,
  const size_t compression,const MagickBooleanType clusterFit,
  const MagickBooleanType weightByAlpha, ExceptionInfo *exception)
{
  if (pixelFormat != DDPF_FOURCC)
    {
      WriteUncompressed(image,exception);
      return;
    }
  WriteFourCC(image,compression,clusterFit,weightByAlpha,exception);
}
/*
  ClampToLimit() rounds `value` to the nearest integer and clamps the
  result to [0, limit].

  Bug fix: the previous code cast the (possibly negative) rounded value
  to int and assigned it to a size_t, where negative values wrap to a
  huge unsigned number — so inputs below about -0.5 clamped to `limit`
  instead of 0.  The old `result < 0.0f` test compared an unsigned value
  against zero and was dead code.
*/
static inline size_t ClampToLimit(const float value, const size_t limit)
{
  size_t
    result;

  if (value <= 0.0f)
    return(0);
  result=(size_t) (value+0.5f);  /* round to nearest, value is positive */
  if (result > limit)
    return(limit);
  return(result);
}
/*
  ColorTo565() packs a normalized RGB point into a 16-bit RGB565 word:
  5 bits red (high), 6 bits green, 5 bits blue (low).
*/
static inline size_t ColorTo565(const DDSVector3 point)
{
  const size_t
    blue = ClampToLimit(31.0f*point.z,31),
    green = ClampToLimit(63.0f*point.y,63),
    red = ClampToLimit(31.0f*point.x,31);

  return((red << 11) | (green << 5) | blue);
}
/*
  WriteIndices() writes one DXT color block: two little-endian RGB565
  endpoints followed by sixteen 2-bit selection indices (4 bytes).
  Endpoints are emitted with the larger value first (four-color mode);
  when the pair must be swapped, indices 0/1 and 2/3 are exchanged via
  `^ 0x1` so each pixel keeps its selected interpolant.  Equal endpoints
  force all indices to 0.
*/
static void WriteIndices(Image *image, const DDSVector3 start,
  const DDSVector3 end, unsigned char *indices)
{
  register ssize_t
    i;

  size_t
    a,
    b;

  unsigned char
    remapped[16];

  const unsigned char
    *ind;

  a = ColorTo565(start);
  b = ColorTo565(end);

  for (i=0; i<16; i++)
  {
    if( a < b )
      remapped[i] = (indices[i] ^ 0x1) & 0x3;
    else if( a == b )
      remapped[i] = 0;
    else
      remapped[i] = indices[i];
  }

  if( a < b )
    Swap(a,b);

  (void) WriteBlobByte(image,(unsigned char) (a & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (a >> 8));

  (void) WriteBlobByte(image,(unsigned char) (b & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (b >> 8));

  /* Four indices per byte, lowest bits first */
  for (i=0; i<4; i++)
  {
     ind = remapped + 4*i;
     (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) |
       (ind[3] << 6));
  }
}
/*
  WriteMipmaps() appends `mipmaps` progressively half-sized levels after
  the base image data.  When `fromlist` is set the levels are taken from
  the image list (each must match the expected mip dimensions); otherwise
  each level is resized from the base image, or from the previous level
  when dds:fast-mipmaps is enabled.  Every level writes through the base
  image's blob.  Returns MagickFalse on resize failure or a size mismatch
  in the supplied list.

  Bug fix: the WriteImageData() call passed weightByAlpha and clusterFit
  in swapped order (both are MagickBooleanType, so the compiler could not
  catch it); mipmap levels were silently encoded with the two options
  exchanged.
*/
static MagickBooleanType WriteMipmaps(Image *image,const ImageInfo *image_info,
  const size_t pixelFormat,const size_t compression,const size_t mipmaps,
  const MagickBooleanType fromlist,const MagickBooleanType clusterFit,
  const MagickBooleanType weightByAlpha,ExceptionInfo *exception)
{
  const char
    *option;

  Image
    *mipmap_image,
    *resize_image;

  MagickBooleanType
    fast_mipmaps,
    status;

  register ssize_t
    i;

  size_t
    columns,
    rows;

  columns=DIV2(image->columns);
  rows=DIV2(image->rows);

  option=GetImageOption(image_info,"dds:fast-mipmaps");
  fast_mipmaps=IsStringTrue(option);

  mipmap_image=image;
  resize_image=image;
  status=MagickTrue;
  for (i=0; i < (ssize_t) mipmaps; i++)
  {
    if (fromlist == MagickFalse)
      {
        mipmap_image=ResizeImage(resize_image,columns,rows,TriangleFilter,
          exception);
        if (mipmap_image == (Image *) NULL)
          {
            status=MagickFalse;
            break;
          }
      }
    else
      {
        mipmap_image=mipmap_image->next;
        if ((mipmap_image->columns != columns) || (mipmap_image->rows != rows))
          ThrowBinaryException(CoderError,"ImageColumnOrRowSizeIsNotSupported",
            image->filename);
      }

    /* Write this level through the parent image's blob */
    DestroyBlob(mipmap_image);
    mipmap_image->blob=ReferenceBlob(image->blob);

    WriteImageData(mipmap_image,pixelFormat,compression,clusterFit,
      weightByAlpha,exception);

    if (fromlist == MagickFalse)
      {
        if (fast_mipmaps == MagickFalse)
          mipmap_image=DestroyImage(mipmap_image);
        else
          {
            /* Keep this level as the source for the next resize */
            if (resize_image != image)
              resize_image=DestroyImage(resize_image);
            resize_image=mipmap_image;
          }
      }

    columns=DIV2(columns);
    rows=DIV2(rows);
  }
  if (resize_image != image)
    resize_image=DestroyImage(resize_image);

  return(status);
}
/*
  WriteSingleColorFit() encodes a block whose pixels all share one color:
  looks up the optimal endpoint pair (and interpolant index) for that
  color in the precomputed DDS_LOOKUP tables, fills every in-bounds tile
  slot with that index, and writes the color block.
*/
static void WriteSingleColorFit(Image *image, const DDSVector4 *points,
  const ssize_t *map)
{
  DDSVector3
    start,
    end;

  register ssize_t
    i;

  unsigned char
    color[3],
    index,
    indexes[16],
    indices[16];

  /* Convert the normalized color back to 8-bit channels */
  color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255);
  color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255);
  color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255);

  index=0;
  ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index);

  for (i=0; i< 16; i++)
    indexes[i]=index;
  RemapIndices(map,indexes,indices);
  WriteIndices(image,start,end,indices);
}
/*
  WriteUncompressed() emits the image pixels row by row as raw bytes in
  the BGR(A) channel order that DDS expects.
*/
static void WriteUncompressed(Image *image, ExceptionInfo *exception)
{
  const Quantum
    *q;

  ssize_t
    col,
    row;

  for (row=0; row < (ssize_t) image->rows; row++)
  {
    q=GetVirtualPixels(image,0,row,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      break;
    for (col=0; col < (ssize_t) image->columns; col++)
    {
      /* DDS stores channels as blue, green, red (and optional alpha) */
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,q)));
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,q)));
      (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,q)));
      if (image->alpha_trait != UndefinedPixelTrait)
        (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,q)));
      q+=GetPixelChannels(image);
    }
  }
}
|
GB_unop__ainv_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__ainv_fc64_fc64
// op(A') function: GB_unop_tran__ainv_fc64_fc64
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = GB_FC64_ainv (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_FC64_ainv (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = GB_FC64_ainv (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx = op (cast (Ax)): apply the AINV unary operator over a dense or
   bitmap GxB_FC64_t array of anz entries, using nthreads OpenMP threads.
   Returns GrB_NO_VALUE when this operator/type pair is compiled out
   (GB_DISABLE), GrB_SUCCESS otherwise. */
GrB_Info GB_unop_apply__ainv_fc64_fc64
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity operator without typecast: a parallel memcpy suffices
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = GB_FC64_ainv (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// skip entries not present in the bitmap
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = GB_FC64_ainv (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = op (cast (A')): transpose A, typecast, and apply the AINV unary
   operator.  The transpose loops come from the included template file,
   driven by the GB_* macros defined above.  Returns GrB_NO_VALUE when
   this operator/type pair is compiled out (GB_DISABLE). */
GrB_Info GB_unop_tran__ainv_fc64_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
zhesv.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_types.h"
#include "plasma_workspace.h"
#include <string.h>
/***************************************************************************//**
*
* @ingroup plasma_hesv
*
* Solves a system of linear equations A * X = B with LTLt factorization.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
* TODO: only support Lower for now
*
* @param[in] n
* The order of the matrix A. n >= 0.
*
* @param[in] nrhs
* The number of right hand sides, i.e., the number of
* columns of the matrix B. nrhs >= 0.
*
* @param[in,out] A
* Details of the LTL factorization of the Hermitian matrix A,
* as computed by plasma_zhetrf.
*
* @param[in] lda
* The leading dimension of the array A.
*
* @param[in,out] T
* Details of the LU factorization of the band matrix A, as
* computed by plasma_zgbtrf.
*
* @param[in] ldt
* The leading dimension of the array T.
*
* @param[in] ipiv
* The pivot indices used for zhetrf; for 1 <= i <= min(m,n),
* row i of the matrix was interchanged with row ipiv(i).
*
* @param[in] ipiv2
* The pivot indices used for zgbtrf; for 1 <= i <= min(m,n),
* row i of the matrix was interchanged with row ipiv(i).
*
* @param[in,out] B
* On entry, the n-by-nrhs right hand side matrix B.
* On exit, if return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] ldb
* The leading dimension of the array B. ldb >= max(1,n).
*
*******************************************************************************
*
* @retval PlasmaSuccess successful exit
* @retval < 0 if -i, the i-th argument had an illegal value
*
*******************************************************************************
*
* @sa plasma_omp_zhesv
* @sa plasma_chesv
* @sa plasma_dsysv
* @sa plasma_ssysv
* @sa plasma_zhetrf
* @sa plasma_zhetrs
*
******************************************************************************/
/*
 * Driver for the Hermitian solve A*X = B via Aasen's LTL^H factorization.
 * Converts the user's LAPACK-layout matrices to tile layout, runs the
 * asynchronous factor-and-solve (plasma_omp_zhesv), and converts the
 * solution back into pB.
 *
 * Fix over the previous revision: every descriptor-creation error path
 * now destroys the descriptors created before it, so tile storage is no
 * longer leaked on failure.
 */
int plasma_zhesv(plasma_enum_t uplo, int n, int nrhs,
                 plasma_complex64_t *pA, int lda,
                 int *ipiv,
                 plasma_complex64_t *pT, int ldt,
                 int *ipiv2,
                 plasma_complex64_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    // NOTE(review): the codes -5/-7/-10 do not match these arguments'
    // positions in the signature; kept unchanged for backward
    // compatibility -- verify against the PLASMA error convention.
    if (//(uplo != PlasmaUpper) &&
        (uplo != PlasmaLower)) {
        plasma_error("illegal value of uplo (Upper not supported, yet)");
        return -1;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return -2;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -5;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -7;
    }
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -10;
    }

    // quick return
    if (imax(n, nrhs) == 0)
        return PlasmaSuccess;

    // Set tiling parameters.
    int nb = plasma->nb;

    // Initialize barrier
    plasma_barrier_init(&plasma->barrier);

    // Initialize tile matrix descriptors.
    plasma_desc_t A;
    plasma_desc_t T;
    plasma_desc_t B;
    int tku = (nb+nb+nb-1)/nb; // number of tiles in upper band (not including diagonal)
    int tkl = (nb+nb-1)/nb;    // number of tiles in lower band (not including diagonal)
    int lm  = (tku+tkl+1)*nb;  // since we use zgetrf on panel, we pivot back within panel.
                               // this could fill the last tile of the panel,
                               // and we need extra NB space on the bottom
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_band_create(PlasmaComplexDouble, PlasmaGeneral,
                                             nb, nb, lm, n, 0, 0, n, n, nb, nb,
                                             &T);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_band_create() failed");
        plasma_desc_destroy(&A);
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        n, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&T);
        plasma_desc_destroy(&A);
        return retval;
    }

    // Create workspace.
    plasma_desc_t W;
    int ldw = (1+5*A.mt)*nb; /* block column */
    retval = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
                                        ldw, nb, 0, 0, ldw, nb, &W);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&B);
        plasma_desc_destroy(&T);
        plasma_desc_destroy(&A);
        return retval;
    }

    // Create sequence.
    plasma_sequence_t *sequence = NULL;
    retval = plasma_sequence_create(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_sequence_create() failed");
        plasma_desc_destroy(&W);
        plasma_desc_destroy(&B);
        plasma_desc_destroy(&T);
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize request.
    plasma_request_t request = PlasmaRequestInitializer;

    // Initialize data.
    memset(T.matrix, 0, ldt*n*sizeof(plasma_complex64_t));
    for (int i = 0; i < nb; i++) ipiv[i] = 1+i;

    // asynchronous block
    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_zge2desc(pA, lda, A, sequence, &request);
        plasma_omp_zpb2desc(pT, ldt, T, sequence, &request);
        plasma_omp_zge2desc(pB, ldb, B, sequence, &request);
    }
    #pragma omp parallel
    #pragma omp master
    {
        // Call the tile async function.
        plasma_omp_zhesv(uplo, A, ipiv, T, ipiv2, B, W, sequence, &request);
    }
    #pragma omp parallel
    #pragma omp master
    {
        // Translate back to LAPACK layout.
        plasma_omp_zdesc2ge(B, pB, ldb, sequence, &request);
    }
    // implicit synchronization

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&T);
    plasma_desc_destroy(&B);
    plasma_desc_destroy(&W);

    // Return status.
    int status = sequence->status;
    plasma_sequence_destroy(sequence);
    return status;
}
/***************************************************************************//**
*
* @ingroup plasma_hesv
*
* Solves a system of linear equations using previously
* computed factorization.
* Non-blocking tile version of plasma_zhesv().
* May return before the computation is finished.
* Operates on matrices stored by tiles.
* All matrices are passed through descriptors.
* All dimensions are taken from the descriptors.
* Allows for pipelining of operations at runtime.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of A is stored;
* - PlasmaLower: Lower triangle of A is stored.
*
* @param[in] A
*          Descriptor of the Hermitian matrix A; it is factored in place
*          via the Aasen LTL^H factorization (plasma_pzhetrf_aasen).
*
* @param[in,out] B
* On entry, the n-by-nrhs right hand side matrix B.
* On exit, if return value = 0, the n-by-nrhs solution matrix X.
*
* @param[in] sequence
* Identifies the sequence of function calls that this call belongs to
* (for completion checks and exception handling purposes). Check
* the sequence->status for errors.
*
* @param[out] request
* Identifies this function call (for exception handling purposes).
*
* @retval void
* Errors are returned by setting sequence->status and
* request->status to error values. The sequence->status and
* request->status should never be set to PlasmaSuccess (the
* initial values) since another async call may be setting a
* failure value at the same time.
*
*******************************************************************************
*
* @sa plasma_zhesv
* @sa plasma_omp_zhesv
* @sa plasma_omp_chesv
* @sa plasma_omp_dsysv
* @sa plasma_omp_ssysv
* @sa plasma_omp_zhetrf
* @sa plasma_omp_zhetrs
*
******************************************************************************/
/*
 * Asynchronous tile routine: factor A = L*T*L^H (Aasen), LU-factor the
 * band matrix T, then solve for B in place.  Errors are reported through
 * sequence->status / request->status.
 */
void plasma_omp_zhesv(plasma_enum_t uplo,
plasma_desc_t A, int *ipiv,
plasma_desc_t T, int *ipiv2,
plasma_desc_t B,
plasma_desc_t W,
plasma_sequence_t *sequence,
plasma_request_t *request)
{
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
if (plasma == NULL) {
plasma_fatal_error("PLASMA not initialized");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// Check input arguments.
if (//(uplo != PlasmaUpper) &&
(uplo != PlasmaLower)) {
plasma_error("illegal value of uplo (Upper not supported, yet)");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(A) != PlasmaSuccess) {
plasma_error("invalid A");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (plasma_desc_check(B) != PlasmaSuccess) {
plasma_error("invalid B");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// NOTE(review): when sequence/request are NULL, plasma_request_fail()
// is still handed the NULL pointer -- verify it tolerates that.
if (sequence == NULL) {
plasma_fatal_error("NULL sequence");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
if (request == NULL) {
plasma_fatal_error("NULL request");
plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
return;
}
// quick return
if (A.n == 0 || B.n == 0)
return;
// Call the parallel functions.
// Aasen factorization: A = L*T*L^H with T banded.
plasma_pzhetrf_aasen(uplo, A, ipiv, T, W, sequence, request);
// LU factorization of the band matrix T.
plasma_pzgbtrf(T, ipiv2, sequence, request);
if (uplo == PlasmaLower) {
plasma_desc_t vA;
plasma_desc_t vB;
// forward-substitution with L
// (vA/vB are assigned and later used under the same A.m > A.nb guard)
if (A.m > A.nb) {
vA = plasma_desc_view(A,
A.nb, 0,
A.m-A.nb, A.n-A.nb);
vB = plasma_desc_view(B,
B.nb, 0,
B.m-B.nb, B.n);
plasma_pzgeswp(PlasmaRowwise, B, ipiv, 1, sequence, request);
// complete the row swaps before the triangular solve
#pragma omp taskwait
plasma_pztrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
1.0, vA,
vB,
sequence, request);
}
// solve with band matrix T
#pragma omp taskwait
plasma_pztbsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans,
PlasmaUnit,
1.0, T,
B,
ipiv2,
sequence, request);
plasma_pztbsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans,
PlasmaNonUnit,
1.0, T,
B,
ipiv2,
sequence, request);
// backward-substitution with L^H
if (A.m > A.nb) {
plasma_pztrsm(PlasmaLeft, PlasmaLower, PlasmaConjTrans, PlasmaUnit,
1.0, vA,
vB,
sequence, request);
// finish the solve before undoing the row swaps
#pragma omp taskwait
plasma_pzgeswp(PlasmaRowwise, B, ipiv, -1, sequence, request);
}
}
else {
// TODO: upper
}
}
|
lcm2_profiler.c | /** @file lcm2_profiler.c
*
* @par Copyright:
* 2009-2018 (C) Kai-Uwe Behrmann
*
* @brief littleCMS CMM profile generator for Oyranos
* @internal
* @author Kai-Uwe Behrmann <ku.b@gmx.de>
* @par License:
* MIT <http://www.opensource.org/licenses/MIT>
* @since 2009/10/24
*/
#include "lcm2_profiler.h"
#include <assert.h>
#include <lcms2.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <wchar.h>
#ifndef OY_UNUSED
#if (__GNUC__*100 + __GNUC_MINOR__) >= 406
#define OY_UNUSED __attribute__ ((unused))
#elif defined(_MSC_VER)
#define OY_UNUSED __declspec(unused)
#else
#define OY_UNUSED
#endif
#endif
#ifndef OY_FALLTHROUGH
#if defined(__clang__)
#define OY_FALLTHROUGH
#elif __GNUC__ >= 7
#define OY_FALLTHROUGH __attribute__ ((fallthrough));
#else
#define OY_FALLTHROUGH
#endif
#endif
#if LCMS_VERSION < 2050
/* 'dscm' */
#define cmsSigProfileDescriptionMLTag 0x6473636d
#endif
#define lcm2Free_m(v) if(v) { free(v); v = NULL; }
extern lcm2Message_f lcm2msg_p;
static const int max_channels = 16;
/* core functions */
/* Shared state handed to the LUT samplers: optional lcms transforms into
 * and out of the processing space, the user's per-pixel callback with its
 * opaque data, and the channel counts of each stage. */
typedef struct {
cmsHTRANSFORM in2MySpace; /* input space -> processing space; may be NULL */
cmsHTRANSFORM mySpace2Out; /* processing space -> output space; may be NULL */
lcm2Sampler_f sampler; /* user supplied per-pixel callback */
void * sampler_variables; /* opaque data forwarded to sampler */
int channelsIn;
int channelsProcess;
int channelsOut;
} lcm2Cargo_s;
/* Core sampler: runs one pixel through
 *   input space -> processing space -> user sampler -> output space
 * and clips the result to [0,1].  Both lcms transforms are optional and
 * are applied in place (in->in, out->out).  Always returns TRUE. */
int lcm2samplerDouble ( double in[],
double out[],
void * Cargo )
{
int i;
lcm2Cargo_s * d = (lcm2Cargo_s*) Cargo;
// color convert from input space to process color space
if(d->in2MySpace)
cmsDoTransform( d->in2MySpace, in, in, 1 );
// custom data processing
d->sampler(in,out,d->sampler_variables);
// converting from process space to output space
if(d->mySpace2Out)
cmsDoTransform( d->mySpace2Out, out, out, 1 );
// clipping
for(i = 0; i < d->channelsOut; ++i)
{
if(out[i] > 1.0)
out[i] = 1.0;
if(out[i] < 0.0)
out[i] = 0.0;
}
return TRUE;
}
/* 16-bit sampler shim: scale cmsUInt16Number channels to doubles, run
 * the double pipeline, then scale back with integer clipping.
 * NOTE(review): the scale factor is 65536.0, not 65535.0, so input
 * 0xffff maps to ~0.99998 instead of 1.0 while output 1.0 maps to 65536
 * and is clipped -- confirm this asymmetry is intended. */
int lcm2sampler16 (const cmsUInt16Number In[],
cmsUInt16Number Out[],
void * Cargo)
{
int i, v, result = TRUE;
double in[max_channels], out[max_channels],
scaler = 65536.0;
lcm2Cargo_s * d = (lcm2Cargo_s*) Cargo;
for(i = 0; i < d->channelsIn; ++i)
in[i] = In[i] / scaler;
result = lcm2samplerDouble( in, out, Cargo );
for(i = 0; i < d->channelsOut; ++i)
{
v = out[i] * scaler;
// integer clipping; out[] was clipped to [0,1] above, so v <= 65536
if(v > 65535)
Out[i] = 65535;
else
Out[i] = v;
}
return result;
}
/* Float sampler shim: widen cmsFloat32Number channels to double, run the
 * double pipeline, then narrow the result back to float. */
int lcm2samplerFloat ( const cmsFloat32Number In[],
                       cmsFloat32Number Out[],
                       void * Cargo )
{
  double wide_in[max_channels], wide_out[max_channels];
  lcm2Cargo_s * cargo = (lcm2Cargo_s*) Cargo;
  int c, ok;

  for(c = 0; c < cargo->channelsIn; ++c)
    wide_in[c] = In[c];

  ok = lcm2samplerDouble( wide_in, wide_out, Cargo );

  for(c = 0; c < cargo->channelsOut; ++c)
    Out[c] = wide_out[c];

  return ok;
}
/** \addtogroup profiler ICC profiler API
* @brief Easy to use API to generate matrix and LUT ICC profiles.
*
* @{ */
/** Function lcm2OpenProfileFile
* @brief Open a profile from file
*
* @code
// create ICC profile with linear gamma, RGB.709 primaries + D65 from wildcard
if(in_space_profile) h_in_space = lcm2OpenProfileFile( "*srgblinear", NULL );
@endcode
*
* @param[in] my_space_profile operating color space.
* Use a file name or
* possible wildcards:
* - *srgblinear
* - *srgb
* - *lab
* - *rec601.625.linear
* - *rec601.525.linear
* @param[in] my_space_profile_path path name for
* for my_space_profile; optional
* @return lcms profile handle
*
* @version Oyranos: 0.9.6
* @date 2016/03/04
* @since 2016/03/04 (Oyranos: 0.9.6)
*/
cmsHPROFILE lcm2OpenProfileFile ( const char * my_space_profile,
const char * my_space_profile_path )
{
cmsHPROFILE h_my_space = 0;
if(my_space_profile_path == NULL) my_space_profile_path = "";
if(my_space_profile && my_space_profile[0])
{
/* path + name + NUL; the sprintf below fills exactly this much */
char * full_name = (char*) malloc(strlen(my_space_profile_path) + strlen(my_space_profile) + 1);
if(!full_name) return NULL;
sprintf( full_name, "%s%s", my_space_profile_path, my_space_profile );
/* well-known wildcards resolve to generated profiles instead of files */
if(strcmp(my_space_profile,"*lab") == 0)
h_my_space = cmsCreateLab4Profile(cmsD50_xyY());
else
if(strcmp(my_space_profile,"*xyz") == 0)
h_my_space = cmsCreateXYZProfile( );
else
if(strcmp(my_space_profile,"*srgb") == 0)
h_my_space = cmsCreate_sRGBProfile( );
else
/* Rec.709 primaries + D65 white point, gamma 1.0 */
if(strcmp(my_space_profile,"*srgblinear") == 0)
h_my_space = lcm2CreateICCMatrixProfile2( 1.0, 0.64, 0.33,
0.30, 0.60,
0.15, 0.06,
0.3127,0.329 );
else /* ITU-R BT.601-7 625-line, 50 field/s systems */
if(strcmp(my_space_profile,"*rec601.625.linear") == 0)
h_my_space = lcm2CreateICCMatrixProfile2( 1.0, 0.64, 0.33,
0.29, 0.60,
0.15, 0.06,
0.3127,0.329 );
else /* ITU-R BT.601-7 525-line, 60/1.001, field/s systems */
if(strcmp(my_space_profile,"*rec601.525.linear") == 0)
h_my_space = lcm2CreateICCMatrixProfile2( 1.0, 0.63, 0.34,
0.31, 0.595,
0.155, 0.07,
0.3127,0.329 );
/* anything not matched by a wildcard is treated as a file name */
if(!h_my_space)
h_my_space = cmsOpenProfileFromFile( full_name, "rb" );
if(!h_my_space) { lcm2msg_p( 300, NULL, "no profile from %s", full_name); }
/*else printf("will use %s\n", full_name);*/
lcm2Free_m(full_name);
}
return h_my_space;
}
/** Function lcm2WriteProfileToFile
* @brief Write a profile to a file
*
* Suggested is a scheme of "space version vendor.icc".
*
* @code
// "My-Space_v1.0_myna.icc"
char * file_name = lcm2WriteProfileToFile( my_space_profile,
"My-Space", "v1.0", "myna" );
@endcode
*
* @param[in] my_space_profile the profile
* @param[in] my_space_profile_name the color space name
* @param[in] my_space_profile_version the version of the profile; optional
* @param[in] vendor_four_bytes the vendor, just four bytes; optional
* @return constructed file name;
* can be released with free()
*
* @version Oyranos: 0.9.6
* @date 2016/03/06
* @since 2016/02/16 (Oyranos: 0.9.6)
*/
/* Build "name[_version][_vendor].icc" (spaces become underscores), save
 * the profile there and return the malloc'd file name (caller frees),
 * or NULL when allocation fails. */
char * lcm2WriteProfileToFile ( cmsHPROFILE my_space_profile,
const char * my_space_profile_name,
const char * my_space_profile_version,
const char * vendor_four_bytes )
{
  const char * version = my_space_profile_version;
  const char * vendor = vendor_four_bytes;
  size_t need = strlen(my_space_profile_name) +
                (version ? strlen(version) : 0) +
                (vendor ? strlen(vendor) : 0) + 8;
  char * file_name = (char*) malloc( need );
  int pos;

  if(!file_name) return NULL;

  sprintf( file_name, "%s%s%s%s%s%s", my_space_profile_name,
           version ? " " : "", version ? version : "",
           vendor ? " " : "", vendor ? vendor : "",
           strstr(my_space_profile_name, ".icc") ? "" : ".icc" );

  /* spaces are not wanted in file names */
  for(pos = 0; file_name[pos]; ++pos)
    if(file_name[pos] == ' ')
      file_name[pos] = '_';

  cmsSaveProfileToFile( my_space_profile, file_name );
  return file_name;
}
/** Function lcm2WriteProfileToMem
*
* Save a cmsHPROFILE to a in memory data blob
*
* @version Oyranos: 0.9.7
* @since 2008/12/28 (Oyranos: 0.9.7)
* @date 2017/06/07
*/
/* Serialize an ICC profile to a memory blob allocated via allocateFunc
 * (or malloc when NULL); *size receives the blob size, 0 on failure.
 * NOTE(review): the parameter is declared cmsHPROFILE* yet handed
 * directly to cmsSaveProfileToMem(), which takes a cmsHPROFILE (itself
 * a void*) -- callers appear to pass the handle itself; verify. */
void * lcm2WriteProfileToMem ( cmsHPROFILE * profile,
size_t * size,
void * (*allocateFunc)(size_t size) )
{
int error = !profile;
void * data = 0;
cmsUInt32Number size_ = 0;
if(!error)
{
*size = 0;
// first call with NULL data only queries the required size
if(!cmsSaveProfileToMem( profile, NULL, &size_ ))
lcm2msg_p( 300, NULL, "cmsSaveProfileToMem failed" );
if(size_)
{
if(allocateFunc)
data = allocateFunc( size_ );
else
data = malloc( size_ );
cmsSaveProfileToMem( profile, data, &size_ );
} else
lcm2msg_p( 300, NULL, "can not convert lcms2 profile to memory" );
*size = size_;
} else
lcm2msg_p( 301, NULL, "no profle" );
return data;
}
/* --- CIE*Lab space familiy --- */
/** \addtogroup samplers Samplers
* @{ */
static double CIE_C_scaler = M_SQRT2; /* fit all Lab into LCh */
/** Function lcm2SamplerLab2LCh
* @brief CIE*Lab -> CIE*LCh in PCS*Lab range
*
* The CIE*C channel is scaled to contain all CIE*Lab colors.
* The ICC PCS*Lab space with range of 0->1 for all channels is utilised to
* be useful as a sampler argument to lcm2CreateProfileLutByFunc().
*
* @param[in] i input Lab triple
* @param[out] o output LCh triple
* @param[out] none unused
*
* @version Oyranos: 0.9.6
* @date 2016/03/13
* @since 2016/13/13 (Oyranos: 0.9.6)
*/
void lcm2SamplerLab2LCh ( const double i[],
double o[],
void * none OY_UNUSED )
{
/* undo the PCS 0..1 encoding of the a/b channels (offset 0.5) and apply
 * CIE_C_scaler so every Lab color fits into the LCh cube */
double a = (i[1] - 0.5) * CIE_C_scaler,
b = (i[2] - 0.5) * CIE_C_scaler;
/* CIE*L */
o[0] = i[0];
/* CIE*C = sqrt(CIE*a² + CIE*b²) */
o[1] = hypot(a,b);
/* CIE*h = atan2(CIE*b, CIE*a), mapped from (-pi,pi] into (0,1]; the
 * extra half turn (+0.5) is compensated again in lcm2SamplerLCh2Lab */
o[2] = atan2(b,a)/M_PI/2.0 + 0.5;
}
/** Function lcm2SamplerLCh2Lab
* @brief CIE*LCh -> CIE*Lab in PCS*Lab range
*
* The CIE*C channel is scaled to contain all CIE*Lab colors.
* The ICC PCS*Lab space with range of 0->1 for all channels is utilised to
* be useful as a sampler argument to lcm2CreateProfileLutByFunc().
*
* @param[in] i input LCh triple
* @param[out] o output Lab triple
* @param[out] none unused
*
* @version Oyranos: 0.9.7
* @date 2017/12/05
* @since 2016/13/13 (Oyranos: 0.9.6)
*/
void lcm2SamplerLCh2Lab ( const double i[],
double o[],
void * none OY_UNUSED )
{
/* CIE*L */
o[0] = i[0];
/* The hue was encoded as atan2/(2*pi) + 0.5, i.e. with a half-turn
 * offset; since cos(theta+pi) = -cos(theta), the "1.0 - (...)" below
 * cancels that offset, so the round trip through lcm2SamplerLab2LCh
 * is the identity. */
/* CIE*a = C * cos(h) */
o[1] = 1.0 - (i[1] * cos(M_PI*2.0*i[2]) / CIE_C_scaler + 0.5);
/* CIE*b = C * sin(h) */
o[2] = 1.0 - (i[1] * sin(M_PI*2.0*i[2]) / CIE_C_scaler + 0.5);
}
/* sRGB */
cmsViewingConditions lcm2_vc_srgb_ =
{
{ 95.05, 100.0, 108.88 }, /* D65 white point */
20, /* viewing background luminance Yb */
4, /* ambient in cd/m² (== 64 lux) */
2, /* Dim sourround */
1 /* adapted (0-1) */
};
/** Function lcm2SamplerJCh2Lab
* @brief CIE*LCh -> CIE*Lab in PCS*Lab range
*
* The CIE*C channel is scaled to contain all CIE*Lab colors.
* The ICC PCS*Lab space with range of 0->1 for all channels is utilised to
* be useful as a sampler argument to lcm2CreateProfileLutByFunc().
*
* @param[in] i input LCh triple
* @param[out] o output Lab triple
* @param[in] v (cmsViewingConditions*); optional, default sRGB
*
* @version Oyranos: 0.9.7
* @date 2018/02/28
* @since 2018/02/28 (Oyranos: 0.9.7)
*/
/* CIECAM02 JCh -> ICC PCS*Lab via XYZ.
 * NOTE(review): the cmsCIECAM02Init() handle is used unchecked -- verify
 * lcms behavior when initialization fails. */
void lcm2SamplerJCh2Lab ( const double i[],
double o[],
void * v )
{
cmsViewingConditions * vc = &lcm2_vc_srgb_; /* default: sRGB conditions */
cmsHANDLE vh;
cmsCIEXYZ XYZ;
cmsJCh JCh = { i[0], i[1], i[2] };
vh = cmsCIECAM02Init( NULL, v?v:vc );
cmsCIECAM02Reverse( vh, &JCh, &XYZ );
cmsCIECAM02Done( vh );
lcm2CIEXYZ2iccLab( &XYZ, o );
}
/** Function lcm2SamplerLab2JCh
* @brief CIE*Lab -> CIE*JCh
*
* The CIECAM02 appearance space.
*
* @param[in] i input Lab triple
* @param[out] o output JCh triple
* @param[in] v (cmsViewingConditions*); optional, default sRGB
*
* @version Oyranos: 0.9.7
* @date 2018/02/28
* @since 2018/02/28 (Oyranos: 0.9.7)
*/
/* ICC PCS*Lab -> CIECAM02 JCh appearance correlates, via XYZ.
 * NOTE(review): the cmsCIECAM02Init() handle is used unchecked -- verify
 * lcms behavior when initialization fails. */
void lcm2SamplerLab2JCh ( const double i[],
double o[],
void * v )
{
cmsViewingConditions * vc = &lcm2_vc_srgb_; /* default: sRGB conditions */
cmsHANDLE vh;
cmsCIEXYZ XYZ;
cmsJCh JCh;
lcm2iccLab2CIEXYZ( i, &XYZ );
vh = cmsCIECAM02Init( NULL, v?v:vc );
cmsCIECAM02Forward( vh, &XYZ, &JCh );
cmsCIECAM02Done( vh );
o[0] = JCh.J;
o[1] = JCh.C;
o[2] = JCh.h;
}
/* --- YCbCr space familiy --- */
typedef enum {
ITU_R_BT_601,
ITU_R_BT_601_JPEG,
ITU_REC_709,
ITU_R_BT_2020
} ITU_Std_e;
const char * ITU_Std_dscr [] = { "ITU-R BT.601", "ITU-R BT.601 / JPEG", "ITU REC-709", "ITU-R BT.2020", NULL };
/* Return the Kb/Kr luma coefficients defined by the given ITU standard. */
static void selectKbKr( ITU_Std_e ITU_Std, double * Kb, double * Kr )
{
  switch(ITU_Std)
  {
  case ITU_R_BT_601:      /* BT.601 and its JPEG variant share coefficients */
  case ITU_R_BT_601_JPEG:
    *Kb = 0.114;  *Kr = 0.299;
    break;
  case ITU_REC_709:
    *Kb = 0.0722; *Kr = 0.2126;
    break;
  case ITU_R_BT_2020:
    *Kb = 0.0593; *Kr = 0.2627;
    break;
  }
}
/* Digital 8-bit black level and luma scale for the given ITU standard:
 * full range for JPEG, studio range (16..235) otherwise. */
void selectBlackScale( ITU_Std_e ITU_Std, double * black, double * scale )
{
  switch(ITU_Std)
  {
  case ITU_R_BT_601_JPEG: /* full range */
    *black = 0;  *scale = 255;
    break;
  case ITU_R_BT_601:      /* studio range, shared by 601/709/2020 */
  case ITU_REC_709:
  case ITU_R_BT_2020:
    *black = 16; *scale = 219;
    break;
  }
}
void linear2ycbcr( double *L_ )
{
double L = *L_;
double alpha = 1.09929682680944,
beta = 0.018053968510807;
// linear -> gamma
if(L < beta)
L *= 4.5;
else
L = pow(L,0.45) - (alpha - 1);
*L_ = L;
}
void ycbcr2linear( double *V_ )
{
double L = *V_;
double alpha = 1.09929682680944,
beta = 0.081243; /* 0.018053968510807 * 4.5 */
// linear -> gamma
if(L < beta)
L /= 4.5;
else
L = pow( (L + (alpha-1)) / alpha, 1.0/0.45 );
*V_ = L;
}
/* Forward YCbCr matrix: derive luma Y and the chroma differences Pb/Pr
 * from R'G'B' using the standard coefficients Kb, Kr. */
static void rgb2ycbcr( double R, double G, double B,
double *Y_, double *Pb_, double *Pr_,
double Kb, double Kr )
{
  const double luma = Kr * R + (1.0-Kr-Kb) * G + Kb * B;

  *Y_  = luma;
  *Pb_ = 1.0/2.0 * (B-luma)/(1.0-Kb);
  *Pr_ = 1.0/2.0 * (R-luma)/(1.0-Kr);
}
/* Inverse YCbCr matrix: reconstruct R'G'B' from luma Y and the chroma
 * differences Pb/Pr.  Algebraic inversion of rgb2ycbcr():
 *   B = 2*Pb*(1-Kb) + Y
 *   R = 2*Pr*(1-Kr) + Y
 *   G = (Y - Kr*R - Kb*B) / (1 - Kb - Kr)
 */
static void ycbcr2rgb( double Y, double Pb, double Pr,
double *R_, double *G_, double *B_,
double Kb, double Kr )
{
  const double blue  = 2*Pb*(1-Kb) + Y;
  const double red   = 2*Pr*(1-Kr) + Y;
  const double green = (Y - Kr*red - Kb*blue)/(1.0-Kb-Kr);

  *R_ = red;
  *G_ = green;
  *B_ = blue;
}
/* Scale the R, G and B channels uniformly; every supported ITU standard
 * applies the same plain multiplication. */
static void scaleRGB( ITU_Std_e ITU_Std, double scale, double * R, double * G, double * B )
{
  switch(ITU_Std)
  {
  case ITU_R_BT_601:      /* ITU-R BT.601 */
  case ITU_R_BT_601_JPEG: /* ITU-R BT.601 - JPEG */
  case ITU_REC_709:       /* ITU REC-709 */
  case ITU_R_BT_2020:     /* ITU-R BT.2020 */
    *R *= scale;
    *G *= scale;
    *B *= scale;
    break;
  }
}
/* Map normalized Y'CbCr (0..1) into the standard's digital code ranges:
 * studio range 16..235 (luma) / 16..240 (chroma) for 601/709/2020, full
 * range 0..255 for JPEG; chroma is re-centered around code 128.
 * `max' is presumably the coding maximum (e.g. 1.0 for normalized data,
 * 255 for 8 bit); it is divided by 255 so the 8-bit constants below
 * scale proportionally -- TODO confirm against callers. */
static void scaleLinearToYCbCr( ITU_Std_e ITU_Std, double max, double * Y, double * Cb, double * Cr )
{
max /= 255.0;
switch(ITU_Std)
{
case ITU_R_BT_601: // ITU-R BT.601
case ITU_REC_709: // ITU REC-709
case ITU_R_BT_2020: // ITU-R BT.2020
*Y *= (235.*max-16.*max);
*Y += 16.*max;
*Cb *= (240.*max-16.*max);
*Cb += 128.*max;
*Cr *= (240.*max-16.*max);
*Cr += 128.*max;
break;
case ITU_R_BT_601_JPEG: // ITU-R BT.601 - JPEG
*Y *= 255.*max;
*Cb *= 255.*max;
*Cb += 128.*max;
*Cr *= 255.*max;
*Cr += 128.*max;
break;
}
}
/* Inverse of scaleLinearToYCbCr(): map Y'CbCr digital codes back to the
 * normalized 0..1 range, removing the black offset and the chroma
 * centering.  `max' has the same (presumed) coding-maximum meaning as in
 * scaleLinearToYCbCr() -- TODO confirm against callers. */
static void scaleYCbCrToLinear( ITU_Std_e ITU_Std, double max, double * Y, double * Cb, double * Cr )
{
max /= 255.0;
switch(ITU_Std)
{
case ITU_R_BT_601: // ITU-R BT.601
case ITU_REC_709: // ITU REC-709
case ITU_R_BT_2020: // ITU-R BT.2020
*Y -= 16.*max;
*Y /= (235.*max-16.*max);
*Cb -= 128.*max;
*Cb /= (240.*max-16.*max);
*Cr -= 128.*max;
*Cr /= (240.*max-16.*max);
break;
case ITU_R_BT_601_JPEG: // ITU-R BT.601 - JPEG
*Y /= 255.*max;
*Cb -= 128.*max;
*Cb /= 255.*max;
*Cr -= 128.*max;
*Cr /= 255.*max;
break;
}
}
/** Function lcm2SamplerRGB2JpegYCbCr
* @brief RGB -> YCbCr in Jpeg range
*
* ITU R BT 601 / REC.601 coefficients with Jpeg range of 0-1 is generated.
*
* @param[in] i input RGB triple
* @param[out] o output REC.601 YCbCr in JPEG range triple
* @param[out] none unused
*
* @version Oyranos: 0.9.6
* @date 2016/03/13
* @since 2016/03/07 (Oyranos: 0.9.6)
*/
/** RGB -> full-range (JPEG) YCbCr with ITU-R BT.601 coefficients.
 *  JPEG assumes no extra gamma correction, so the input triple is used
 *  as RGB directly: scale -> matrix -> range mapping. */
void lcm2SamplerRGB2JpegYCbCr (
const double i[],
double o[],
void * none OY_UNUSED )
{
  const ITU_Std_e std = ITU_R_BT_601_JPEG;
  double Kb, Kr,
         R = i[0], G = i[1], B = i[2],
         Y, Cb, Cr;

  selectKbKr( std, &Kb, &Kr );
  scaleRGB( std, 1.0, &R, &G, &B );
  rgb2ycbcr( R, G, B, &Y, &Cb, &Cr, Kb, Kr );
  scaleLinearToYCbCr( std, 1.0, &Y, &Cb, &Cr );

  o[0] = Y; o[1] = Cb; o[2] = Cr;
}
/** Function lcm2SamplerJpegYCbCr2RGB
* @brief YCbCr in Jpeg range -> RGB
*
* ITU R BT 601 / REC.601 coefficients in Jpeg range of 0-1 is assumed.
*
* @param[in] i input REC.601 YCbCr in JPEG range triple
* @param[out] o output RGB triple
* @param[out] none unused
*
* @version Oyranos: 0.9.6
* @date 2016/03/13
* @since 2016/03/13 (Oyranos: 0.9.6)
*/
/** Full-range (JPEG) BT.601 YCbCr -> RGB.
 *  JPEG assumes no extra gamma correction, so the result is plain RGB:
 *  range de-mapping -> inverse matrix -> scale. */
void lcm2SamplerJpegYCbCr2RGB( const double i[],
double o[],
void * none OY_UNUSED )
{
  const ITU_Std_e std = ITU_R_BT_601_JPEG;
  double Kb, Kr,
         Y = i[0], Cb = i[1], Cr = i[2],
         R, G, B;

  selectKbKr( std, &Kb, &Kr );
  scaleYCbCrToLinear( std, 1.0, &Y, &Cb, &Cr );
  ycbcr2rgb( Y, Cb, Cr, &R, &G, &B, Kb, Kr );
  scaleRGB( std, 1.0, &R, &G, &B );

  o[0] = R; o[1] = G; o[2] = B;
}
/** Function lcm2SamplerIdendity
* @brief Lab -> Lab
*
* PCS Lab range of 0-1 for all channels is assumed.
*
* @param[in] i input PCS.Lab triple
* @param[out] o output PCS.Lab triple
* @param[out] none unused
*
* @version Oyranos: 0.9.7
* @date 2018/02/26
* @since 2018/02/26 (Oyranos: 0.9.7)
*/
/** Pass-through sampler: copy the input triple unchanged (identity LUT).
 *  The channels may be Lab, LMS, YCbCr or RGB; only positions matter. */
void lcm2SamplerIdendity ( const double i[],
double o[],
void * none OY_UNUSED )
{
  int c;
  for(c = 0; c < 3; ++c)
    o[c] = i[c];
}
/** Function lcm2SamplerGrayer
* @brief Lab -> Gray -> Lab
*
* PCS Lab range of 0-1 for all channels is assumed.
*
* @param[in] i input PCS.Lab triple
* @param[out] o output PCS.Lab triple
* @param[out] none unused
*
* @version Oyranos: 0.9.6
* @date 2016/03/13
* @since 2016/03/13 (Oyranos: 0.9.6)
*/
/** Desaturating sampler: keep CIE*L, force both chroma channels to the
 *  PCS*Lab neutral encoding 0.5 (a* = b* = 0). */
void lcm2SamplerGrayer ( const double i[],
double o[],
void * none OY_UNUSED )
{
  o[0] = i[0]; /* lightness is preserved */
  o[1] = 0.5;  /* neutral a* */
  o[2] = 0.5;  /* neutral b* */
}
/** Function lcm2SamplerBlacknWhite
* @brief Lab -> Black&White -> Lab
*
* PCS Lab range of 0-1 for all channels is assumed.
*
* @param[in] i input PCS.Lab triple
* @param[out] o output PCS.Lab triple
* @param[out] none unused
*
* @version Oyranos: 0.9.6
* @date 2016/03/13
* @since 2016/03/13 (Oyranos: 0.9.6)
*/
/** Threshold sampler: lightness at or below the midpoint becomes black,
 *  above it white; chroma is forced to the PCS*Lab neutral 0.5. */
void lcm2SamplerBlacknWhite ( const double i[],
double o[],
void * none OY_UNUSED )
{
  o[0] = (i[0] <= 0.5) ? 0.0 : 1.0; /* 0.5 itself maps to black */
  o[1] = 0.5; /* neutral a* */
  o[2] = 0.5; /* neutral b* */
}
/** Function lcm2SamplerSepia
* @brief Lab -> LCh -> Yellow -> LCh -> Lab
*
* PCS Lab range of 0-1 for all channels is assumed.
* Creates a single reddish hue.
*
* @param[in] i input PCS.Lab triple
* @param[out] o output PCS.Lab triple
* @param[out] none unused
*
* @version Oyranos: 0.9.6
* @date 2016/03/14
* @since 2016/03/14 (Oyranos: 0.9.6)
*/
/** Sepia toning: convert to LCh, replace chroma and hue by a fixed
 *  reddish-yellow ramp whose chroma grows with lightness, convert back. */
void lcm2SamplerSepia ( const double i[],
double o[],
void * none )
{
  double lch_in[3], lch_out[3];

  lcm2SamplerLab2LCh( i, lch_in, none );
  lch_out[0] = lch_in[0];               /* keep lightness */
  lch_out[1] = 0.04+0.04*lch_in[0];     /* chroma grows with L */
  lch_out[2] = 0.18;                    /* fixed sepia hue */
  lcm2SamplerLCh2Lab( lch_out, o, none );
}
/** Function lcm2SamplerReddish
 *  @brief    Lab -> reddish tint -> Lab
 *
 *  Adds a small, lightness-dependent offset to both chroma channels,
 *  giving all colors a reddish tint. Similar in spirit to Sepia, but
 *  applied to every color instead of replacing the chroma.
 *  PCS Lab range of 0-1 for all channels is assumed.
 *
 *  @param[in]     i                   input PCS.Lab triple
 *  @param[out]    o                   output PCS.Lab triple
 *  @param[in]     none                unused
 *
 *  @version Oyranos: 0.9.6
 *  @date    2016/03/15
 *  @since   2016/03/15 (Oyranos: 0.9.6)
 */
void         lcm2SamplerReddish      ( const double        i[],
                                       double              o[],
                                       void              * none OY_UNUSED )
{
  const double lightness = i[0];

  o[0] = lightness;
  /* offsets scale with lightness so shadows are tinted less */
  o[1] = i[1] + 0.012+0.012*lightness;
  o[2] = i[2] + 0.025+0.025*lightness;
}
/** Function lcm2SamplerWhitePointLab
 *  @brief    Lab -> White Point Adaption -> Lab
 *
 *  Shifts the chroma channels by a caller-supplied amount, scaled with
 *  the lightness, to adapt all colors toward a given white point
 *  difference. Uses simple linear adaption inside CIE*Lab.
 *  PCS Lab range of 0-1 for all channels is assumed.
 *
 *  @param[in]     i                   input PCS.Lab triple
 *  @param[out]    o                   output PCS.Lab triple
 *  @param[in]     data                pointer to array of two doubles with
 *                                     desired ICC*ab differences
 *
 *  @version Oyranos: 0.9.7
 *  @date    2017/05/17
 *  @since   2017/05/17 (Oyranos: 0.9.7)
 */
void         lcm2SamplerWhitePointLab( const double        i[],
                                       double              o[],
                                       void              * data )
{
  const double * shift = (const double*) data;
  const double weight = i[0]; /* lightness scales the correction */

  o[0] = i[0];
  o[1] = i[1] + shift[0] * weight;
  o[2] = i[2] + shift[1] * weight;
}
/** Function lcm2iccLab2CIEXYZ
 *  @brief    ICC*Lab -> CIE*XYZ
 *
 *  Expands a PCS-encoded Lab triple (0-1 per channel) to CIE ranges and
 *  converts to a lcms XYZ struct relative to the D50 white point.
 *
 *  @param[in]     icc_Lab             input Lab triple in PCS range
 *  @param[out]    XYZ                 output XYZ struct
 *
 *  @version Oyranos: 0.9.7
 *  @date    2018/02/28
 *  @since   2018/02/28 (Oyranos: 0.9.7)
 */
void         lcm2iccLab2CIEXYZ       ( const double      * icc_Lab,
                                       cmsCIEXYZ         * XYZ )
{
  /* expand PCS 0..1 encoding to CIE*Lab ranges */
  cmsCIELab Lab = { .L = icc_Lab[0] * 100.0,
                    .a = icc_Lab[1] * 257.0 - 128.0,
                    .b = icc_Lab[2] * 257.0 - 128.0 };
  cmsLab2XYZ( cmsD50_XYZ(), XYZ, &Lab );
}
/** Function lcm2CIEXYZ2iccLab
 *  @brief    CIE*XYZ -> ICC*Lab
 *
 *  Converts a lcms XYZ struct to CIE*Lab relative to the D50 white
 *  point and compresses the result into PCS encoding (0-1 per channel).
 *
 *  @param[in]     XYZ                 input XYZ struct
 *  @param[out]    icc_Lab             output Lab triple in PCS range
 *
 *  @version Oyranos: 0.9.7
 *  @date    2018/02/28
 *  @since   2018/02/28 (Oyranos: 0.9.7)
 */
void         lcm2CIEXYZ2iccLab       ( const cmsCIEXYZ   * XYZ,
                                       double            * icc_Lab )
{
  cmsCIELab cielab;

  cmsXYZ2Lab( cmsD50_XYZ(), &cielab, XYZ );

  /* compress CIE*Lab ranges into PCS 0..1 encoding */
  icc_Lab[0] = cielab.L / 100.0;
  icc_Lab[1] = (cielab.a + 128.0) / 257.0;
  icc_Lab[2] = (cielab.b + 128.0) / 257.0;
}
/** Function lcm2iccXYZ2iccLab
* @brief ICC*XYZ -> ICC*Lab
*
* Converts from PCS XYZ to PCS Lab encoding.
*
* @param[in] XYZ input XYZ triple
* @param[out] icc_Lab output Lab triple in PCS range
*
* @version Oyranos: 0.9.7
* @date 2018/02/28
* @since 2018/02/28 (Oyranos: 0.9.7)
*/
void lcm2iccXYZ2iccLab ( const double * XYZ,
double * icc_Lab )
{
cmsCIEXYZ XYZ_ = { XYZ[0], XYZ[1], XYZ[2] };
lcm2CIEXYZ2iccLab( &XYZ_, icc_Lab );
}
/** Function lcm2SamplerWhitePointBradford
 *  @brief    Lab -> Bradford White Point Adaption -> Lab
 *
 *  Adapts all colors from a source white point to a destination
 *  illuminant using the lcms Bradford CAT
 *  (cmsAdaptToIlluminant()).
 *  PCS Lab range of 0-1 for all channels is assumed.
 *
 *  @param[in]     i                   input PCS.Lab triple
 *  @param[out]    o                   output PCS.Lab triple
 *  @param[in]     data                pointer to array of six doubles:
 *                                     source ICC*XYZ white point, followed by
 *                                     destination ICC*XYZ white point
 *
 *  @version Oyranos: 0.9.7
 *  @date    2018/02/28
 *  @since   2018/02/28 (Oyranos: 0.9.7)
 */
void         lcm2SamplerWhitePointBradford (
                                       const double        i[],
                                       double              o[],
                                       void              * data )
{
  const double * icc_XYZ = (const double*) data;
  const double scale = 100.0; /* ICC*XYZ 0..1 -> lcms XYZ scaling */
  cmsCIEXYZ src_white, dst_illu, in_XYZ, out_XYZ;

  src_white.X = icc_XYZ[0] * scale;
  src_white.Y = icc_XYZ[1] * scale;
  src_white.Z = icc_XYZ[2] * scale;
  dst_illu.X  = icc_XYZ[3] * scale;
  dst_illu.Y  = icc_XYZ[4] * scale;
  dst_illu.Z  = icc_XYZ[5] * scale;

  lcm2iccLab2CIEXYZ( i, &in_XYZ );
  cmsAdaptToIlluminant( &out_XYZ, &src_white, &dst_illu, &in_XYZ );
  lcm2CIEXYZ2iccLab( &out_XYZ, o );
}
/** Function lcm2SamplerProof
 *  @brief    Lab -> proofing profile -> Lab
 *
 *  Sends each Lab color through a proofing transform (single precision)
 *  so that a proofing profile can be baked into an abstract profile.
 *  Abstract profiles can easily be merged into a multi profile
 *  transform. With gamut checking enabled, colors that deviate strongly
 *  after the round trip are replaced by neutral mid gray.
 *  PCS Lab range of 0-1 for all channels is assumed.
 *
 *  @param[in]     i                   input PCS.Lab triple
 *  @param[out]    o                   output PCS.Lab triple
 *  @param[in]     data                pointer to array of two void* with
 *                                     - desired cmsHTRANSFORM
 *                                       for uint32_t arrays in PT_Lab
 *                                     - cmsFLAGS_GAMUTCHECK flag
 *
 *  @version Oyranos: 0.9.7
 *  @since   2009/11/04 (Oyranos: 0.1.10)
 *  @date    2017/06/03
 */
void         lcm2SamplerProof        ( const double        i[],
                                       double              o[],
                                       void              * data )
{
  void ** args = (void**)data;
  cmsCIELab proof_in, proof_out;
  cmsFloat32Number pcs_in[3], pcs_out[3];
  double d;

  /* expand PCS 0..1 encoding to CIE*Lab ranges */
  pcs_in[0] = proof_in.L = i[0] * 100.0;
  pcs_in[1] = proof_in.a = i[1] * 257.0 - 128.0;
  pcs_in[2] = proof_in.b = i[2] * 257.0 - 128.0;

  /* round trip through the proofing transform */
  cmsDoTransform( args[0], pcs_in, pcs_out, 1 );
  proof_out.L = pcs_out[0]; proof_out.a = pcs_out[1]; proof_out.b = pcs_out[2];

  /* gamut check requested (args[1]): mark strongly deviating colors gray */
  d = cmsDeltaE( &proof_in, &proof_out );
  if(args[1] != NULL && fabs(d) > 10)
  {
    proof_out.L = 50.0;
    proof_out.a = proof_out.b = 0.0;
  }

  /* compress back into PCS 0..1 encoding */
  o[0] = proof_out.L / 100.0;
  o[1] = (proof_out.a + 128.0) / 257.0;
  o[2] = (proof_out.b + 128.0) / 257.0;
}
/** Function lcm2SamplerProofD
 *  @brief    Lab -> proofing profile -> Lab
 *
 *  Double precision variant of lcm2SamplerProof(): sends each Lab color
 *  through a proofing transform so that a proofing profile can be baked
 *  into an abstract profile. Abstract profiles can easily be merged
 *  into a multi profile transform. With gamut checking enabled, colors
 *  that deviate strongly after the round trip are replaced by neutral
 *  mid gray.
 *  PCS Lab range of 0-1 for all channels is assumed.
 *
 *  @param[in]     i                   input PCS.Lab triple
 *  @param[out]    o                   output PCS.Lab triple
 *  @param[in]     data                pointer to array of two void* with
 *                                     - desired cmsHTRANSFORM
 *                                       for uint64_t arrays in PT_Lab
 *                                     - cmsFLAGS_GAMUTCHECK flag
 *
 *  @version Oyranos: 0.9.7
 *  @since   2009/11/04 (Oyranos: 0.1.10)
 *  @date    2017/11/06
 */
void         lcm2SamplerProofD       ( const double        i[],
                                       double              o[],
                                       void              * data )
{
  void ** args = (void**)data;
  cmsCIELab proof_in, proof_out;
  cmsFloat64Number pcs_in[3], pcs_out[3];
  double d;

  /* expand PCS 0..1 encoding to CIE*Lab ranges */
  pcs_in[0] = proof_in.L = i[0] * 100.0;
  pcs_in[1] = proof_in.a = i[1] * 257.0 - 128.0;
  pcs_in[2] = proof_in.b = i[2] * 257.0 - 128.0;

  /* round trip through the proofing transform */
  cmsDoTransform( args[0], pcs_in, pcs_out, 1 );
  proof_out.L = pcs_out[0]; proof_out.a = pcs_out[1]; proof_out.b = pcs_out[2];

  /* gamut check requested (args[1]): mark strongly deviating colors gray */
  d = cmsDeltaE( &proof_in, &proof_out );
  if(args[1] != NULL && fabs(d) > 10)
  {
    proof_out.L = 50.0;
    proof_out.a = proof_out.b = 0.0;
  }

  /* compress back into PCS 0..1 encoding */
  o[0] = proof_out.L / 100.0;
  o[1] = (proof_out.a + 128.0) / 257.0;
  o[2] = (proof_out.b + 128.0) / 257.0;
}
/** @} */ /* samplers */
/** Function lcm2CreateProfileLutByFunc
 *  @brief    Generate a ICC profile LUT
 *
 *  Convenience wrapper around lcm2CreateProfileLutByFuncAndCurves():
 *  it supplies linear (gamma 1.0) tone curves on both sides, so the
 *  CLUT alone carries the color transformation defined by the sampler.
 *  The sampler function operates in an input space and creates colors
 *  in an output space; these values are filled into the profile LUT.
 *  It is possible to create effect profiles of class abstract or LUT
 *  profiles in any other color space including device links.
 *
 *  For some already available sampler funtions see @ref samplers.
 *
 *  @param[in,out] profile             profile to add LUT table
 *  @param[in]     samplerMySpace      the function to fill the LUT with color
 *  @param[in]     samplerArg          data pointer to samplerMySpace
 *  @param[in]     in_space_profile    input color space
 *                                     for samplerMySpace(); for wildcards see
 *                                     lcm2OpenProfileFile()
 *  @param[in]     my_space_profile    operating color space
 *                                     for samplerMySpace(); for wildcards see
 *                                     lcm2OpenProfileFile()
 *  @param[in]     out_space_profile   output color space
 *                                     for samplerMySpace(); for wildcards see
 *                                     lcm2OpenProfileFile()
 *  @param[in]     grid_size           dimensions of the created LUT; e.g. 33
 *  @param[in]     tag_sig             tag signature for the generated LUT
 *  @return                            0 - success; 1 - error
 *
 *  @version Oyranos: 0.9.7
 *  @date    2017/05/17
 *  @since   2009/11/04 (Oyranos: 0.1.10)
 */
int          lcm2CreateProfileLutByFunc (
                                       cmsHPROFILE         profile,
                                       lcm2Sampler_f       samplerMySpace,
                                       void              * samplerArg,
                                       const char        * in_space_profile,
                                       const char        * my_space_profile,
                                       const char        * out_space_profile,
                                       int                 grid_size,
                                       cmsTagSignature     tag_sig
                                     )
{
  cmsToneCurve * curves[max_channels];
  int n;
  int error;

  if(!profile) return 1;

  /* one shared linear curve serves every channel */
  curves[0] = cmsBuildGamma( 0, 1.0 );
  if(!curves[0]) return 1;
  for(n = 1; n < max_channels; ++n)
    curves[n] = curves[0];

  error = lcm2CreateProfileLutByFuncAndCurves( profile,
                                               samplerMySpace, samplerArg,
                                               curves, curves,
                                               in_space_profile,
                                               my_space_profile,
                                               out_space_profile,
                                               grid_size, tag_sig );

  cmsFreeToneCurve( curves[0] );

  return error;
}
/** Function lcm2CreateProfileLutByFuncAndCurves
 *  @brief    Generate a ICC profile LUT
 *
 *  This function takes a series of parameters and functions to create a
 *  ICC profile from. The sampler function operates in a input space and
 *  creates colors in a output space. These values are filled into the
 *  profile LUT, wrapped by the given input and output tone curves.
 *  It is possible to create effect profiles of class abstract
 *  or LUT profiles in any other color space including device links.
 *  Both a 16-bit and a float CLUT are sampled; currently only the
 *  16-bit pipeline is written into the profile (tag_sig, or
 *  cmsSigAToB0Tag when tag_sig is 0).
 *
 *  For some already available sampler funtions see @ref samplers.
 *
 *  @param[in,out] profile             profile to add LUT table
 *  @param[in]     samplerMySpace      the function to fill the LUT with color
 *  @param[in]     samplerArg          data pointer to samplerMySpace
 *  @param[in]     in_curves           input curves
 *  @param[in]     out_curves          output curves
 *  @param[in]     in_space_profile    input color space
 *                                     for samplerMySpace(); for wildcards see
 *                                     lcm2OpenProfileFile()
 *  @param[in]     my_space_profile    operating color space
 *                                     for samplerMySpace(); for wildcards see
 *                                     lcm2OpenProfileFile()
 *  @param[in]     out_space_profile   output color space
 *                                     for samplerMySpace(); for wildcards see
 *                                     lcm2OpenProfileFile()
 *  @param[in]     grid_size           dimensions of the created LUT; e.g. 33
 *  @param[in]     tag_sig             tag signature for the generated LUT
 *  @return                            0 - success; 1 - error
 *
 *  @version Oyranos: 0.9.6
 *  @date    2017/05/17
 *  @since   2009/11/04 (Oyranos: 0.1.10)
 */
int          lcm2CreateProfileLutByFuncAndCurves (
                                       cmsHPROFILE         profile,
                                       lcm2Sampler_f       samplerMySpace,
                                       void              * samplerArg,
                                       cmsToneCurve      * in_curves[],
                                       cmsToneCurve      * out_curves[],
                                       const char        * in_space_profile,
                                       const char        * my_space_profile,
                                       const char        * out_space_profile,
                                       int                 grid_size,
                                       cmsTagSignature     tag_sig
                                     )
{
  cmsHPROFILE h_in_space = 0,
              h_my_space = 0,
              h_out_space = 0;
  cmsHTRANSFORM tr_In2MySpace = 0, tr_MySpace2Out = 0;
  cmsStage * gmt_lut = 0, * gmt_lut16 = 0;
  cmsPipeline * gmt_pl = 0, * gmt_pl16 = 0;
  lcm2Cargo_s cargo;
  int i;
  int error = 0;
  int in_layout, my_layout, out_layout;
  in_layout = my_layout = out_layout = (FLOAT_SH(1)|CHANNELS_SH(3)|BYTES_SH(0));

  if(!profile) return 1;

  /* Allocate the pipelines only after the profile check; allocating them
   * in the declarations leaked both on the early return above. */
  gmt_pl = cmsPipelineAlloc( 0,3,3 );
  gmt_pl16 = cmsPipelineAlloc( 0,3,3 );
  if(!gmt_pl || !gmt_pl16)
  { error = 1; goto lcm2CreateProfileLutByFuncAndCurvesClean; }

  if(in_space_profile) h_in_space = lcm2OpenProfileFile( in_space_profile, NULL );
  if(my_space_profile) h_my_space = lcm2OpenProfileFile( my_space_profile, NULL );
  if(out_space_profile)h_out_space = lcm2OpenProfileFile( out_space_profile, NULL );

  /* conversion into the sampler's operating space, if it differs */
  if(h_in_space && h_my_space && strcmp(in_space_profile,my_space_profile) != 0)
  {
    tr_In2MySpace = cmsCreateTransformTHR ( 0, h_in_space, in_layout,
                                            h_my_space, my_layout,
                                            INTENT_RELATIVE_COLORIMETRIC,
                                            cmsFLAGS_NOOPTIMIZE);
    if(!tr_In2MySpace) { lcm2msg_p( 300, NULL, "no transform"); error = 1; goto lcm2CreateProfileLutByFuncAndCurvesClean; }
  }

  /* conversion from the sampler's operating space into the output space */
  if(h_my_space && h_out_space && strcmp(my_space_profile,out_space_profile) != 0)
  {
    tr_MySpace2Out = cmsCreateTransformTHR( 0, h_my_space, my_layout,
                                            h_out_space, out_layout,
                                            INTENT_RELATIVE_COLORIMETRIC,
                                            cmsFLAGS_NOOPTIMIZE);
    if(!tr_MySpace2Out) { lcm2msg_p( 300, NULL, "no transform"); error = 1; goto lcm2CreateProfileLutByFuncAndCurvesClean; }
  }

  memset(&cargo, 0, sizeof(lcm2Cargo_s));
  cargo.in2MySpace = tr_In2MySpace;
  cargo.mySpace2Out = tr_MySpace2Out;
  cargo.sampler = samplerMySpace;
  cargo.sampler_variables = samplerArg; /* was terminated by a stray comma */
  cargo.channelsIn = h_in_space ? cmsChannelsOf( cmsGetColorSpace( h_in_space ) ) : 3;
  cargo.channelsProcess = h_my_space ? cmsChannelsOf( cmsGetColorSpace( h_my_space ) ) : 3;
  cargo.channelsOut = h_out_space ? cmsChannelsOf( cmsGetColorSpace( h_out_space ) ) : 3;

  /* sample the 16-bit and the float CLUT concurrently */
#pragma omp parallel for
  for(i = 0; i < 2; ++i)
  {
    if(i)
    {
      gmt_lut16 = cmsStageAllocCLut16bit( 0, grid_size, 3,3,0 );
      cmsStageSampleCLut16bit( gmt_lut16, lcm2sampler16, &cargo, 0 );
    } else
    {
      gmt_lut = cmsStageAllocCLutFloat( 0, grid_size, 3,3,0 );
      cmsStageSampleCLutFloat( gmt_lut, lcm2samplerFloat, &cargo, 0 );
    }
  }

  /* 16-bit int; cmsPipeline owns the cmsStage memory */
  cmsPipelineInsertStage( gmt_pl16, cmsAT_BEGIN,
                          cmsStageAllocToneCurves( 0, cargo.channelsIn, in_curves ) );
  cmsPipelineInsertStage( gmt_pl16, cmsAT_END, gmt_lut16 );
  cmsPipelineInsertStage( gmt_pl16, cmsAT_END,
                          cmsStageAllocToneCurves( 0, cargo.channelsOut, out_curves ) );
  cmsWriteTag( profile, (tag_sig!=0)?tag_sig:cmsSigAToB0Tag, gmt_pl16 );

  /* float; currently sampled but not written to a tag */
  cmsPipelineInsertStage( gmt_pl, cmsAT_BEGIN,
                          cmsStageAllocToneCurves( 0, cargo.channelsIn, in_curves ) );
  cmsPipelineInsertStage( gmt_pl, cmsAT_END, gmt_lut );
  cmsPipelineInsertStage( gmt_pl, cmsAT_END,
                          cmsStageAllocToneCurves( 0, cargo.channelsOut, out_curves ) );
  /*cmsWriteTag( profile, cmsSigDToB0Tag, gmt_pl );*/

lcm2CreateProfileLutByFuncAndCurvesClean:
  if(h_in_space)     { cmsCloseProfile( h_in_space );       h_in_space = 0; }
  if(h_my_space)     { cmsCloseProfile( h_my_space );       h_my_space = 0; }
  if(h_out_space)    { cmsCloseProfile( h_out_space );      h_out_space = 0; }
  if(tr_In2MySpace)  { cmsDeleteTransform( tr_In2MySpace ); tr_In2MySpace = 0; }
  if(tr_MySpace2Out) { cmsDeleteTransform( tr_MySpace2Out );tr_MySpace2Out = 0; }
  if(gmt_pl16) cmsPipelineFree( gmt_pl16 );
  if(gmt_pl) cmsPipelineFree( gmt_pl );

  return error;
}
/** Function lcm2CreateAbstractProfile
 *  @brief    Create a effect profile of type abstract in ICC*Lab PCS
 *
 *  Here a code example:
 *  @code
    void samplerGrayer         (const double i[],
                                double       o[])
    {
      o[0] = i[0]*1.0; // L / CIE*L / Y / R
      o[1] = 0.5;      // M / CIE*a / Cb / G
      o[2] = 0.5;      // S / CIE*b / Cr / B
    }
    const char * name_i18n[] = {
      "de", "DE", "Graustufen (MyProject)",
      "en", "US", "Grayer (MyProject)"
    };
    lcm2CreateAbstractProfile  (
                                samplerGrayer,
                                NULL,
                                "*lab", // CIE*Lab
                                5,
                                2.3,
                                "Grayer (MyProject)",
                                name_i18n,
                                "Grayer myna",
                                "My Project 2016",
                                "My Name",
                                ICC_2011_LICENSE,
                                "CIE*L",
                                "http://www.cie.co.at",
                                NULL,
                                NULL
                               );
    @endcode
 *
 *  @param[in]     samplerMySpace      the function to fill the LUT with color
 *  @param[in]     samplerArg          data pointer to samplerMySpace
 *  @param[in]     my_space_profile    operating color space
 *                                     for samplerMySpace();
 *                                     "*lab" will set CIE*Lab
 *  @param[in]     grid_size           dimensions of the created LUT; e.g. 33
 *  @param[in]     icc_profile_version 2.3 or 4.3
 *  @param[in]     my_abstract_description  internal profile name
 *  @param[in]     my_abstract_descriptions internal profile name translated
 *  @param[in]     my_abstract_file_name    profile file name. If present a ICC profile will be written to that name. optional
 *  @param[in]     provider            e.g. "My Project 2016"
 *  @param[in]     vendor              e.g. "My Name"
 *  @param[in]     my_license          e.g. "This profile is made available by %s, with permission of %s, and may be copied, distributed, embedded, made, used, and sold without restriction. Altered versions of this profile shall have the original identification and copyright information removed and shall not be misrepresented as the original profile."
 *                                     - first %%s is replaced by the provider string arg and
 *                                     - second %%s is replaced by the vendor string arg
 *  @param[in]     device_model        e.g. "My Set"
 *  @param[in]     device_manufacturer e.g. "www.mydomain.net"
 *  @param[in]     my_meta_data        e.g. {"DOMAIN_,GROUP_","DOMAIN_key1","value1","GROUP_key2","value2"}
 *  @param[out]    h_profile           the resulting profile
 *  @return                            0 - success; 1 - error
 *
 *  @version Oyranos: 0.9.7
 *  @date    2017/05/17
 *  @since   2009/11/04 (Oyranos: 0.1.10)
 */
int          lcm2CreateAbstractProfile(
                                       lcm2Sampler_f       samplerMySpace,
                                       void              * samplerArg,
                                       const char        * my_space_profile,
                                       int                 grid_size,
                                       double              icc_profile_version,
                                       const char        * my_abstract_description,
                                       const char       ** my_abstract_descriptions,
                                       const char        * my_abstract_file_name,
                                       const char        * provider,
                                       const char        * vendor,
                                       const char        * my_license,
                                       const char        * device_model,
                                       const char        * device_manufacturer,
                                       const char       ** my_meta_data,
                                       cmsHPROFILE       * h_profile
                                     )
{
  cmsHPROFILE profile = 0;
  int error = 0;

  profile = lcm2CreateProfileFragment (
                             "*lab", // CIE*Lab
                             "*lab", // CIE*Lab
                             icc_profile_version,
                             my_abstract_description,
                             provider, vendor, my_license,
                             device_model, device_manufacturer, NULL);
  /* previously this silently returned 0 (success) on a failed fragment */
  if(!profile) { error = 1; goto lcm2CreateAbstractProfileClean; }

  if(my_meta_data)
    lcm2AddMetaTexts ( profile, my_meta_data[0], &my_meta_data[1], cmsSigMetaTag );

  error = lcm2CreateProfileLutByFunc( profile, samplerMySpace, samplerArg,
                                      "*lab", my_space_profile, "*lab",
                                      grid_size, cmsSigAToB0Tag );
  if(error) goto lcm2CreateAbstractProfileClean;

  lcm2AddMluDescription ( profile, my_abstract_descriptions,
                          cmsSigProfileDescriptionMLTag
                        );

  if(my_abstract_file_name)
  {
    char * fn = lcm2WriteProfileToFile( profile, my_abstract_file_name, 0,0 );
    lcm2msg_p( 302, NULL, "wrote to: %s", fn?fn:"----");
    lcm2Free_m(fn);
  }

  /* ownership of the profile passes to the caller on success */
  if(h_profile)
    *h_profile = profile;
  else
    cmsCloseProfile( profile );
  profile = 0;

lcm2CreateAbstractProfileClean:
  /* reached with a live handle only on the error paths; previously leaked */
  if(profile)
    cmsCloseProfile( profile );
  return error;
}
/** Function lcm2CreateAbstractTemperatureProfile
 *  @brief    Create a effect profile of type abstract in ICC*Lab PCS from Kelvin
 *
 *  The white point for the given temperature is computed, its CIE*ab
 *  distance to the reference white (D50, or the source profile's media
 *  white point) is baked into a linear Lab adaption LUT, and the
 *  lightness is slightly reduced to avoid clipping around the new
 *  white point.
 *
 *  @param[in]     kelvin              the desired temperature in Kelvin; ICC reference (D50) is 5000 Kelvin
 *  @param[in]     source_white_profile a profile, e.g. the actual monitor profile; optional, default is D50
 *  @param[in]     grid_size           dimensions of the created LUT; e.g. 33
 *  @param[in]     icc_profile_version 2.3 or 4.3
 *  @param[out]    my_abstract_file_name  profile file name; caller owns the returned string
 *  @param[out]    h_profile           the resulting profile; If omitted the function will write the profile to my_abstract_file_name.
 *  @return                            0 - success; 1 - error
 *
 *  @version Oyranos: 0.9.7
 *  @date    2017/05/17
 *  @since   2017/05/17 (Oyranos: 0.9.7)
 */
int          lcm2CreateAbstractTemperatureProfile (
                                       float               kelvin,
                                       cmsHPROFILE         source_white_profile,
                                       int                 grid_size,
                                       double              icc_profile_version,
                                       char             ** my_abstract_file_name,
                                       cmsHPROFILE       * h_profile
                                     )
{
  cmsHPROFILE profile = NULL;
  cmsToneCurve * i_curve[3] = {NULL,NULL,NULL}, * o_curve[3] = {NULL,NULL,NULL};
  /* type[6] Y = (a * X + b) ^ Gamma + c        order: {g, a, b, c} */
  double curve_params[4] = {1,1,0,0}, curve_params_low[4] = {1,0.95,0,0};
  int i;
  cmsCIEXYZ * source_white = NULL;
  const char * kelvin_meta[] = {
    "EFFECT_class", "reddish,white_point,atom",
    "EFFECT_type", "CIEab",
    "COLORIMETRY_white_point", "yes,reddish,kelvin",
    "CMF_binary", "create-abstract",
    "CMF_version", "0.9.7",
    "CMF_product", "Oyranos",
    0,0
  };
  char * kelvin_name = malloc(1024);
  int error = !kelvin_name;
  double icc_ab[2] = {0,0}; /* initialized: read in the naming code below */
  char * desc = NULL;

  if(error) return 1;
  kelvin_name[0] = '\000'; /* never hand uninitialized memory to the caller */

  if(source_white_profile)
  {
    /* pick up the source profile description for the effect profile name */
    if(cmsIsTag(source_white_profile, cmsSigProfileDescriptionTag))
    {
      cmsUInt32Number n = cmsGetProfileInfoASCII(source_white_profile, cmsInfoDescription, cmsNoLanguage, cmsNoCountry, NULL, 0);
      if(n)
      {
        desc = calloc( n+1, sizeof(char) );
        /* previously a failed calloc returned 0 == success */
        if(!desc) { error = 1; goto lcm2CreateAbstractTemperatureProfileClean; }
        cmsUInt32Number nr = cmsGetProfileInfoASCII(source_white_profile, cmsInfoDescription, cmsNoLanguage, cmsNoCountry, desc, n);
        if(n != nr)
          lcm2msg_p( 301, NULL, "found problem reading desc tag: %u %u", n,nr);
      }
    }
    source_white = cmsReadTag( source_white_profile, cmsSigMediaWhitePointTag ); // MediaWhitePointTag
  }

  /* linear input curves; o_curve[0] is no longer aliased here to rule out
   * a double free in the cleanup section */
  i_curve[0] = cmsBuildGamma(0, 1.0);
  if(!i_curve[0]) error = 1;
  for(i = 1; i < 3; ++i) { i_curve[i] = i_curve[0]; }

  if(!error)
  {
    cmsCIExyY xyWhitePoint;
    cmsFloat64Number TempK = kelvin;
    /* 4000 - 25000 K */
    cmsWhitePointFromTemp( &xyWhitePoint, TempK );
    cmsCIEXYZ WhitePoint;
    const cmsCIEXYZ * reference_white = cmsD50_XYZ();
    float max_brightness;
    cmsxyY2XYZ( &WhitePoint, &xyWhitePoint );
    cmsCIELab LabWhitePoint;
    cmsCIELab SrcLabWhitePoint;
    if(source_white)
      reference_white = source_white;
    cmsXYZ2Lab( reference_white, &LabWhitePoint, &WhitePoint );
    icc_ab[0] = LabWhitePoint.a/128.0;
    icc_ab[1] = LabWhitePoint.b/128.0;
#ifndef OY_HYP
#define OY_SQRT(a,b) ((a)*(a) + (b)*(b))
#define OY_HYP(a,b)  pow(OY_SQRT(a,b),1.0/2.0)
#endif
    /* reduce brightness remaining inside a cone with a roof angle of 30° */
    max_brightness = 1.0 - OY_HYP(icc_ab[0],icc_ab[1]/1.5);
    cmsXYZ2Lab( cmsD50_XYZ(), &SrcLabWhitePoint, reference_white );
    cmsXYZ2Lab( cmsD50_XYZ(), &LabWhitePoint, &WhitePoint );
    lcm2msg_p( 302, NULL, "SrcW: %g %g %g LabW: %g %g %g diff: %g %g max brightness: %g",
               SrcLabWhitePoint.L, SrcLabWhitePoint.a, SrcLabWhitePoint.b,
               LabWhitePoint.L, LabWhitePoint.a, LabWhitePoint.b,
               icc_ab[0], icc_ab[1], max_brightness );
    /* avoid color clipping around the white point */
    curve_params_low[1] = max_brightness;
    o_curve[0] = cmsBuildParametricToneCurve(0, 6, curve_params_low);
    o_curve[1] = o_curve[2] = cmsBuildParametricToneCurve(0, 6, curve_params);
    if(!o_curve[0] || !o_curve[1]) error = 1;
  }
  if(error) goto lcm2CreateAbstractTemperatureProfileClean;

  /* the profile name and meta keys reflect the shift direction */
  if(icc_ab[1] > 0)
  {
    sprintf( kelvin_name, "Reddish %d K (www.oyranos.org)", (int)kelvin );
  } else if(icc_ab[1] == 0) {
    sprintf( kelvin_name, "%d K (www.oyranos.org)", (int)kelvin );
    kelvin_meta[1] = "neutral,white_point,atom";
    kelvin_meta[3] = "yes,D50,kelvin";
  } else {
    sprintf( kelvin_name, "Bluish %d K (www.oyranos.org)", (int)kelvin );
    kelvin_meta[1] = "bluish,white_point,atom";
    kelvin_meta[3] = "yes,bluish,kelvin";
  }
  if(source_white_profile)
  {
    if(desc && strlen(desc) < 900)
      sprintf( &kelvin_name[strlen(kelvin_name)], " - %s", desc);
    if(icc_ab[1] > 0)
    {
      kelvin_meta[1] = "reddish,white_point,atom,device";
      kelvin_meta[3] = "yes,reddish,kelvin";
    } else if(icc_ab[1] == 0) {
      kelvin_meta[1] = "neutral,white_point,atom,device";
      kelvin_meta[3] = "yes,D50,kelvin";
    } else {
      kelvin_meta[1] = "bluish,white_point,atom,device";
      kelvin_meta[3] = "yes,bluish,kelvin";
    }
  }

  if(!error)
    /* profile fragment creation */
    profile = lcm2CreateProfileFragment (
                             "*lab", // CIE*Lab
                             "*lab", // CIE*Lab
                             icc_profile_version,
                             kelvin_name,
                             "Oyranos project 2017",
                             "Kai-Uwe Behrmann",
                             ICC_2011_LICENSE,
                             "CIE*Lab",
                             "http://www.cie.co.at",
                             NULL);
  if(!profile) error = 1;

  if(!error)
    error = lcm2CreateProfileLutByFuncAndCurves( profile,
                                       lcm2SamplerWhitePointLab, icc_ab,
                                       o_curve, i_curve,
                                       "*lab", "*lab", "*lab",
                                       grid_size, cmsSigAToB0Tag );
  if(!error)
    lcm2AddMetaTexts ( profile, "EFFECT_,COLORIMETRY_,CMF_", kelvin_meta, cmsSigMetaTag );

lcm2CreateAbstractTemperatureProfileClean:
  free(desc); /* was leaked before; free(NULL) is a no-op */
  if(i_curve[0]) cmsFreeToneCurve( i_curve[0] );
  if(o_curve[0]) cmsFreeToneCurve( o_curve[0] );
  if(o_curve[1]) cmsFreeToneCurve( o_curve[1] );
  /* ownership of kelvin_name transfers to the caller */
  *my_abstract_file_name = kelvin_name;
  if(h_profile)
    *h_profile = profile;
  else if(profile && *my_abstract_file_name)
  {
    char * fn = lcm2WriteProfileToFile( profile, *my_abstract_file_name, 0,0 );
    lcm2msg_p( 302, NULL, "wrote to: %s", fn?fn:"----");
    lcm2Free_m(fn);
    cmsCloseProfile( profile );
  }

  return error;
}
/** Function lcm2CreateAbstractWhitePointProfileLab
 *  @brief    Create a effect profile of type abstract in ICC*Lab PCS for white point adjustment
 *
 *  These profiles can be applied to 1D / per single channel only adjustments.
 *  It will be marked with EFFECT_linear=yes in the meta tag.
 *
 *  @param[in]     cie_a               CIE*a correction value in -0.5 - 0.5 range
 *  @param[in]     cie_b               CIE*b correction value in -0.5 - 0.5 range
 *  @param[in]     grid_size           dimensions of the created LUT; e.g. 33
 *  @param[in]     icc_profile_version 2.3 or 4.3
 *  @param[out]    my_abstract_file_name  profile file name; caller owns the returned string
 *  @param[out]    h_profile           the resulting profile; If omitted the function will write the profile to my_abstract_file_name.
 *  @return                            0 - success; 1 - error
 *
 *  @version Oyranos: 0.9.7
 *  @date    2018/02/28
 *  @since   2017/06/02 (Oyranos: 0.9.7)
 */
int          lcm2CreateAbstractWhitePointProfileLab (
                                       double              cie_a,
                                       double              cie_b,
                                       int                 grid_size,
                                       double              icc_profile_version,
                                       char             ** my_abstract_file_name,
                                       cmsHPROFILE       * h_profile
                                     )
{
  cmsHPROFILE profile = NULL;
  cmsToneCurve * i_curve[3] = {NULL,NULL,NULL}, * o_curve[3] = {NULL,NULL,NULL};
  /* type[6] Y = (a * X + b) ^ Gamma + c        order: {g, a, b, c} */
  double curve_params[4] = {1,1,0,0}, curve_params_low[4] = {1,0.95,0,0};
  int i;
  const char * kelvin_meta[] = {
    "EFFECT_class", "reddish,white_point,linear,atom",
    "EFFECT_linear", "yes", /* can be used for 1D curves like VCGT */
    "EFFECT_type", "CIEab",
    "COLORIMETRY_white_point", "yes,reddish,kelvin",
    "CMF_binary", "create-abstract",
    "CMF_version", "0.9.7",
    "CMF_product", "Oyranos",
    0,0
  };
  char * kelvin_name = malloc(1024);
  int error = !kelvin_name;
  double icc_ab[2] = {cie_a, cie_b};

  if(error) return 1;
  kelvin_name[0] = '\000'; /* never hand uninitialized memory to the caller */

  /* linear input curves */
  i_curve[0] = cmsBuildGamma(0, 1.0);
  if(!i_curve[0]) error = 1;
  for(i = 1; i < 3; ++i)
  { i_curve[i] = i_curve[0]; }

  if(!error)
  {
#ifndef OY_HYP
#define OY_SQRT(a,b) ((a)*(a) + (b)*(b))
#define OY_HYP(a,b)  pow(OY_SQRT(a,b),1.0/2.0)
#endif
    /* reduce brightness remaining inside a cone with a roof angle of 30° */
    double max_brightness = 1.0 - OY_HYP(icc_ab[0],icc_ab[1]/1.5);
    /* avoid color clipping around the white point */
    curve_params_low[1] = max_brightness;
    o_curve[0] = cmsBuildParametricToneCurve(0, 6, curve_params_low);
    o_curve[1] = o_curve[2] = cmsBuildParametricToneCurve(0, 6, curve_params);
    if(!o_curve[0] || !o_curve[1]) error = 1;
  }
  if(error) goto lcm2CreateAbstractWhitePointProfileClean;

  if(icc_ab[1] > 0)
  {
    sprintf( kelvin_name, "Reddish CIE*a %g CIE*b %g", cie_a, cie_b );
  /* NOTE(review): this tests icc_ab[1] against the lower bound only and
   * icc_ab[0] against the upper bound only; a symmetric near-zero test
   * of both channels may have been intended — confirm before changing. */
  } else if(-0.001 < icc_ab[1] && icc_ab[0] < 0.001) {
    sprintf( kelvin_name, "CIE*a %g CIE*b %g", cie_a, cie_b );
    kelvin_meta[1] = "neutral,white_point,atom";
    kelvin_meta[3] = "yes,D50,kelvin";
  } else {
    sprintf( kelvin_name, "Bluish CIE*a %g CIE*b %g", cie_a, cie_b );
    kelvin_meta[1] = "bluish,white_point,atom";
    kelvin_meta[3] = "yes,bluish,kelvin";
  }

  profile = lcm2CreateProfileFragment (
                             "*lab", // CIE*Lab
                             "*lab", // CIE*Lab
                             icc_profile_version,
                             kelvin_name,
                             "Oyranos project 2018",
                             "Kai-Uwe Behrmann",
                             ICC_2011_LICENSE,
                             "CIE*Lab",
                             "http://www.cie.co.at",
                             NULL);
  /* previously a failed fragment returned 0 == success */
  if(!profile) { error = 1; goto lcm2CreateAbstractWhitePointProfileClean; }

  error = lcm2CreateProfileLutByFuncAndCurves( profile,
                                       lcm2SamplerWhitePointLab, icc_ab,
                                       o_curve, i_curve,
                                       "*lab", "*lab", "*lab",
                                       grid_size, cmsSigAToB0Tag );
  if(!error)
    lcm2AddMetaTexts ( profile, "EFFECT_,COLORIMETRY_,CMF_", kelvin_meta, cmsSigMetaTag );

lcm2CreateAbstractWhitePointProfileClean:
  if(i_curve[0]) cmsFreeToneCurve( i_curve[0] );
  if(o_curve[0]) cmsFreeToneCurve( o_curve[0] );
  if(o_curve[1]) cmsFreeToneCurve( o_curve[1] );
  /* ownership of kelvin_name transfers to the caller */
  *my_abstract_file_name = kelvin_name;
  if(h_profile)
    *h_profile = profile;
  else if(profile && *my_abstract_file_name)
  {
    char * fn = lcm2WriteProfileToFile( profile, *my_abstract_file_name, 0,0 );
    lcm2msg_p( 302, NULL, "wrote to: %s", fn?fn:"----");
    lcm2Free_m(fn);
    cmsCloseProfile( profile );
  }

  return error;
}
/** Function lcm2CreateAbstractWhitePointProfileBradford
 *  @brief    Create a effect profile of type abstract in ICC*Lab PCS for white point adjustment
 *
 *  These profiles can be applied to 1D / per single channel only adjustments.
 *  It will be marked with EFFECT_linear=yes in the meta tag.
 *
 *  @param[in]     src_iccXYZ          source media white point
 *  @param[in]     illu_iccXYZ         ICC*XYZ illuminant in 0.0 - 2.0 range
 *  @param[in]     grid_size           dimensions of the created LUT; e.g. 33
 *  @param[in]     icc_profile_version 2.3 or 4.3
 *  @param[in]     flags               - 0x01 : return only fast my_abstract_file_name, without expensive profile computation
 *  @param[out]    my_abstract_file_name  profile file name; caller owns the returned string
 *  @param[out]    h_profile           the resulting profile; If omitted the function will write the profile to my_abstract_file_name.
 *  @return                            0 - success; 1 - error
 *
 *  @version Oyranos: 0.9.7
 *  @date    2018/07/25
 *  @since   2017/06/02 (Oyranos: 0.9.7)
 */
int          lcm2CreateAbstractWhitePointProfileBradford (
                                       double            * src_iccXYZ,
                                       double            * illu_iccXYZ,
                                       int                 grid_size,
                                       double              icc_profile_version,
                                       int                 flags,
                                       char             ** my_abstract_file_name,
                                       cmsHPROFILE       * h_profile
                                     )
{
  cmsHPROFILE profile = NULL;
  cmsToneCurve * i_curve[3] = {NULL,NULL,NULL}, * o_curve[3] = {NULL,NULL,NULL};
  /* type[6] Y = (a * X + b) ^ Gamma + c        order: {g, a, b, c} */
  double curve_params[4] = {1,1,0,0}, curve_params_low[4] = {1,0.95,0,0};
  int i;
  const char * kelvin_meta[] = {
    "EFFECT_class", "reddish,type,white_point,linear,atom",
    "EFFECT_linear", "yes", /* can be used for 1D curves like VCGT */
    "COLORIMETRY_white_point", "yes,reddish,kelvin",
    "EFFECT_type", "bradford",
    "CMF_binary", "create-abstract",
    "CMF_version", "0.9.7",
    "CMF_product", "Oyranos",
    0,0
  };
  char * kelvin_name = malloc(1024);
  int error = !kelvin_name;
  double icc_XYZ[6] = { src_iccXYZ[0], src_iccXYZ[1], src_iccXYZ[2],
                        illu_iccXYZ[0], illu_iccXYZ[1], illu_iccXYZ[2]};
  double icc_ab[2] = {0,0};

  if(error) return 1;
  kelvin_name[0] = '\000'; /* never hand uninitialized memory to the caller */

  if(!(flags & 0x01)) /* skip computation */
  {
    /* linear input curves */
    i_curve[0] = cmsBuildGamma(0, 1.0);
    if(!i_curve[0]) error = 1;
    for(i = 1; i < 3; ++i)
    { i_curve[i] = i_curve[0]; }
  }

  if(!error)
  {
#ifndef OY_HYP
#define OY_SQRT(a,b) ((a)*(a) + (b)*(b))
#define OY_HYP(a,b)  pow(OY_SQRT(a,b),1.0/2.0)
#endif
    /* reduce brightness remaining inside a cone with a roof angle of 30° */
    double max_brightness;
    double src_Lab[3], dst_Lab[3];
    lcm2iccXYZ2iccLab( src_iccXYZ, src_Lab );
    lcm2iccXYZ2iccLab( illu_iccXYZ, dst_Lab );
    icc_ab[0] = dst_Lab[1] - src_Lab[1];
    icc_ab[1] = dst_Lab[2] - src_Lab[2];
    max_brightness = 1.0 - OY_HYP(icc_ab[0],icc_ab[1]/1.5);
    if(!(flags & 0x01)) /* skip computation */
    {
      /* avoid color clipping around the white point */
      curve_params_low[1] = max_brightness;
      o_curve[0] = cmsBuildParametricToneCurve(0, 6, curve_params_low);
      o_curve[1] = o_curve[2] = cmsBuildParametricToneCurve(0, 6, curve_params);
      if(!o_curve[0] || !o_curve[1]) error = 1;
    }
  }
  if(error) goto lcm2CreateAbstractWhitePointProfileBClean;

  if(icc_ab[1] > 0)
  {
    sprintf( kelvin_name, "Bradford Reddish CIE*a %g CIE*b %g v1 lcm2", icc_ab[0], icc_ab[1] );
  /* NOTE(review): this tests icc_ab[1] against the lower bound only and
   * icc_ab[0] against the upper bound only; a symmetric near-zero test
   * of both channels may have been intended — confirm before changing. */
  } else if(-0.001 < icc_ab[1] && icc_ab[0] < 0.001) {
    sprintf( kelvin_name, "Bradford CIE*a %g CIE*b %g v1 lcm2", icc_ab[0], icc_ab[1] );
    kelvin_meta[1] = "neutral,type,white_point,atom";
    kelvin_meta[3] = "yes,D50,kelvin";
  } else {
    sprintf( kelvin_name, "Bradford Bluish CIE*a %g CIE*b %g v1 lcm2", icc_ab[0], icc_ab[1] );
    kelvin_meta[1] = "bluish,type,white_point,atom";
    kelvin_meta[3] = "yes,bluish,kelvin";
  }

  /* ownership of kelvin_name transfers to the caller */
  *my_abstract_file_name = kelvin_name;
  if(flags & 0x01) /* only the name was requested; no curves allocated */
  {
    return error;
  }

  profile = lcm2CreateProfileFragment (
                             "*lab", // CIE*Lab
                             "*lab", // CIE*Lab
                             icc_profile_version,
                             kelvin_name,
                             "Oyranos project 2018",
                             "Kai-Uwe Behrmann",
                             ICC_2011_LICENSE,
                             "Bradford",
                             "http://www.cie.co.at",
                             NULL);
  /* previously a failed fragment returned 0 == success */
  if(!profile) { error = 1; goto lcm2CreateAbstractWhitePointProfileBClean; }

  error = lcm2CreateProfileLutByFuncAndCurves( profile,
                                       lcm2SamplerWhitePointBradford, icc_XYZ,
                                       o_curve, i_curve,
                                       "*lab", "*lab", "*lab",
                                       grid_size, cmsSigAToB0Tag );
  if(!error)
    lcm2AddMetaTexts ( profile, "EFFECT_,COLORIMETRY_,CMF_", kelvin_meta, cmsSigMetaTag );

lcm2CreateAbstractWhitePointProfileBClean:
  if(i_curve[0]) cmsFreeToneCurve( i_curve[0] );
  if(o_curve[0]) cmsFreeToneCurve( o_curve[0] );
  if(o_curve[1]) cmsFreeToneCurve( o_curve[1] );
  if(h_profile)
    *h_profile = profile;
  else if(profile && *my_abstract_file_name)
  {
    char * fn = lcm2WriteProfileToFile( profile, *my_abstract_file_name, 0,0 );
    lcm2msg_p( 302, NULL, "wrote to: %s", fn?fn:"----");
    lcm2Free_m(fn);
    cmsCloseProfile( profile );
  }

  return error;
}
/** Function lcm2CreateProfileFragment
* @brief Create a color profile starter
*
* In case both the in_space_profile and out_space_profile arguments are set
* to "*lab", the profile will be set to class abstract. In case the
* in_space_profile is not "*lab" and the later one is different, a color
* profile of class input will be generated. With in_space_profile not "*lab"
* and out_space_profile "*lab" a color profile of class output will be
* generated. Note such profiles have initially no backward LUT and can not
* be used for inverse color transforms, which might be a problem for general
* purpose ICC profiles. But you can add more tables if needed by passing in a
* previously created profile.
*
* All profiles generated by this function are meant to be filled with
* colorimetric data by e.g. lcm2CreateProfileLutByFunc() or
* lcm2CreateICCMatrixProfile2().
*
* Here a code example:
* @code
cmsHPROFILE profile = lcm2CreateProfileFragment (
"*srgb", // sRGB
"*lab", // CIE*Lab
2.3,
"MySpace (MyProject)",
"My Project 2016",
"My Name",
ICC_2011_LICENSE,
"My Box", "www.mydomain.net", NULL
);
@endcode
*
* @param[in] in_space_profile input color space; for wildcards see
* lcm2OpenProfileFile()
* @param[in] out_space_profile output color space; for wildcards see
* lcm2OpenProfileFile()
* @param[in] icc_profile_version 2.3 or 4.3
* @param[in] my_abstract_description internal profile name
* @param[in] provider e.g. "My Project 2016"
* @param[in] vendor e.g. "My Name"
* @param[in] my_license e.g. "This profile is made available by %s, with permission of %s, and may be copied, distributed, embedded, made, used, and sold without restriction. Altered versions of this profile shall have the original identification and copyright information removed and shall not be misrepresented as the original profile."
* first %s is provider string arg and
* second %s is filled by vendor string arg
* @param[in] device_model e.g. "My Set"
* @param[in] device_manufacturer e.g. "www.mydomain.net"; hint:
* lcms <= 2.08 writes a malformed desc tag
* @param[in,out] h_profile use existing profile; optional
*
* @version Oyranos: 0.9.6
* @date 2016/03/06
* @since 2009/11/04 (Oyranos: 0.1.10)
*/
cmsHPROFILE  lcm2CreateProfileFragment(
                             const char        * in_space_profile,
                             const char        * out_space_profile,
                             double              icc_profile_version,
                             const char        * my_abstract_description,
                             const char        * provider,
                             const char        * vendor,
                             const char        * my_license,
                             const char        * device_model,
                             const char        * device_manufacturer,
                             cmsHPROFILE         h_profile
                           )
{
  cmsHPROFILE h_in_space = 0,
              h_out_space = 0;
  cmsColorSpaceSignature csp_in, csp_out;
  cmsProfileClassSignature profile_class = cmsSigAbstractClass;
  cmsMLU * mlu[4] = {0,0,0,0};
  int i;
  char * license = NULL;

  /* Reuse the caller supplied profile or start from an empty placeholder. */
  if(!h_profile)
    h_profile = cmsCreateProfilePlaceholder( 0 );
  if(!h_profile) goto lcm2CreateProfileFragmentClean;

  if(in_space_profile)  h_in_space  = lcm2OpenProfileFile( in_space_profile, NULL );
  if(out_space_profile) h_out_space = lcm2OpenProfileFile( out_space_profile, NULL );

  csp_in  = cmsGetColorSpace( h_in_space );
  csp_out = cmsGetColorSpace( h_out_space );

  cmsSetProfileVersion( h_profile, icc_profile_version );

  /* Derive the ICC device class from whether each side is a PCS (Lab/XYZ). */
#define CSP_IS_PCS(csp) (csp == cmsSigLabData || csp == cmsSigXYZData)
  if( CSP_IS_PCS(csp_in) && CSP_IS_PCS(csp_out) )
    profile_class = cmsSigAbstractClass;
  else if( CSP_IS_PCS(csp_out) )
    profile_class = cmsSigInputClass;
  else if( CSP_IS_PCS(csp_in) )
    profile_class = cmsSigOutputClass;
  else
    profile_class = cmsSigLinkClass;

  cmsSetDeviceClass( h_profile, profile_class );
  cmsSetColorSpace( h_profile, csp_in );
  cmsSetPCS( h_profile, csp_out );

  for(i = 0; i < 4; ++i)
    mlu[i] = cmsMLUalloc(0,1);
  /* BUGFIX: the original returned here, leaking h_in_space/h_out_space and
   * any MLUs that did get allocated; fall into the common cleanup instead. */
  if(!(mlu[0] && mlu[1] && mlu[2] && mlu[3]))
    goto lcm2CreateProfileFragmentClean;

  cmsMLUsetASCII(mlu[0], "EN", "us", my_abstract_description);
  cmsWriteTag( h_profile, cmsSigProfileDescriptionTag, mlu[0] );

  if(device_model)
  {
    cmsMLUsetASCII(mlu[1], "EN", "us", device_model);
    cmsWriteTag( h_profile, cmsSigDeviceModelDescTag, mlu[1]);
  }
  if(device_manufacturer)
  {
    cmsMLUsetASCII(mlu[2], "EN", "us", device_manufacturer);
    cmsWriteTag( h_profile, cmsSigDeviceMfgDescTag, mlu[2]);
  }

  /* The two "%s" place holders consume four format bytes, so the buffer is
   * always large enough for the expanded copyright text.
   * NOTE(review): my_license/provider/vendor are assumed non NULL here, as
   * in the original -- confirm against all callers. */
  license = (char *) malloc( strlen(my_license) + strlen(provider) + strlen(vendor) + 1 );
  if(!license) goto lcm2CreateProfileFragmentClean;
  sprintf( license, my_license, provider, vendor );

  cmsMLUsetASCII(mlu[3], "EN", "us", license);
  cmsWriteTag( h_profile, cmsSigCopyrightTag, mlu[3]);

  cmsWriteTag( h_profile, cmsSigMediaWhitePointTag, cmsD50_XYZ() );

lcm2CreateProfileFragmentClean:
  if(h_in_space)  { cmsCloseProfile( h_in_space ); }  h_in_space = 0;
  if(h_out_space) { cmsCloseProfile( h_out_space ); } h_out_space = 0;
  for(i = 0; i < 4; ++i)
    cmsMLUfree( mlu[i] ); /* cmsMLUfree tolerates NULL entries */
  lcm2Free_m(license);

  return h_profile;
}
/* Report the host byte order: 1 on a big endian machine, 0 on little endian. */
int isBigEndian ()
{
  union {
    unsigned short u16;
    unsigned char  c;
  } probe;
  probe.u16 = 1;
  /* On little endian hosts the low byte of 1 is stored first. */
  return probe.c == 0;
}
/* UTF-8 to WCHAR_T conversion */
typedef uint32_t UTF32; /* at least 32 bits */
typedef uint16_t UTF16; /* at least 16 bits */
typedef uint8_t UTF8; /* typically 8 bits */
typedef unsigned char Boolean; /* 0 or 1 */
/* Some fundamental constants */
#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD
#define UNI_MAX_BMP (UTF32)0x0000FFFF
#define UNI_MAX_UTF16 (UTF32)0x0010FFFF
#define UNI_MAX_UTF32 (UTF32)0x7FFFFFFF
#define UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF
typedef enum {
conversionOK, /* conversion successful */
sourceExhausted, /* partial character in source, but hit end */
targetExhausted, /* insuff. room in target for conversion */
sourceIllegal /* source sequence is illegal/malformed */
} lcm2UtfConversionResult;
typedef enum {
strictConversion = 0,
lenientConversion
} lcm2UtfConversionFlags;
static const int halfShift = 10; /* used for shifting by 10 bits */
static const UTF32 halfBase = 0x0010000UL;
static const UTF32 halfMask = 0x3FFUL;
#define UNI_SUR_HIGH_START (UTF32)0xD800
#define UNI_SUR_HIGH_END (UTF32)0xDBFF
#define UNI_SUR_LOW_START (UTF32)0xDC00
#define UNI_SUR_LOW_END (UTF32)0xDFFF
#define false 0
#define true 1
/*
* Index into the table below with the first byte of a UTF-8 sequence to
* get the number of trailing bytes that are supposed to follow it.
* Note that *legal* UTF-8 values can't have 4 or 5-bytes. The table is
* left as-is for anyone who may want to do such conversion, which was
* allowed in earlier algorithms.
*/
static const char trailingBytesForUTF8[256] = {
  /* 0x00-0x7F: ASCII lead bytes, no continuation bytes */
  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
  /* 0x80-0xBF: continuation bytes (invalid as lead bytes, mapped to 0) */
  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
  0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
  /* 0xC0-0xDF: two byte sequences */
  1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
  /* 0xE0-0xEF: three byte; 0xF0-0xF7: four byte; 0xF8-0xFF: 5/6 byte (illegal) */
  2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5
};
/*
* Magic values subtracted from a buffer value during UTF8 conversion.
* This table contains as many values as there might be trailing bytes
* in a UTF-8 sequence.
*/
/* offsetsFromUTF8[n]: accumulated lead/continuation marker bits for a sequence
 * with n trailing bytes; subtracted once after the shift-and-add loop. */
static const UTF32 offsetsFromUTF8[6] = { 0x00000000UL, 0x00003080UL, 0x000E2080UL,
0x03C82080UL, 0xFA082080UL, 0x82082080UL };
/*
* Utility routine to tell whether a sequence of bytes is legal UTF-8.
* This must be called with the length pre-determined by the first byte.
* If not calling this from ConvertUTF8to*, then the length can be set by:
* length = trailingBytesForUTF8[*source]+1;
* and the sequence is illegal right away if there aren't that many bytes
* available.
* If presented with a length > 4, this returns false. The Unicode
* definition of UTF-8 goes up to 4-byte sequences.
*/
/* Validate one complete UTF-8 sequence of the given byte length.
 * 'length' must already equal trailingBytesForUTF8[*source]+1; anything
 * longer than 4 bytes is rejected outright.  The outer switch checks the
 * trailing bytes from last to first (cases deliberately fall through);
 * the inner switch tightens the bound on the first trailing byte to
 * reject overlong encodings (E0/F0), UTF-16 surrogates (ED) and code
 * points above U+10FFFF (F4). */
static Boolean isLegalUTF8(const UTF8 *source, int length)
{
  UTF8 a;
  const UTF8 *srcptr = source+length;
  switch (length) {
  default: return false;
  /* Everything else falls through when "true"... */
  case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false; OY_FALLTHROUGH
  case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false; OY_FALLTHROUGH
  case 2: if ((a = (*--srcptr)) > 0xBF) return false;
    switch (*source) {
    /* no fall-through in this inner switch */
    case 0xE0: if (a < 0xA0) return false; break;
    case 0xED: if (a > 0x9F) return false; break;
    case 0xF0: if (a < 0x90) return false; break;
    case 0xF4: if (a > 0x8F) return false; break;
    default:   if (a < 0x80) return false; OY_FALLTHROUGH
    } OY_FALLTHROUGH
  case 1: if (*source >= 0x80 && *source < 0xC2) return false;
  }
  /* Lead bytes above 0xF4 would encode past U+10FFFF. */
  if (*source > 0xF4) return false;
  return true;
}
/* Convert a UTF-8 byte range to UTF-16 code units (derived from Unicode,
 * Inc.'s ConvertUTF.c).  On return *sourceStart and *targetStart point just
 * past the last unit consumed/produced, so callers can resume or measure
 * partial progress.  'flags' selects whether illegal input aborts
 * (strictConversion) or is replaced by U+FFFD (lenientConversion). */
lcm2UtfConversionResult lcm2ConvertUTF8toUTF16 (const UTF8** sourceStart, const UTF8* sourceEnd,
UTF16** targetStart, UTF16* targetEnd, lcm2UtfConversionFlags flags)
{
  lcm2UtfConversionResult result = conversionOK;
  const UTF8* source = *sourceStart;
  UTF16* target = *targetStart;
  while (source < sourceEnd) {
    UTF32 ch = 0;
    unsigned short extraBytesToRead = trailingBytesForUTF8[*source];
    /* The whole sequence must fit inside the source range. */
    if (source + extraBytesToRead >= sourceEnd) {
      result = sourceExhausted; break;
    }
    /* Do this check whether lenient or strict */
    if (! isLegalUTF8(source, extraBytesToRead+1)) {
      result = sourceIllegal;
      break;
    }
    /*
     * The cases all fall through. See "Note A" below.
     * Accumulate the code point 6 bits per trailing byte.
     */
    switch (extraBytesToRead) {
    case 5: ch += *source++; ch <<= 6; OY_FALLTHROUGH/* remember, illegal UTF-8 */
    case 4: ch += *source++; ch <<= 6; OY_FALLTHROUGH /* remember, illegal UTF-8 */
    case 3: ch += *source++; ch <<= 6; OY_FALLTHROUGH
    case 2: ch += *source++; ch <<= 6; OY_FALLTHROUGH
    case 1: ch += *source++; ch <<= 6; OY_FALLTHROUGH
    case 0: ch += *source++; OY_FALLTHROUGH
    }
    /* Remove the accumulated UTF-8 marker bits in one subtraction. */
    ch -= offsetsFromUTF8[extraBytesToRead];

    if (target >= targetEnd) {
      source -= (extraBytesToRead+1); /* Back up source pointer! */
      result = targetExhausted; break;
    }
    if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */
      /* UTF-16 surrogate values are illegal in UTF-32 */
      if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
        if (flags == strictConversion) {
          source -= (extraBytesToRead+1); /* return to the illegal value itself */
          result = sourceIllegal;
          break;
        } else {
          *target++ = UNI_REPLACEMENT_CHAR;
        }
      } else {
        *target++ = (UTF16)ch; /* normal case */
      }
    } else if (ch > UNI_MAX_UTF16) {
      if (flags == strictConversion) {
        result = sourceIllegal;
        source -= (extraBytesToRead+1); /* return to the start */
        break; /* Bail out; shouldn't continue */
      } else {
        *target++ = UNI_REPLACEMENT_CHAR;
      }
    } else {
      /* target is a character in range 0xFFFF - 0x10FFFF: emit a
       * surrogate pair, which needs two units of target room. */
      if (target + 1 >= targetEnd) {
        source -= (extraBytesToRead+1); /* Back up source pointer! */
        result = targetExhausted; break;
      }
      ch -= halfBase;
      *target++ = (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START);
      *target++ = (UTF16)((ch & halfMask) + UNI_SUR_LOW_START);
    }
  }
  *sourceStart = source;
  *targetStart = target;
  return result;
}
wchar_t * lcm2Utf8ToWchar ( const char * text )
{
wchar_t * wchar_out, * tmp_out;
char * in, * tmp_in;
size_t in_len = strlen(text),
out_len = in_len*sizeof(wchar_t)+sizeof(wchar_t);
lcm2UtfConversionResult error;
if(!in_len) return 0;
else ++in_len;
tmp_out = wchar_out = calloc( in_len+1, sizeof(wchar_t) );
in = tmp_in = strdup( text );
error = lcm2ConvertUTF8toUTF16( (const UTF8**)&in, (const UTF8*)in+in_len, (UTF16**)&tmp_out, (UTF16*)(tmp_out+out_len), lenientConversion );
if(error == conversionOK)
{
/* store UTF16BE in wchar_t for lcms2 */
uint16_t * icc_utf16 = (uint16_t*) wchar_out;
int i;
for(i = in_len; i >= 0; --i) wchar_out[i] = icc_utf16[i];
}
else
{
lcm2msg_p( 300, NULL, "error[%d] %lu %lu %s", error, in_len, out_len, text );
lcm2Free_m(wchar_out);
}
lcm2Free_m( tmp_in );
return wchar_out;
}
/** Function lcm2AddMluDescription
* @brief Add translated texts to a profile
*
 * Iterates over the provided string list, converts each entry from "UTF-8"
 * to "WCHAR_T" for lcms, and
 * does byteswapping on little endian machines.
*
* Here a code example:
* @code
const char * texts[] = {
"de", "DE", "Mein Text",
"en", "US", "My Text"
};
lcm2AddMluDescription ( profile, texts,
cmsSigProfileDescriptionMLTag
);
@endcode
*
* @param[in,out] profile color profile
* @param[in] texts language + country + text list
* @param[in] tag_sig signature
*
* @version Oyranos: 0.9.6
* @date 2016/03/13
* @since 2016/03/13 (Oyranos: 0.9.6)
*/
void         lcm2AddMluDescription ( cmsHPROFILE profile,
                                     const char * texts[],
                                     cmsTagSignature tag_sig )
{
  int n = 0, i;
  cmsMLU * mlu = NULL;

  /* count the entries of the NULL terminated list */
  if(texts)
    while( texts[n] ) ++n;
  if(!n) return;

  mlu = cmsMLUalloc( 0, n/3 + 1 );
  if(!mlu) return;

  /* Entries come in triples: language, country, text.  BUGFIX: the bound
   * i+2 < n skips a trailing incomplete triple instead of reading past the
   * list terminator as the old i < n condition did. */
  for( i = 0; i + 2 < n; i += 3 )
  {
    char lang[4] = {0,0,0,0}, country[4] = {0,0,0,0};
    const char * text = texts[i+2];
    wchar_t * wchar_out;

    wchar_out = lcm2Utf8ToWchar( text );
    if(!wchar_out) continue;

    /* the language code is stored as readable 4 byte string */
    lang[0] = texts[i+0][0]; lang[1] = texts[i+0][1];
    country[0] = texts[i+1][0]; country[1] = texts[i+1][1];

    cmsMLUsetWide( mlu, lang, country, wchar_out );
    lcm2Free_m( wchar_out );
  }

  cmsWriteTag( profile, tag_sig, mlu );
  cmsMLUfree( mlu );
}
/** Function lcm2AddMetaTexts
* @brief Add meta data to a profile
*
 * Iterates over the provided key/value list, converts each entry from "UTF-8"
 * to "WCHAR_T" for lcms, and
 * does byteswapping on little endian machines.
*
* Here a code example:
* @code
const char * texts[] = {
"GROUP_key1", "value1",
"DOMAIN_key2", "value2"
};
lcm2AddMetaTexts ( profile, "GROUP_,DOMAIN_", texts,
cmsSigMetaTag
);
@endcode
*
* A prefix allows for grouping of keys like "EDID_" or "EXIF_".
* The prefix part might be cut off in some cases to access an other level
* of keys. Think of "EDID_model" for monitors and "EXIF_model" for cameras,
* which both represent the key "model" concept.
*
* @param[in,out] profile color profile
* @param[in] prefixes The used uppercase prefix list.
* @param[in] key_value key + value list
* @param[in] tag_sig signature
*
* @version Oyranos: 0.9.7
* @date 2017/02/11
* @since 2017/02/11 (Oyranos: 0.9.7)
*/
void         lcm2AddMetaTexts   ( cmsHPROFILE profile,
                                  const char * prefixes,
                                  const char * key_value[],
                                  cmsTagSignature tag_sig )
{
  int n = 0, i;
  cmsHANDLE dict = NULL;
  cmsContext contextID = cmsCreateContext( NULL,NULL );
  wchar_t * wchar_key = NULL, * wchar_val = NULL;

  /* count the entries of the NULL terminated list */
  if(key_value)
    while( key_value[n] ) ++n;

  if(n)
    dict = cmsDictAlloc( contextID );
  else
    lcm2msg_p( 300, NULL, "nothing to write %s", __func__ );

  if(!dict)
  {
    cmsDeleteContext( contextID ); /* BUGFIX: was leaked on this path */
    return;
  }

  /* a "prefix" entry advertises the used key groups, e.g. "EDID_,EXIF_" */
  if(prefixes)
  {
    wchar_key = lcm2Utf8ToWchar( "prefix" );
    wchar_val = lcm2Utf8ToWchar( prefixes );
  }
  if(wchar_key && wchar_val)
    cmsDictAddEntry( dict, wchar_key, wchar_val, NULL,NULL );
  lcm2Free_m( wchar_key );
  lcm2Free_m( wchar_val );

  /* Entries come in pairs.  BUGFIX: the bound i+1 < n skips a trailing key
   * without a value instead of reading past the list terminator. */
  for( i = 0; i + 1 < n; i += 2 )
  {
    const char * key = key_value[i+0],
               * val = key_value[i+1];
    wchar_key = lcm2Utf8ToWchar(key);
    wchar_val = lcm2Utf8ToWchar(val);
    if(!wchar_key || !wchar_val)
    {
      lcm2Free_m( wchar_key );
      lcm2Free_m( wchar_val );
      continue;
    }
    cmsDictAddEntry( dict, wchar_key, wchar_val, NULL,NULL );
    lcm2Free_m( wchar_key );
    lcm2Free_m( wchar_val );
  }

  cmsWriteTag( profile, tag_sig, dict );
  cmsDictFree( dict );
  cmsDeleteContext( contextID ); /* BUGFIX: was leaked on every call */
}
/** Function lcm2CreateICCMatrixProfile2
* @brief Create a profile from primaries, white point and one gamma value
*
* Used for ICC from EDID, Camera RAW etc. Marti calls these matrix/shaper.
* @code
// create linear space with REC.709/sRGB primaries and D65 white point
cmsHPROFILE h_my_space = lcm2CreateICCMatrixProfile2( 1.0, 0.64,0.33, 0.30,0.60, 0.15,0.06, 0.3127,0.329 );
@endcode
*
* @version Oyranos: 0.9.6
* @date 2016/03/04
* @since 2009/10/24 (Oyranos: 0.1.10)
*/
cmsHPROFILE  lcm2CreateICCMatrixProfile2 (
                                       float gamma,
                                       float rx, float ry,
                                       float gx, float gy,
                                       float bx, float by,
                                       float wx, float wy )
{
  cmsHPROFILE profile = 0;
  cmsToneCurve * curves[3];
  cmsCIExyYTRIPLE primaries;
  cmsCIExyY white; /* e.g. 0.31271, 0.32902 for D65 */

  primaries.Red.x   = rx;  primaries.Red.y   = ry;  primaries.Red.Y   = 1.0;
  primaries.Green.x = gx;  primaries.Green.y = gy;  primaries.Green.Y = 1.0;
  primaries.Blue.x  = bx;  primaries.Blue.y  = by;  primaries.Blue.Y  = 1.0;

  white.x = wx;
  white.y = wy;
  white.Y = 1.0;

  /* one shared tone curve serves all three channels */
  curves[0] = curves[1] = curves[2] = cmsBuildGamma(0, (double)gamma);
  if(!curves[0]) return NULL;

  profile = cmsCreateRGBProfile( &white, &primaries, curves );

  /* a single allocation backs all three slots -- free it once */
  cmsFreeToneCurve( curves[0] );

  return profile;
}
/** Function lcm2MessageFunc
* @brief default message function to console
*
* The default message function is used as a message printer to the console
* from library start.
*
* @param code a message code understood be your message
* handler or openiccMSG_e
* @param context_object a openicc object is expected
* @param format the text format string for following args
* @param ... the variable args fitting to format
* @return 0 - success; 1 - error
*
* @version OpenICC: 0.1.0
* @date 2009/07/20
* @since 2008/04/03 (OpenICC: 0.1.0)
*/
int  lcm2MessageFunc            ( int/*openiccMSG_e*/ code OY_UNUSED,
                                  const void        * context_object OY_UNUSED,
                                  const char        * format,
                                  ... )
{
  char * text = 0;
  int error = 0;
  va_list list;
  int len = 0;

  /* first pass: measure the formatted length (NULL/0 is valid in C99) */
  va_start( list, format);
  len = vsnprintf( text, 0, format, list);
  va_end  ( list );

  /* BUGFIX: vsnprintf reports encoding errors with a negative value; a
   * negative len would wrap len+2 into a huge size_t below. */
  if(len < 0)
    return 1;

  text = calloc( sizeof(char), len+2 );
  if(!text)
  {
    /* the old message claimed a fixed "256 byte" allocation */
    fprintf(stderr, "Could not allocate %d byte of memory.\n", len+2);
    return 1;
  }

  /* second pass: render into the right-sized buffer */
  va_start( list, format);
  len = vsnprintf( text, len+1, format, list);
  va_end  ( list );

  fprintf( stderr, "%s\n", text );
  lcm2Free_m( text );

  return error;
}
lcm2Message_f lcm2msg_p = lcm2MessageFunc;
/** @brief set a custom message function
*
* Use to connect to user message system.
*/
int            lcm2MessageFuncSet ( lcm2Message_f       message_func )
{
  /* a NULL argument restores the built-in console printer */
  lcm2msg_p = message_func ? message_func : lcm2MessageFunc;
  return 1;
}
/** @brief run time API version
*/
int            lcm2Version      ( )
{
  /* the API level this library was compiled against */
  return LCM2PROFILER_API;
}
/** @} */ /* profiler */
/** \addtogroup profiler
*
 * Oyranos ICC Profiler API provides a platform-independent C interface to generate
 * ICC profiles. Its main purpose is to generate ICC Profiles in a programmatic way.
* The only dependency is littleCMS 2
* <a href="http://www.littlecms.com">www.littlecms.com</a>.
* It reduces the need of many of the lcms2
* boilerplate for format independent sampling, multi localised strings from UTF8
* and more. The sampler collection contains effects and color space converters.
* The code consists of one source file and a header. So it can easily
* be placed inside your project.
*
*
* @section api API Documentation
* The Oyranos ICC Profiler API is contained in the lcm2_profiler.h header file.
*
* The high level API takes few arguments and generates a profile in
* one go.
* Effect profiles can be created in one call
* by lcm2CreateAbstractProfile(). It needs a @ref samplers function, which
* fills the Look Up Table (LUT). Three APIs exist to generate white point
* effects, lcm2CreateAbstractTemperatureProfile() and
* lcm2CreateAbstractWhitePointProfileLab() or
* lcm2CreateAbstractWhitePointProfileBradford(). These above high level APIs allow to
* write the profile to disc in one go.
*
* The lower level APIs can be used to customise the profile generation.
* Basic matrix/shaper profiles can be created with
* lcm2CreateICCMatrixProfile2() and filled with custom texts in
* lcm2CreateProfileFragment().
*
* The following low level code sample comes from @ref lcm2_profiler.c.
* The code sets up a basic profile description and color spaces:
* @dontinclude lcm2_profiler.c
* @code
* // prepare some variables
* double icc_profile_version = 2.3;
* double icc_ab[2] = {0.0, 0.0};
* cmsHPROFILE profile;
* const char * kelvin_name = "5000 K"
* int error;
* int grid_size = 17;
* cmsToneCurve * i_curve[3] = {NULL,NULL,NULL}, * o_curve[3] = {NULL,NULL,NULL};
i_curve[0] = o_curve[0] = cmsBuildGamma(0, 1.0);
for(i = 1; i < 3; ++i) { i_curve[i] = o_curve[i] = i_curve[0]; }
* @endcode
* @skip fragment
@until cmsSigAToB0Tag
*
* Profile i/o happens with lcm2OpenProfileFile(), which takes file names and
* a few wildcards as arguments. lcm2WriteProfileToFile() helps writing of
* canonical profile names. lcm2WriteProfileToMem() writes a profile to a
* custom memory allocator.
*
* Most of the functions come with examples.
*
*/
|
test_linear_algebra.c | /*
* test_linear_algebra.c
* a small program to test the linear algebra code
* Brian J Gravelle
* ix.cs.uoregon.edu/~gravelle
* gravelle@cs.uoregon.edu
* See LICENSE file for licensing information and boring legal stuff
* If by some miricale you find this software useful, thanks are accepted in
* the form of chocolate or introductions to potential employers.
*/
#include "linear_algebra.h"
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>
void test_multiply();
void test_multiply_2();
void test_add();
void test_transpose();
// Entry point: optionally reads a block size from argv[1], initializes
// optional Caliper/LIKWID instrumentation, reports the OpenMP team size
// and runs the blocked matrix multiply experiment.
int main(int argc, char **argv) {

  char temp[16];        // used by the interactive scanf pauses (currently commented out)
  int block_size = 128; // NOTE(review): parsed but never used in this file -- confirm whether it is vestigial
  if (argc > 1)
    block_size = atoi(argv[1]);

  // omp_set_num_threads(48);

#ifdef USE_CALI
  cali_init();
  cali_id_t thread_attr = cali_create_attribute("thread_id", CALI_TYPE_INT, CALI_ATTR_ASVALUE | CALI_ATTR_SKIP_EVENTS);
#ifdef USE_CALI_REG
#pragma omp parallel
  {
    cali_set_int(thread_attr, omp_get_thread_num());
  }
#endif
#ifdef USE_CALI_UNCORE
  cali_set_int(thread_attr, omp_get_thread_num());
#endif
#endif

  // report the team size once (printed by thread 1, so needs >= 2 threads)
#pragma omp parallel
  {
    if (omp_get_thread_num() == 1)
      printf("Number of threads: %d\n",omp_get_num_threads());
  }

#ifdef USE_LIKWID
  LIKWID_MARKER_INIT;
  LIKWID_MARKER_REGISTER("block_mm");
#endif

  // printf("Enter 'c' to continue. Note it may require multiple entries.\n");
  // scanf("%s", temp);
  // test_add();
  // scanf("%s", temp);
  // test_transpose();
  // scanf("%s", temp);
  // test_multiply();
  test_multiply_2();
  // scanf("%s", temp);

#ifdef USE_LIKWID
  LIKWID_MARKER_CLOSE;
#endif

  printf("Bye now!\n\n");
  return 0;
}
// Exercise add_matrix on small fixed 3x2 matrices and print the result.
void test_add() {

  int col_A = 2, row_A = 3;
  double A[3][2] = {{2,2},
                    {2,2},
                    {2,2}};
  int col_B = 2, row_B = 3;
  double B[3][2] = {{2,2},
                    {2,2},
                    {2,2}};
  int col_C = 2, row_C = 3;
  double C[3][2] = {{2,2},
                    {2,2},
                    {2,2}};

  /* BUGFIX: the helpers take double** (an array of row pointers).  Casting
   * a 2D array to double** is undefined behavior -- the first rows would be
   * reinterpreted as pointers -- so build explicit row-pointer views. */
  double *Ap[3] = { A[0], A[1], A[2] };
  double *Bp[3] = { B[0], B[1], B[2] };
  double *Cp[3] = { C[0], C[1], C[2] };

  printf("\nA:\n");
  print_matrix(Ap, row_A, col_A);
  printf("\nB:\n");
  print_matrix(Bp, row_B, col_B);
  printf("\nC:\n");
  print_matrix(Cp, row_C, col_C);
  printf("\n");

  add_matrix(Ap, row_A, col_A, Bp, Cp);

  printf("\nC <- A + B:\n");
  print_matrix(Cp, row_C, col_C);
  printf("\n");
}
// Exercise multiply_matrix_v on small heap-allocated matrices (C <- A * B,
// with B pre-transposed for the kernel) and print the operands and result.
void test_multiply() {

  int col_A = 3, row_A = 3;
  int col_B = 2, row_B = 3;
  int col_C = 2, row_C = 3;
  int i,j;

  double** A  = (double**)malloc(row_A*sizeof(double*));
  double** B  = (double**)malloc(row_B*sizeof(double*));
  double** Bt = (double**)malloc(col_B*sizeof(double*));
  double** C  = (double**)malloc(row_A*sizeof(double*));
  for (i=0; i<row_A; i++) A[i]  = (double*)malloc(col_A*sizeof(double));
  for (i=0; i<row_B; i++) B[i]  = (double*)malloc(col_B*sizeof(double));
  for (i=0; i<col_B; i++) Bt[i] = (double*)malloc(row_B*sizeof(double));
  for (i=0; i<row_A; i++) C[i]  = (double*)malloc(col_B*sizeof(double));

  for (i=0; i<row_A; i++)
    for (j=0; j<col_A; j++)
      A[i][j] = 1.0 + i + j;
  for (i=0; i<row_B; i++)
    for (j=0; j<col_B; j++)
      B[i][j] = 1.0 + i + j;
  for (i=0; i<row_A; i++)
    for (j=0; j<col_B; j++)
      C[i][j] = 2.0;

  transpose_matrix(B, row_B, col_B, Bt);

  printf("\nA:\n");
  print_matrix(A, row_A, col_A);
  printf("\nB:\n");
  print_matrix(B, row_B, col_B);
  printf("\nC:\n");
  print_matrix(C, row_C, col_C);
  printf("\n");

  multiply_matrix_v(A, row_A, col_A, Bt, col_B, C);

  printf("\nC <- A * B:\n");
  print_matrix(C, row_C, col_C);
  printf("\n");

  /* BUGFIX: the original leaked every allocation made above */
  for (i=0; i<row_A; i++) free(A[i]);
  for (i=0; i<row_B; i++) free(B[i]);
  for (i=0; i<col_B; i++) free(Bt[i]);
  for (i=0; i<row_A; i++) free(C[i]);
  free(A); free(B); free(Bt); free(C);
}
// Benchmark driver: fills size x size random matrices, runs the multiply
// kernel selected at compile time (DEFAULT/INTERCHANGE/TRANSPOSE/...),
// optionally validates against a naive product, and reports timing/FLOPS.
void test_multiply_2() {

  int size = 17;
#ifdef ORDER
  size = ORDER;
#endif

  printf("Running blocked matrix multiply.\n");
  printf(" Matrix side length: %d\n", size);
  printf(" Block columns is: %d\n", BLOCK_COLS);
  printf(" Block rows is: %d\n", BLOCK_ROWS);

  int i,j,k;
  double wall_start, wall_end;
  double exp_start, exp_end;
  double ee = 0.0; /* accumulated squared error vs. the validation matrix */
  // double a_val = 3.0;
  // double b_val = 4.0;
  // double v = a_val * b_val * size;

  double** A;
  double** B;
  double** B_T; /* B transposed, for the cache-friendly kernels */
  double** C;
  double** V;   /* used for validation */
  int validate = 0;
  // validate = 1;

  printf("Allocating and filling matrices...\n"); fflush(stdout);
  srand((unsigned int)time(NULL));

  /* BUGFIX: the original allocated C twice, leaking the first allocation. */
  A   = (double**)aligned_alloc(64,size*sizeof(double*));
  for (i=0; i<size; i++) A[i]   = (double*)aligned_alloc(64,size*sizeof(double));
  B   = (double**)aligned_alloc(64,size*sizeof(double*));
  for (i=0; i<size; i++) B[i]   = (double*)aligned_alloc(64,size*sizeof(double));
  B_T = (double**)aligned_alloc(64,size*sizeof(double*));
  for (i=0; i<size; i++) B_T[i] = (double*)aligned_alloc(64,size*sizeof(double));
  C   = (double**)aligned_alloc(64,size*sizeof(double*));
  for (i=0; i<size; i++) C[i]   = (double*)aligned_alloc(64,size*sizeof(double));
  V   = (double**)aligned_alloc(64,size*sizeof(double*));
  for (i=0; i<size; i++) V[i]   = (double*)aligned_alloc(64,size*sizeof(double));

  /* NOTE(review): rand() is not specified as thread safe; calling it from a
   * parallel region gives non-reproducible fills -- confirm this is intended. */
#pragma omp parallel for private(i,j,k)
  for (i=0; i<size; i++) {
    for (j=0; j<size; j++) {
      A[i][j] = (double)rand()/(double)(RAND_MAX);
      B[i][j] = (double)rand()/(double)(RAND_MAX);
      C[i][j] = 0.0;
      V[i][j] = 0.0;
    }
  }
  transpose_matrix(B, size, size, B_T);

  //fill validation matrix based on naive implementation
  if (validate) {
    printf("producing validation matrix...\n"); fflush(stdout);
    multiply_matrix_vp(A, size, size, B_T, size, V);
  }

  // printf(" sizeof A is: %d\n", sizeof(A[0]));
  printf("Performing multiplication experiment...\n"); fflush(stdout);
  wall_start = omp_get_wtime();

  // printf("Using default sequential implementation...\n"); fflush(stdout);
  // multiply_matrix_s(A, size, size, B, size, C);
#ifdef DEFAULT
  printf("Using default parallel implementation..."); fflush(stdout);
  exp_start = omp_get_wtime();
  multiply_matrix_d(A, size, size, B, size, C);
  exp_end = omp_get_wtime(); printf(" %fs\n", (exp_end - exp_start));
#endif
#ifdef INTERCHANGE
  printf("Using loop interchanged parallel implementation..."); fflush(stdout);
  exp_start = omp_get_wtime();
  multiply_matrix_i(A, size, size, B, size, C);
  exp_end = omp_get_wtime(); printf(" %fs\n", (exp_end - exp_start));
#endif
#ifdef TRANSPOSE
  printf("Using transposed parallel implementation..."); fflush(stdout);
  exp_start = omp_get_wtime();
  multiply_matrix_t(A, size, size, B_T, size, C);
  exp_end = omp_get_wtime(); printf(" %fs\n", (exp_end - exp_start));
#endif
#ifdef UNROLLJAM
  printf("Using unroll-jam parallel implementation..."); fflush(stdout);
  exp_start = omp_get_wtime();
  multiply_matrix_uj(A, size, size, B_T, size, C);
  exp_end = omp_get_wtime(); printf(" %fs\n", (exp_end - exp_start));
#endif
#ifdef BLOCKED
  printf("Using blocked parallel implementation..."); fflush(stdout);
  exp_start = omp_get_wtime();
  multiply_matrix_b(A, size, size, B_T, size, C);
  exp_end = omp_get_wtime(); printf(" %fs\n", (exp_end - exp_start));
#endif
#ifdef BLOCK_UNROLL
  printf("Using blocked unrolled implementation..."); fflush(stdout);
  exp_start = omp_get_wtime();
  multiply_matrix_bu(A, size, size, B_T, size, C);
  exp_end = omp_get_wtime(); printf(" %fs\n", (exp_end - exp_start));
#endif
  // printf("Using blocked interchange parallel implementation..."); fflush(stdout);
  // exp_start = omp_get_wtime();
  // multiply_matrix_bi(A, size, size, B_T, size, C);
  // exp_end = omp_get_wtime(); printf(" %fs\n", (exp_end - exp_start));
  // printf("Using flipped, blocked parallel implementation...\n"); fflush(stdout);
  // exp_start = omp_get_wtime();
  // multiply_matrix_f(A, size, size, B_T, size, C);
  // exp_end = omp_get_wtime(); printf(" %fs\n", (exp_end - exp_start));

  wall_end = omp_get_wtime();

  if (validate) {
    printf("Checking result...\n"); fflush(stdout);
    // #pragma omp parallel for
    // #pragma omp parallel for reduction(+:ee) private(i,j)
    for (i=0; i<size; i++) {
      for (j=0; j<size; j++) {
        ee += (C[i][j] - V[i][j])*(C[i][j] - V[i][j]);
      }
    }
    if (ee > 0.05) {
      printf("Multiply complete: Failed\n"); /* BUGFIX: was misspelled "Falied" */
      printf("Error: %f\n", ee);
    } else {
      printf("Multiply complete: Success\n");
      printf("Time: %fs\n", (wall_end - wall_start));
      printf("FLOPS Theoretical: %f\n", ((double)size*(double)size*(double)size*2.0));
      printf("FLOPS per second: %f\n", ((double)size*(double)size*(double)size*2.0/(wall_end - wall_start)));
    }
  } else {
    printf("Multiply complete, no validation.\n");
    printf("Time: %fs\n", (wall_end - wall_start));
    printf("FLOPS Theoretical: %f\n", ((double)size*(double)size*(double)size*2.0));
    printf("FLOPS per second: %f\n", ((double)size*(double)size*(double)size*2.0/(wall_end - wall_start)));
  }

  /* BUGFIX: also free B_T, which the original leaked */
  for (i=0; i<size; i++) {
    free(A[i]);
    free(B[i]);
    free(B_T[i]);
    free(C[i]);
    free(V[i]);
  }
  free(A);
  free(B);
  free(B_T);
  free(C);
  free(V);
}
// Exercise transpose_matrix on a fixed 4x3 matrix and print the 3x4 result.
void test_transpose() {

  int col_A = 3, row_A = 4;
  double A[4][3] = {{1, 2, 3},
                    {4, 5, 6},
                    {7, 8, 9},
                    {10,11,12}};
  int col_C = 4, row_C = 3;
  double C[3][4] = {{2,2,2,2},
                    {2,2,2,2},
                    {2,2,2,2}};

  /* BUGFIX: the helpers expect double** (row pointers); casting a 2D array
   * to double** is undefined behavior, so build explicit row-pointer views. */
  double *Ap[4] = { A[0], A[1], A[2], A[3] };
  double *Cp[3] = { C[0], C[1], C[2] };

  printf("\nA:\n");
  print_matrix(Ap, row_A, col_A);
  printf("\nC:\n");
  print_matrix(Cp, row_C, col_C);
  printf("\n");

  transpose_matrix(Ap, row_A, col_A, Cp);

  printf("\nC <- A^T:\n");
  print_matrix(Cp, row_C, col_C);
  printf("\n");
}
|
threading.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#ifndef LIGHTGBM_UTILS_THREADING_H_
#define LIGHTGBM_UTILS_THREADING_H_
#include <LightGBM/utils/openmp_wrapper.h>
#include <functional>
#include <vector>
namespace LightGBM {
/*! \brief Static helper to split an index range into one contiguous chunk
 *         per OpenMP thread. */
class Threading {
public:
  /*!
  * \brief Runs inner_fun over [start, end) with one chunk per thread.
  * \param start     first index (inclusive)
  * \param end       last index (exclusive)
  * \param inner_fun callback invoked as (thread_index, chunk_start, chunk_end)
  */
  template<typename INDEX_T>
  static inline void For(INDEX_T start, INDEX_T end, const std::function<void(int, INDEX_T, INDEX_T)>& inner_fun) {
    int num_threads = 1;
    // discover the actual team size before partitioning the range
#pragma omp parallel
#pragma omp master
    {
      num_threads = omp_get_num_threads();
    }
    // ceil((end - start) / num_threads): per-thread chunk length
    INDEX_T num_inner = (end - start + num_threads - 1) / num_threads;
    if (num_inner <= 0) { num_inner = 1; }
    OMP_INIT_EX();
#pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < num_threads; ++i) {
      OMP_LOOP_EX_BEGIN();
      INDEX_T inner_start = start + num_inner * i;
      INDEX_T inner_end = inner_start + num_inner;
      if (inner_end > end) { inner_end = end; }
      // threads whose chunk begins past the end do nothing
      if (inner_start < end) {
        inner_fun(i, inner_start, inner_end);
      }
      OMP_LOOP_EX_END();
    }
    OMP_THROW_EX();  // rethrow any exception captured inside the parallel loop
  }
};
} // namespace LightGBM
#endif // LightGBM_UTILS_THREADING_H_
|
search.h | // -*- C++ -*-
// Copyright (C) 2007-2021 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.
// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.
// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
// <http://www.gnu.org/licenses/>.
/** @file parallel/search.h
* @brief Parallel implementation base for std::search() and
* std::search_n().
* This file is a GNU parallel extension to the Standard C++ Library.
*/
// Written by Felix Putze.
#ifndef _GLIBCXX_PARALLEL_SEARCH_H
#define _GLIBCXX_PARALLEL_SEARCH_H 1
#include <bits/stl_algobase.h>
#include <parallel/parallel.h>
#include <parallel/equally_split.h>
namespace __gnu_parallel
{
/**
* @brief Precalculate __advances for Knuth-Morris-Pratt algorithm.
* @param __elements Begin iterator of sequence to search for.
* @param __length Length of sequence to search for.
* @param __off Returned __offsets.
*/
template<typename _RAIter, typename _DifferenceTp>
  void
  __calc_borders(_RAIter __elements, _DifferenceTp __length,
                 _DifferenceTp* __off)
  {
    typedef _DifferenceTp _DifferenceType;

    // Standard Knuth-Morris-Pratt failure function ("borders"):
    // __off[j] is the length of the longest proper border of the pattern
    // prefix of length j; -1 marks the virtual position before the start.
    __off[0] = -1;
    if (__length > 1)
      __off[1] = 0;
    _DifferenceType __k = 0;
    for (_DifferenceType __j = 2; __j <= __length; __j++)
      {
        // shrink the candidate border until it can be extended by the
        // character just before position j
        while ((__k >= 0) && !(__elements[__k] == __elements[__j-1]))
          __k = __off[__k];
        __off[__j] = ++__k;
      }
  }
// Generic parallel find algorithm (requires random access iterator).
/** @brief Parallel std::search.
* @param __begin1 Begin iterator of first sequence.
* @param __end1 End iterator of first sequence.
* @param __begin2 Begin iterator of second sequence.
* @param __end2 End iterator of second sequence.
* @param __pred Find predicate.
* @return Place of finding in first sequences. */
template<typename __RAIter1,
         typename __RAIter2,
         typename _Pred>
  __RAIter1
  __search_template(__RAIter1 __begin1, __RAIter1 __end1,
                    __RAIter2 __begin2, __RAIter2 __end2,
                    _Pred __pred)
  {
    typedef std::iterator_traits<__RAIter1> _TraitsType;
    typedef typename _TraitsType::difference_type _DifferenceType;

    _GLIBCXX_CALL((__end1 - __begin1) + (__end2 - __begin2));

    _DifferenceType __pattern_length = __end2 - __begin2;

    // Pattern too short.
    if(__pattern_length <= 0)
      return __end1;

    // Last point to start search.
    _DifferenceType __input_length = (__end1 - __begin1) - __pattern_length;

    // Where is first occurrence of pattern? defaults to end.
    _DifferenceType __result = (__end1 - __begin1);
    _DifferenceType *__splitters;

    // Pattern too long.
    if (__input_length < 0)
      return __end1;

    // Guards concurrent updates of __result (leftmost match so far).
    omp_lock_t __result_lock;
    omp_init_lock(&__result_lock);

    _ThreadIndex __num_threads = std::max<_DifferenceType>
      (1, std::min<_DifferenceType>(__input_length,
                                    __get_max_threads()));

    // KMP failure function for the pattern (note: a VLA, GNU extension).
    _DifferenceType __advances[__pattern_length];
    __calc_borders(__begin2, __pattern_length, __advances);

#   pragma omp parallel num_threads(__num_threads)
    {
      // One thread computes the range splitters; the implicit barrier of
      // "single" publishes them to the whole team before use.
#     pragma omp single
      {
        __num_threads = omp_get_num_threads();
        __splitters = new _DifferenceType[__num_threads + 1];
        __equally_split(__input_length, __num_threads, __splitters);
      }

      _ThreadIndex __iam = omp_get_thread_num();

      _DifferenceType __start = __splitters[__iam],
                      __stop = __splitters[__iam + 1];

      _DifferenceType __pos_in_pattern = 0;
      bool __found_pattern = false;

      while (__start <= __stop && !__found_pattern)
        {
          // Get new value of result.
#pragma omp flush(__result)
          // No chance for this thread to find first occurrence.
          if (__result < __start)
            break;
          while (__pred(__begin1[__start + __pos_in_pattern],
                        __begin2[__pos_in_pattern]))
            {
              ++__pos_in_pattern;
              if (__pos_in_pattern == __pattern_length)
                {
                  // Found new candidate for result.
                  omp_set_lock(&__result_lock);
                  __result = std::min(__result, __start);
                  omp_unset_lock(&__result_lock);

                  __found_pattern = true;
                  break;
                }
            }
          // Make safe jump using the precalculated KMP borders.
          __start += (__pos_in_pattern - __advances[__pos_in_pattern]);
          __pos_in_pattern = (__advances[__pos_in_pattern] < 0
                              ? 0 : __advances[__pos_in_pattern]);
        }
    } //parallel

    omp_destroy_lock(&__result_lock);

    delete[] __splitters;

    // Return iterator on found element.
    return (__begin1 + __result);
  }
} // end namespace
#endif /* _GLIBCXX_PARALLEL_SEARCH_H */
|
bucle-forModificado.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/*
 * Demo: an OpenMP parallel-for where each thread reports which loop
 * iteration it executes.
 *
 * Fix over the original: the iteration count was parsed with atoi(),
 * which cannot report errors (any non-numeric argument silently became
 * 0). strtol() is used instead so malformed input is rejected with the
 * same error message and exit path as a missing argument.
 */
int main(int argc, char **argv) {
    int i, n;

    if (argc < 2) {
        fprintf(stderr, "\n[ERROR] - Falta nº iteraciones \n");
        exit(-1);
    }

    /* strtol (unlike atoi) lets us detect non-numeric or negative input. */
    char *end = NULL;
    long v = strtol(argv[1], &end, 10);
    if (end == argv[1] || *end != '\0' || v < 0) {
        fprintf(stderr, "\n[ERROR] - Falta nº iteraciones \n");
        exit(-1);
    }
    n = (int)v;

    /* The loop index i is the parallel-for control variable and is
       therefore implicitly private to each thread. */
#pragma omp parallel for
    for (i = 0; i < n; i++)
        printf("thread %d ejecuta la iteración %d del bucle\n",
               omp_get_thread_num(), i);
    return (0);
}
|
unsharp.c | #include <stdio.h>
#include <unistd.h>
#include <sys/time.h>
#include <math.h>
#ifdef TIME
#define IF_TIME(foo) foo;
#else
#define IF_TIME(foo)
#endif
#ifndef M
#define M 256
#endif
#ifndef B
#define B 32
#endif
#define in(k,x,y) IN[k][tx*B+x][ty*B+y]
#define blury(k,x,y) BLURY[k][tx*B+x][ty*B+y]
#define sharpen(k,x,y) SHARPEN[k][tx*B+x][ty*B+y]
#define mask(k,x,y) MASK[k][tx*B+x][ty*B+y]
// As P is enclosed by 4 loops, after total expansion of the
// array blurx, the write access to blurx becomes a 4-d access
#define blurx(k,i,j) ((j>=0)? A[k][tx][ty % 2][i][j] : A[k][tx][(ty-1) % 2][i][B+j])
#define blurx_pos(k,i,j) A[k][tx][ty % 2][i][j]
#ifdef VERIFY
#define blurx_verify(k,x,y) BLURXV[k][x][y]
#define blury_verify(k,x,y) BLURYV[k][x][y]
#endif
// As P is enclosed by 4 loops, after total expansion of the
// array blurx, the write access to blurx becomes a 4-d access
// #define blurx(x,y) (y>0)? A[tx,ty,x,y] : A[tx,ty-1,x,B-y]
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
//#include "decls.h"
#ifdef PERFCTR
#include "papiStdEventDefs.h"
#include <papi.h>
#include "papi_defs.h"
#endif
// #include "util.h"
float SHARPEN[3][M][M];
float MASK[3][M][M];
double IN[3][M+4][M];
double A[3][M/B][2][B][B];
double BLURY[3][M][M];
#ifdef VERIFY
float sharpen_verify[3][M][M];
#define in_verify(k,x,y) IN[k][x][y]
double BLURXV[3][M][M];
double BLURYV[3][M][M];
#endif
double t_start, t_end;
/*
 * Initialize the 3-channel input image IN with the ramp i + j.
 *
 * Fix: IN is declared as IN[3][M+4][M], and the blur stencil in main
 * reads in(k, x+4, y), i.e. rows 0 .. M+3. The original loop stopped at
 * i < M+2, leaving the last two rows (M+2, M+3) uninitialized, so the
 * stencil read indeterminate values. The loop now covers all M+4 rows.
 */
void init_array()
{
    int i, j;
    for (i = 0; i < M + 4; i++) {
        for (j = 0; j < M; j++) {
            IN[0][i][j] = (i + j);
            IN[1][i][j] = (i + j);
            IN[2][i][j] = (i + j);
        }
    }
}
double rtclock()
{
struct timezone Tzp;
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, &Tzp);
if (stat != 0) printf("Error return from gettimeofday: %d",stat);
return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}
/*
 * Unsharp-mask pipeline over a 3-channel M x M image, tiled into B x B
 * blocks: blurx (vertical 5-tap blur) -> blury (horizontal 5-tap blur)
 * -> sharpen -> mask (threshold select), repeated for 10 timing trials.
 * The optional VERIFY section recomputes everything untiled and
 * compares against the tiled results.
 *
 * Fixes over the original:
 *  - abs() (integer abs from <stdlib.h>) was applied to double-valued
 *    differences, truncating them to int before comparing against the
 *    fractional threshold 0.23432; fabs() from <math.h> is now used, in
 *    both the tiled computation and the verification (consistently, so
 *    the self-check still matches).
 *  - the verification loops used "j = ++j" (undefined behavior in C99,
 *    gratuitously confusing in any case); plain "j++" is used instead.
 */
int main()
{
    int tx, ty, x, y;
    int k, i, j;
    int trial;
    double thresh = 0.23432f;
    double weight = 0.23432f;
    double _ct3, _ct4, _ct5;
    init_array();
#ifdef PERFCTR
    PERF_INIT;
#endif
    IF_TIME(t_start = rtclock());
    for (trial = 0; trial < 10 ; ++trial)
    {
#pragma scop
        for (k = 0; k <= 2; ++k)
        {
            for(ty = 0; ty <= (M-1)/B; ++ty)
                // #pragma omp parallel for private(tx,x,y)
                for(tx = 0; tx <= (M-1)/B; ++tx){
                    for(x = 0; x <= B-1; ++x){
                        /* Vertical blur into the 2-deep rotating buffer A
                           (see the blurx/blurx_pos macros above). */
                        for(y = 0; y <= B-1; ++y)
                            blurx_pos(k,x,y) = (in(k,x,y) * 0.0625f) +
                                (in(k,(x+1),y) * 0.25f) +
                                (in(k,(x+2),y) * 0.375f) +
                                (in(k,(x+3),y) * 0.25f) +
                                (in(k,(x+4),y) * 0.0625f);
                        /* Horizontal blur needs 4 columns of history, so it
                           only runs once the global column index reaches 4. */
                        for(y = 0; y <= B-1; ++y)
                            if(ty*B+y>=4)
                            {
                                blury(k,x,y) = (blurx(k,x,y) * 0.0625f) +
                                    (blurx(k,x,(y-1)) * 0.25f) +
                                    (blurx(k,x,(y-2)) * 0.375f) +
                                    (blurx(k,x,(y-3)) * 0.25f) +
                                    (blurx(k,x,(y-4)) * 0.0625f);
                                sharpen(k,x,y) = ((in(k,(x+2),(y-2)) * (1 + weight)) + (blury(k,x,y) * -(weight)));
                                _ct3 = in(k,(x+2),(y-2));
                                _ct4 = sharpen(k,x,y);
                                /* fabs, not abs: the operands are doubles and
                                   thresh is fractional. */
                                _ct5 = ((fabs((in(k,(x+2),(y-2)) - blury(k,x,y))) < thresh)? _ct3: _ct4);
                                mask(k,x,y) = _ct5;
                            }
                    }
                }
        }
#pragma endscop
    }
    IF_TIME(t_end = rtclock());
    IF_TIME(fprintf(stderr, "File:%s \t\t M=%d,T=%d \t Runtime=%0.6lfs\n", __FILE__, M, B, (t_end - t_start)/10));
#ifdef PERFCTR
    PERF_EXIT;
#endif
#ifdef VERIFY
    /* Untiled reference computation of blurx. */
    for (k = 0; k <= 2; ++k)
    {
        for (i = 0; i <= M-1; ++i)
        {
            for (j = 0; j <= M-1; j++)
            {
                blurx_verify(k,i,j) = (in_verify(k,i,j) * 0.0625f) +
                    (in_verify(k,(i+1),j) * 0.25f) +
                    (in_verify(k,(i+2),j) * 0.375f) +
                    (in_verify(k,(i+3),j) * 0.25f) +
                    (in_verify(k,(i+4),j) * 0.0625f);
            }
        }
    }
    /* Check blury against the tiled result. */
    for (k = 0; k <= 2; ++k)
    {
        for (i = 0; i <= M-1; ++i)
        {
            for (j = 4; j <= M-1; j++)
            {
                blury_verify(k,i,j) = (blurx_verify(k,i,j) * 0.0625f) +
                    (blurx_verify(k,i,(j-1)) * 0.25f) +
                    (blurx_verify(k,i,(j-2)) * 0.375f) +
                    (blurx_verify(k,i,(j-3)) * 0.25f) +
                    (blurx_verify(k,i,(j-4)) * 0.0625f);
                if(blury_verify(k,i,j) != BLURY[k][i][j])
                {
                    printf("Blury Difference at %d,%d,%d %f != %f\n", k, i, j, blury_verify(k,i,j), BLURY[k][i][j]);
                    break;
                }
            }
        }
    }
    /* Check sharpen against the tiled result. */
    for (k = 0; k <= 2; ++k)
    {
        for (i = 0; i <= M-1; ++i)
        {
            for (j = 4; j <= M-1; j++)
            {
                sharpen_verify[k][i][j] = ((in_verify(k,(i+2),(j-2)) * (1 + weight)) + (blury_verify(k,i,j) * -(weight)));
                if(sharpen_verify[k][i][j] != SHARPEN[k][i][j])
                {
                    printf("Sharpen Difference at %d,%d,%d %f != %f\n", k, i, j, sharpen_verify[k][i][j], SHARPEN[k][i][j]);
                    break;
                }
            }
        }
    }
    /* Check the thresholded mask against the tiled result. */
    for (k = 0; k <= 2; ++k)
    {
        for (i = 0; i <= M-1; ++i)
        {
            for (j = 4; j <= M-1; j++)
            {
                _ct3 = in_verify(k,(i+2),(j-2));
                _ct4 = sharpen_verify[k][i][j];
                /* fabs, matching the tiled computation above. */
                _ct5 = ((fabs((in_verify(k,(i+2),(j-2)) - blury_verify(k,i,j))) < thresh)? _ct3: _ct4);
                if(_ct5 != MASK[k][i][j])
                {
                    printf("Difference at %d,%d,%d \n", k, i, j);
                    break;
                }
            }
        }
    }
#endif
    /*
    if (fopen(".test", "r")) {
    // print_array();
    }
    */
    return 0;
}
|
DRB110-ordered-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <assert.h>
#include <stdio.h>
/* This is a program based on a test contributed by Yizi Gu@Rice Univ.
* Proper user of ordered directive and clause, no data races
* */
/* Each of the 100 iterations increments the shared counter x inside an
 * ordered region, so the increments execute one at a time in iteration
 * order — no data race despite the dynamic schedule. */
int main()
{
  int x = 0;
#pragma omp parallel for ordered schedule(dynamic)
  for (int i = 0; i < 100; i++) {
#pragma omp ordered
    {
      x++;
    }
  }
  /* Every iteration contributed exactly one increment. */
  assert (x==100);
  printf ("x=%d\n",x);
  return 0;
}
|
GB_unop__acosh_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__acosh_fc64_fc64
// op(A') function: GB_unop_tran__acosh_fc64_fc64
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = cacosh (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = cacosh (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = cacosh (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ACOSH || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = cacosh (Ax [p]) for all entries, parallelized over nthreads.
// If A is bitmap (Ab != NULL), entries with Ab [p] == 0 are skipped.
// NOTE: this file is auto-generated (see header); edits belong in the
// generator template, not here.
GrB_Info GB_unop_apply__acosh_fc64_fc64
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
// dense/sparse case: apply the op to every entry
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
// identity op with no typecast: a straight memcpy suffices
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = cacosh (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = cacosh (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = cacosh (A'): the shared transpose kernel in GB_unop_transpose.c is
// textually included here and specialized by the GB_* macros defined at
// the top of this file.
GrB_Info GB_unop_tran__acosh_fc64_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the included file provides the entire function body
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pr44085.c | /* PR middle-end/44085 */
/* { dg-do compile } */
/* { dg-require-effective-target tls_native } */
/* { dg-options "-fopenmp" } */
int thr1, thr2;
#pragma omp threadprivate (thr1, thr2)
/* Negative test: referencing threadprivate variables inside an untied
   task must be diagnosed. The dg-error directives below name the
   diagnostic expected on each line; do not reflow these lines. */
void
foo (void)
{
#pragma omp task untied /* { dg-error "enclosing task" } */
  {
    thr1++; /* { dg-error "used in untied task" } */
    thr2 |= 4; /* { dg-error "used in untied task" } */
  }
}
/* Positive counterpart: the same threadprivate accesses are legal in a
   tied (default) task, so no diagnostics are expected here. */
void
bar (void)
{
#pragma omp task
  {
    thr1++;
    thr2 |= 4;
  }
}
|
millionaire.h | /*
Authors: Deevashwer Rathee, Mayank Rathee
Copyright:
Copyright (c) 2021 Microsoft Research
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifndef MILLIONAIRE_H__
#define MILLIONAIRE_H__
#include "Millionaire/bit-triple-generator.h"
#include "OT/emp-ot.h"
#include "utils/emp-tool.h"
#include <cmath>
#include <omp.h>
#define MILL_PARAM 4
#define WAN_EXEC
// MillionaireProtocol: two-party secure comparison ("millionaires'
// problem"). Inputs are decomposed into radix-2^beta digits, per-digit
// comparison/equality shares are obtained via 1-of-2^beta OTs, and the
// per-digit results are combined in a logarithmic tree of shared AND
// gates evaluated with Beaver bit-triples. Output is an XOR-sharing of
// the comparison bit. WAN_EXEC (defined above) selects the variant that
// uses only standard triples and batches OTs into a single round.
class MillionaireProtocol {
public:
sci::IOPack *iopack;
sci::OTPack *otpack;
TripleGenerator *triple_gen;
int party;
// l: bitlength; r: l % beta (size of the last, partial digit);
// beta: radix bits per digit; beta_pow: 2^beta.
int l, r, log_alpha, beta, beta_pow;
int num_digits, num_triples_corr, num_triples_std, log_num_digits;
int num_triples;
uint8_t mask_beta, mask_r;
// Construct the protocol and derive all digit/triple counts for the
// given bitlength and radix base.
MillionaireProtocol(int party, sci::IOPack *iopack, sci::OTPack *otpack,
int bitlength = 32, int radix_base = MILL_PARAM) {
this->party = party;
this->iopack = iopack;
this->otpack = otpack;
this->triple_gen = new TripleGenerator(party, iopack, otpack);
configure(bitlength, radix_base);
}
// Recompute all derived parameters (digit count, masks, triple
// counts) for a new bitlength/radix. Called by compare() on every
// invocation, so a single instance can serve varying bitlengths.
void configure(int bitlength, int radix_base = MILL_PARAM) {
assert(radix_base <= 8);
assert(bitlength <= 64);
this->l = bitlength;
this->beta = radix_base;
this->num_digits = ceil((double)l / beta);
this->r = l % beta;
this->log_alpha = sci::bitlen(num_digits) - 1;
this->log_num_digits = log_alpha + 1;
// Tree combination needs 2 ANDs per internal non-root node
// (cmp-AND and eq-AND); the root level needs only the cmp-AND.
this->num_triples_corr = 2 * num_digits - 2 - 2 * log_num_digits;
this->num_triples_std = log_num_digits;
this->num_triples = num_triples_std + num_triples_corr;
if (beta == 8)
this->mask_beta = -1;
else
this->mask_beta = (1 << beta) - 1;
this->mask_r = (1 << r) - 1;
this->beta_pow = 1 << beta;
}
~MillionaireProtocol() { delete triple_gen; }
// Compare the num_cmps inputs in data against the other party's
// inputs; res receives this party's XOR-share of each comparison bit.
// greater_than selects > vs <; equality additionally produces
// equality shares internally. Inputs beyond bitlength bits are masked
// off digit by digit.
void compare(uint8_t *res, uint64_t *data, int num_cmps, int bitlength,
bool greater_than = true, bool equality = false,
int radix_base = MILL_PARAM) {
configure(bitlength, radix_base);
// Single-digit fast path: one 1-of-2^bitlength OT per comparison,
// no AND tree needed.
if (bitlength <= beta) {
uint8_t N = 1 << bitlength;
uint8_t mask = N - 1;
if (party == sci::ALICE) {
sci::PRG128 prg;
// res holds ALICE's random output share; the OT messages are
// the comparison bit masked with that share.
prg.random_data(res, num_cmps * sizeof(uint8_t));
uint8_t **leaf_messages = new uint8_t *[num_cmps];
for (int i = 0; i < num_cmps; i++) {
res[i] &= 1;
leaf_messages[i] = new uint8_t[N];
for (int j = 0; j < N; j++) {
if (greater_than) {
leaf_messages[i][j] = ((uint8_t(data[i] & mask) > j) ^ res[i]);
} else {
leaf_messages[i][j] = ((uint8_t(data[i] & mask) < j) ^ res[i]);
}
}
}
if (bitlength > 1) {
otpack->kkot[bitlength - 1]->send(leaf_messages, num_cmps, 1);
} else {
otpack->iknp_straight->send(leaf_messages, num_cmps, 1);
}
for (int i = 0; i < num_cmps; i++)
delete[] leaf_messages[i];
delete[] leaf_messages;
} else { // party == BOB
// BOB selects with his own digit and receives his share in res.
uint8_t *choice = new uint8_t[num_cmps];
for (int i = 0; i < num_cmps; i++) {
choice[i] = data[i] & mask;
}
if (bitlength > 1) {
otpack->kkot[bitlength - 1]->recv(res, choice, num_cmps, 1);
} else {
otpack->iknp_straight->recv(res, choice, num_cmps, 1);
}
delete[] choice;
}
return;
}
int old_num_cmps = num_cmps;
// num_cmps should be a multiple of 8
// (bit-packing in the AND steps works on whole bytes); pad with
// zero inputs and drop the padded results at the end.
num_cmps = ceil(num_cmps / 8.0) * 8;
uint64_t *data_ext;
if (old_num_cmps == num_cmps)
data_ext = data;
else {
// NOTE(review): memcpy/memset are used here but <cstring> is not
// included by this header — presumably pulled in transitively;
// verify against the project's include chain.
data_ext = new uint64_t[num_cmps];
memcpy(data_ext, data, old_num_cmps * sizeof(uint64_t));
memset(data_ext + old_num_cmps, 0,
(num_cmps - old_num_cmps) * sizeof(uint64_t));
}
uint8_t *digits; // num_digits * num_cmps
uint8_t *leaf_res_cmp; // num_digits * num_cmps
uint8_t *leaf_res_eq; // num_digits * num_cmps
digits = new uint8_t[num_digits * num_cmps];
leaf_res_cmp = new uint8_t[num_digits * num_cmps];
leaf_res_eq = new uint8_t[num_digits * num_cmps];
// Extract radix-digits from data
for (int i = 0; i < num_digits; i++) // Stored from LSB to MSB
for (int j = 0; j < num_cmps; j++)
if ((i == num_digits - 1) && (r != 0))
digits[i * num_cmps + j] =
(uint8_t)(data_ext[j] >> i * beta) & mask_r;
else
digits[i * num_cmps + j] =
(uint8_t)(data_ext[j] >> i * beta) & mask_beta;
if (party == sci::ALICE) {
uint8_t *
*leaf_ot_messages; // (num_digits * num_cmps) X beta_pow (=2^beta)
leaf_ot_messages = new uint8_t *[num_digits * num_cmps];
for (int i = 0; i < num_digits * num_cmps; i++)
leaf_ot_messages[i] = new uint8_t[beta_pow];
// Set Leaf OT messages
// ALICE's random shares of the per-digit cmp/eq results:
triple_gen->prg->random_bool((bool *)leaf_res_cmp, num_digits * num_cmps);
triple_gen->prg->random_bool((bool *)leaf_res_eq, num_digits * num_cmps);
for (int i = 0; i < num_digits; i++) {
for (int j = 0; j < num_cmps; j++) {
if (i == 0) {
// LSB digit: equality share not needed (eq = false).
set_leaf_ot_messages(leaf_ot_messages[i * num_cmps + j],
digits[i * num_cmps + j], beta_pow,
leaf_res_cmp[i * num_cmps + j], 0,
greater_than, false);
} else if (i == (num_digits - 1) && (r > 0)) {
// Partial top digit: r bits. Under WAN_EXEC all digits use
// full-width OTs so they batch into a single send.
#ifdef WAN_EXEC
set_leaf_ot_messages(leaf_ot_messages[i * num_cmps + j],
digits[i * num_cmps + j], beta_pow,
leaf_res_cmp[i * num_cmps + j],
leaf_res_eq[i * num_cmps + j], greater_than);
#else
set_leaf_ot_messages(leaf_ot_messages[i * num_cmps + j],
digits[i * num_cmps + j], 1 << r,
leaf_res_cmp[i * num_cmps + j],
leaf_res_eq[i * num_cmps + j], greater_than);
#endif
} else {
set_leaf_ot_messages(leaf_ot_messages[i * num_cmps + j],
digits[i * num_cmps + j], beta_pow,
leaf_res_cmp[i * num_cmps + j],
leaf_res_eq[i * num_cmps + j], greater_than);
}
}
}
// Perform Leaf OTs
#ifdef WAN_EXEC
// otpack->kkot_beta->send(leaf_ot_messages, num_cmps*(num_digits), 2);
otpack->kkot[beta - 1]->send(leaf_ot_messages, num_cmps * (num_digits),
2);
#else
// LSB digit carries only the 1-bit cmp share; the rest carry
// 2 bits (cmp || eq). The top partial digit uses a smaller OT.
// otpack->kkot_beta->send(leaf_ot_messages, num_cmps, 1);
otpack->kkot[beta - 1]->send(leaf_ot_messages, num_cmps, 1);
if (r == 1) {
// otpack->kkot_beta->send(leaf_ot_messages+num_cmps,
// num_cmps*(num_digits-2), 2);
otpack->kkot[beta - 1]->send(leaf_ot_messages + num_cmps,
num_cmps * (num_digits - 2), 2);
otpack->iknp_straight->send(
leaf_ot_messages + num_cmps * (num_digits - 1), num_cmps, 2);
} else if (r != 0) {
// otpack->kkot_beta->send(leaf_ot_messages+num_cmps,
// num_cmps*(num_digits-2), 2);
otpack->kkot[beta - 1]->send(leaf_ot_messages + num_cmps,
num_cmps * (num_digits - 2), 2);
otpack->kkot[r - 1]->send(
leaf_ot_messages + num_cmps * (num_digits - 1), num_cmps, 2);
} else {
// otpack->kkot_beta->send(leaf_ot_messages+num_cmps,
// num_cmps*(num_digits-1), 2);
otpack->kkot[beta - 1]->send(leaf_ot_messages + num_cmps,
num_cmps * (num_digits - 1), 2);
}
#endif
// Cleanup
for (int i = 0; i < num_digits * num_cmps; i++)
delete[] leaf_ot_messages[i];
delete[] leaf_ot_messages;
} else // party = sci::BOB
{
// Perform Leaf OTs
// BOB's OT choice is his own digit; receives packed cmp/eq shares.
#ifdef WAN_EXEC
// otpack->kkot_beta->recv(leaf_res_cmp, digits, num_cmps*(num_digits),
// 2);
otpack->kkot[beta - 1]->recv(leaf_res_cmp, digits,
num_cmps * (num_digits), 2);
#else
// otpack->kkot_beta->recv(leaf_res_cmp, digits, num_cmps, 1);
otpack->kkot[beta - 1]->recv(leaf_res_cmp, digits, num_cmps, 1);
if (r == 1) {
// otpack->kkot_beta->recv(leaf_res_cmp+num_cmps, digits+num_cmps,
// num_cmps*(num_digits-2), 2);
otpack->kkot[beta - 1]->recv(leaf_res_cmp + num_cmps, digits + num_cmps,
num_cmps * (num_digits - 2), 2);
otpack->iknp_straight->recv(leaf_res_cmp + num_cmps * (num_digits - 1),
digits + num_cmps * (num_digits - 1),
num_cmps, 2);
} else if (r != 0) {
// otpack->kkot_beta->recv(leaf_res_cmp+num_cmps, digits+num_cmps,
// num_cmps*(num_digits-2), 2);
otpack->kkot[beta - 1]->recv(leaf_res_cmp + num_cmps, digits + num_cmps,
num_cmps * (num_digits - 2), 2);
otpack->kkot[r - 1]->recv(leaf_res_cmp + num_cmps * (num_digits - 1),
digits + num_cmps * (num_digits - 1),
num_cmps, 2);
} else {
// otpack->kkot_beta->recv(leaf_res_cmp+num_cmps, digits+num_cmps,
// num_cmps*(num_digits-1), 2);
otpack->kkot[beta - 1]->recv(leaf_res_cmp + num_cmps, digits + num_cmps,
num_cmps * (num_digits - 1), 2);
}
#endif
// Extract equality result from leaf_res_cmp
// (digits > 0 packed two bits per message: bit 0 = eq, bit 1 = cmp)
for (int i = num_cmps; i < num_digits * num_cmps; i++) {
leaf_res_eq[i] = leaf_res_cmp[i] & 1;
leaf_res_cmp[i] >>= 1;
}
}
// Combine the per-digit shares up the tree; final shares land in
// leaf_res_cmp[0 .. num_cmps).
traverse_and_compute_ANDs(num_cmps, leaf_res_eq, leaf_res_cmp);
for (int i = 0; i < old_num_cmps; i++)
res[i] = leaf_res_cmp[i];
// Cleanup
if (old_num_cmps != num_cmps)
delete[] data_ext;
delete[] digits;
delete[] leaf_res_cmp;
delete[] leaf_res_eq;
}
// Fill ot_messages[i] for i in [0, N): the comparison bit (digit vs i)
// masked with mask_cmp, and — when eq is set — the equality bit masked
// with mask_eq packed into bit 0 (cmp shifted to bit 1).
void set_leaf_ot_messages(uint8_t *ot_messages, uint8_t digit, int N,
uint8_t mask_cmp, uint8_t mask_eq,
bool greater_than, bool eq = true) {
for (int i = 0; i < N; i++) {
if (greater_than) {
ot_messages[i] = ((digit > i) ^ mask_cmp);
} else {
ot_messages[i] = ((digit < i) ^ mask_cmp);
}
if (eq) {
ot_messages[i] = (ot_messages[i] << 1) | ((digit == i) ^ mask_eq);
}
}
}
/**************************************************************************************************
* AND computation related functions
**************************************************************************************************/
// Combine per-digit (cmp, eq) shares bottom-up: at each tree level,
// cmp[j] = cmp[j] XOR (eq[j+i] AND cmp[j+i]) wait — concretely,
// cmp[j] <- cmp[j+i]'s cmp propagated through eq of the lower half;
// each AND is evaluated with a Beaver triple (AND_step_1 publishes
// masked e/f shares, both parties exchange them in parallel send/recv
// threads, AND_step_2 reconstructs the product share). Standard
// triples serve the root chain, correlated triples the inner nodes
// (WAN_EXEC uses standard triples throughout, indexed by
// counter_combined).
void traverse_and_compute_ANDs(int num_cmps, uint8_t *leaf_res_eq,
uint8_t *leaf_res_cmp) {
#ifdef WAN_EXEC
Triple triples_std((num_triples)*num_cmps, true);
#else
Triple triples_corr(num_triples_corr * num_cmps, true, num_cmps);
Triple triples_std(num_triples_std * num_cmps, true);
#endif
// Generate required Bit-Triples
#ifdef WAN_EXEC
// std::cout<<"Running on WAN_EXEC; Skipping correlated triples"<<std::endl;
triple_gen->generate(party, &triples_std, _16KKOT_to_4OT);
#else
triple_gen->generate(party, &triples_corr, _8KKOT);
triple_gen->generate(party, &triples_std, _16KKOT_to_4OT);
#endif
// std::cout << "Bit Triples Generated" << std::endl;
// Combine leaf OT results in a bottom-up fashion
int counter_std = 0, old_counter_std = 0;
int counter_corr = 0, old_counter_corr = 0;
int counter_combined = 0, old_counter_combined = 0;
// e/f hold the bit-packed masked shares exchanged per level
// (num_cmps is a multiple of 8, so /8 byte counts are exact).
uint8_t *ei = new uint8_t[(num_triples * num_cmps) / 8];
uint8_t *fi = new uint8_t[(num_triples * num_cmps) / 8];
uint8_t *e = new uint8_t[(num_triples * num_cmps) / 8];
uint8_t *f = new uint8_t[(num_triples * num_cmps) / 8];
for (int i = 1; i < num_digits; i *= 2) {
// Phase 1: compute local masked shares for every AND at this level.
for (int j = 0; j < num_digits and j + i < num_digits; j += 2 * i) {
if (j == 0) {
// Root chain node: only the cmp AND is needed.
#ifdef WAN_EXEC
AND_step_1(
ei + (counter_std * num_cmps) / 8,
fi + (counter_std * num_cmps) / 8, leaf_res_cmp + j * num_cmps,
leaf_res_eq + (j + i) * num_cmps,
(triples_std.ai) + (counter_combined * num_cmps) / 8,
(triples_std.bi) + (counter_combined * num_cmps) / 8, num_cmps);
counter_std++;
counter_combined++;
#else
AND_step_1(ei + (counter_std * num_cmps) / 8,
fi + (counter_std * num_cmps) / 8,
leaf_res_cmp + j * num_cmps,
leaf_res_eq + (j + i) * num_cmps,
(triples_std.ai) + (counter_std * num_cmps) / 8,
(triples_std.bi) + (counter_std * num_cmps) / 8, num_cmps);
counter_std++;
#endif
} else {
// Inner node: both the cmp AND and the eq AND are needed.
#ifdef WAN_EXEC
AND_step_1(
ei + ((num_triples_std + 2 * counter_corr) * num_cmps) / 8,
fi + ((num_triples_std + 2 * counter_corr) * num_cmps) / 8,
leaf_res_cmp + j * num_cmps, leaf_res_eq + (j + i) * num_cmps,
(triples_std.ai) + (counter_combined * num_cmps) / 8,
(triples_std.bi) + (counter_combined * num_cmps) / 8, num_cmps);
counter_combined++;
AND_step_1(
ei + ((num_triples_std + (2 * counter_corr + 1)) * num_cmps) / 8,
fi + ((num_triples_std + (2 * counter_corr + 1)) * num_cmps) / 8,
leaf_res_eq + j * num_cmps, leaf_res_eq + (j + i) * num_cmps,
(triples_std.ai) + (counter_combined * num_cmps) / 8,
(triples_std.bi) + (counter_combined * num_cmps) / 8, num_cmps);
counter_combined++;
counter_corr++;
#else
AND_step_1(
ei + ((num_triples_std + 2 * counter_corr) * num_cmps) / 8,
fi + ((num_triples_std + 2 * counter_corr) * num_cmps) / 8,
leaf_res_cmp + j * num_cmps, leaf_res_eq + (j + i) * num_cmps,
(triples_corr.ai) + (2 * counter_corr * num_cmps) / 8,
(triples_corr.bi) + (2 * counter_corr * num_cmps) / 8, num_cmps);
AND_step_1(
ei + ((num_triples_std + (2 * counter_corr + 1)) * num_cmps) / 8,
fi + ((num_triples_std + (2 * counter_corr + 1)) * num_cmps) / 8,
leaf_res_eq + j * num_cmps, leaf_res_eq + (j + i) * num_cmps,
(triples_corr.ai) + ((2 * counter_corr + 1) * num_cmps) / 8,
(triples_corr.bi) + ((2 * counter_corr + 1) * num_cmps) / 8,
num_cmps);
counter_corr++;
#endif
}
}
int offset_std = (old_counter_std * num_cmps) / 8;
int size_std = ((counter_std - old_counter_std) * num_cmps) / 8;
int offset_corr =
((num_triples_std + 2 * old_counter_corr) * num_cmps) / 8;
int size_corr = (2 * (counter_corr - old_counter_corr) * num_cmps) / 8;
// Exchange masked shares with the other party; send and recv run on
// two OMP threads over separate channels so neither side blocks.
#pragma omp parallel num_threads(2)
{
if (omp_get_thread_num() == 1) {
if (party == sci::ALICE) {
iopack->io_rev->recv_data(e + offset_std, size_std);
iopack->io_rev->recv_data(e + offset_corr, size_corr);
iopack->io_rev->recv_data(f + offset_std, size_std);
iopack->io_rev->recv_data(f + offset_corr, size_corr);
} else { // party == sci::BOB
iopack->io_rev->send_data(ei + offset_std, size_std);
iopack->io_rev->send_data(ei + offset_corr, size_corr);
iopack->io_rev->send_data(fi + offset_std, size_std);
iopack->io_rev->send_data(fi + offset_corr, size_corr);
}
} else {
if (party == sci::ALICE) {
iopack->io->send_data(ei + offset_std, size_std);
iopack->io->send_data(ei + offset_corr, size_corr);
iopack->io->send_data(fi + offset_std, size_std);
iopack->io->send_data(fi + offset_corr, size_corr);
} else { // party == sci::BOB
iopack->io->recv_data(e + offset_std, size_std);
iopack->io->recv_data(e + offset_corr, size_corr);
iopack->io->recv_data(f + offset_std, size_std);
iopack->io->recv_data(f + offset_corr, size_corr);
}
}
}
// Reconstruct e = e0 ^ e1, f = f0 ^ f1 for this level's ANDs.
for (int i = 0; i < size_std; i++) {
e[i + offset_std] ^= ei[i + offset_std];
f[i + offset_std] ^= fi[i + offset_std];
}
for (int i = 0; i < size_corr; i++) {
e[i + offset_corr] ^= ei[i + offset_corr];
f[i + offset_corr] ^= fi[i + offset_corr];
}
// Phase 2: replay the same node order and finish each AND.
counter_std = old_counter_std;
counter_corr = old_counter_corr;
#ifdef WAN_EXEC
counter_combined = old_counter_combined;
#endif
for (int j = 0; j < num_digits and j + i < num_digits; j += 2 * i) {
if (j == 0) {
#ifdef WAN_EXEC
AND_step_2(
leaf_res_cmp + j * num_cmps, e + (counter_std * num_cmps) / 8,
f + (counter_std * num_cmps) / 8,
ei + (counter_std * num_cmps) / 8,
fi + (counter_std * num_cmps) / 8,
(triples_std.ai) + (counter_combined * num_cmps) / 8,
(triples_std.bi) + (counter_combined * num_cmps) / 8,
(triples_std.ci) + (counter_combined * num_cmps) / 8, num_cmps);
counter_combined++;
#else
AND_step_2(leaf_res_cmp + j * num_cmps,
e + (counter_std * num_cmps) / 8,
f + (counter_std * num_cmps) / 8,
ei + (counter_std * num_cmps) / 8,
fi + (counter_std * num_cmps) / 8,
(triples_std.ai) + (counter_std * num_cmps) / 8,
(triples_std.bi) + (counter_std * num_cmps) / 8,
(triples_std.ci) + (counter_std * num_cmps) / 8, num_cmps);
#endif
// cmp[j] ^= cmp[j+i]: fold the upper half's comparison in.
for (int k = 0; k < num_cmps; k++)
leaf_res_cmp[j * num_cmps + k] ^=
leaf_res_cmp[(j + i) * num_cmps + k];
counter_std++;
} else {
#ifdef WAN_EXEC
AND_step_2(leaf_res_cmp + j * num_cmps,
e + ((num_triples_std + 2 * counter_corr) * num_cmps) / 8,
f + ((num_triples_std + 2 * counter_corr) * num_cmps) / 8,
ei + ((num_triples_std + 2 * counter_corr) * num_cmps) / 8,
fi + ((num_triples_std + 2 * counter_corr) * num_cmps) / 8,
(triples_std.ai) + (counter_combined * num_cmps) / 8,
(triples_std.bi) + (counter_combined * num_cmps) / 8,
(triples_std.ci) + (counter_combined * num_cmps) / 8,
num_cmps);
counter_combined++;
AND_step_2(
leaf_res_eq + j * num_cmps,
e + ((num_triples_std + (2 * counter_corr + 1)) * num_cmps) / 8,
f + ((num_triples_std + (2 * counter_corr + 1)) * num_cmps) / 8,
ei + ((num_triples_std + (2 * counter_corr + 1)) * num_cmps) / 8,
fi + ((num_triples_std + (2 * counter_corr + 1)) * num_cmps) / 8,
(triples_std.ai) + (counter_combined * num_cmps) / 8,
(triples_std.bi) + (counter_combined * num_cmps) / 8,
(triples_std.ci) + (counter_combined * num_cmps) / 8, num_cmps);
counter_combined++;
#else
AND_step_2(leaf_res_cmp + j * num_cmps,
e + ((num_triples_std + 2 * counter_corr) * num_cmps) / 8,
f + ((num_triples_std + 2 * counter_corr) * num_cmps) / 8,
ei + ((num_triples_std + 2 * counter_corr) * num_cmps) / 8,
fi + ((num_triples_std + 2 * counter_corr) * num_cmps) / 8,
(triples_corr.ai) + (2 * counter_corr * num_cmps) / 8,
(triples_corr.bi) + (2 * counter_corr * num_cmps) / 8,
(triples_corr.ci) + (2 * counter_corr * num_cmps) / 8,
num_cmps);
AND_step_2(
leaf_res_eq + j * num_cmps,
e + ((num_triples_std + (2 * counter_corr + 1)) * num_cmps) / 8,
f + ((num_triples_std + (2 * counter_corr + 1)) * num_cmps) / 8,
ei + ((num_triples_std + (2 * counter_corr + 1)) * num_cmps) / 8,
fi + ((num_triples_std + (2 * counter_corr + 1)) * num_cmps) / 8,
(triples_corr.ai) + ((2 * counter_corr + 1) * num_cmps) / 8,
(triples_corr.bi) + ((2 * counter_corr + 1) * num_cmps) / 8,
(triples_corr.ci) + ((2 * counter_corr + 1) * num_cmps) / 8,
num_cmps);
#endif
for (int k = 0; k < num_cmps; k++)
leaf_res_cmp[j * num_cmps + k] ^=
leaf_res_cmp[(j + i) * num_cmps + k];
counter_corr++;
}
}
old_counter_std = counter_std;
old_counter_corr = counter_corr;
#ifdef WAN_EXEC
old_counter_combined = counter_combined;
#endif
}
// Sanity: every generated triple was consumed exactly once.
#ifdef WAN_EXEC
assert(counter_combined == num_triples);
#else
assert(counter_std == num_triples_std);
assert(2 * counter_corr == num_triples_corr);
#endif
// cleanup
delete[] ei;
delete[] fi;
delete[] e;
delete[] f;
}
// Beaver AND, step 1 (local): publish masked shares ei = xi ^ ai and
// fi = yi ^ bi, bit-packed 8 ANDs per byte. xi/yi are one byte per
// bit; ai/bi are already packed.
void AND_step_1(uint8_t *ei, // evaluates batch of 8 ANDs
uint8_t *fi, uint8_t *xi, uint8_t *yi, uint8_t *ai,
uint8_t *bi, int num_ANDs) {
assert(num_ANDs % 8 == 0);
for (int i = 0; i < num_ANDs; i += 8) {
ei[i / 8] = ai[i / 8];
fi[i / 8] = bi[i / 8];
ei[i / 8] ^= sci::bool_to_uint8(xi + i, 8);
fi[i / 8] ^= sci::bool_to_uint8(yi + i, 8);
}
}
// Beaver AND, step 2 (after exchange): with reconstructed e, f compute
// zi = (e&f if ALICE) ^ f&ai ^ e&bi ^ ci and unpack back to one byte
// per bit. Only one party adds the public e&f term so the shares XOR
// to the true product.
void AND_step_2(uint8_t *zi, // evaluates batch of 8 ANDs
uint8_t *e, uint8_t *f, uint8_t *ei, uint8_t *fi, uint8_t *ai,
uint8_t *bi, uint8_t *ci, int num_ANDs) {
assert(num_ANDs % 8 == 0);
for (int i = 0; i < num_ANDs; i += 8) {
uint8_t temp_z;
if (party == sci::ALICE)
temp_z = e[i / 8] & f[i / 8];
else
temp_z = 0;
temp_z ^= f[i / 8] & ai[i / 8];
temp_z ^= e[i / 8] & bi[i / 8];
temp_z ^= ci[i / 8];
sci::uint8_to_bool(zi + i, temp_z, 8);
}
}
};
#endif // MILLIONAIRE_H__
|
benchmark_tree_learner.h | /*!
* Copyright (c) 2016 Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See LICENSE file in the project root for license information.
*/
#pragma once
#include <LightGBM/dataset.h>
#include <LightGBM/tree.h>
#include <LightGBM/tree_learner.h>
#include <LightGBM/utils/array_args.h>
#include <LightGBM/utils/random.h>
#include <string>
#include <cmath>
#include <cstdio>
#include <memory>
#include <random>
#include <vector>
#include "data_partition.hpp"
#include "feature_histogram.hpp"
#include "leaf_splits.hpp"
#include "split_info.hpp"
#ifdef USE_GPU
// Use 4KBytes aligned allocator for ordered gradients and ordered hessians when GPU is enabled.
// This is necessary to pin the two arrays in memory and make transferring faster.
#include <boost/align/aligned_allocator.hpp>
#endif
using namespace json11;
namespace LightGBM {
/*!
* \brief Used for learning a tree by single machine
*/
class BenchmarkTreeLearner: public TreeLearner {
 public:
  explicit BenchmarkTreeLearner(const Config* config);
  ~BenchmarkTreeLearner();
  void Init(const Dataset* train_data, bool is_constant_hessian) override;
  void ResetTrainingData(const Dataset* train_data) override;
  void ResetConfig(const Config* config) override;
  /*! \brief Learn one tree from the given gradients/hessians; forced_split_json
      optionally prescribes splits to apply before the greedy search */
  Tree* Train(const score_t* gradients, const score_t *hessians, bool is_constant_hessian,
              const Json& forced_split_json) override;
  /*! \brief Re-fit leaf outputs of an existing tree structure on new gradients/hessians */
  Tree* FitByExistingTree(const Tree* old_tree, const score_t* gradients, const score_t* hessians) const override;
  Tree* FitByExistingTree(const Tree* old_tree, const std::vector<int>& leaf_pred,
                          const score_t* gradients, const score_t* hessians) override;
  /*! \brief Restrict training to a bagged subset of data indices */
  void SetBaggingData(const data_size_t* used_indices, data_size_t num_data) override {
    data_partition_->SetUsedDataIndices(used_indices, num_data);
  }
  /*! \brief Add each leaf's output to the score of every data point on that leaf */
  void AddPredictionToScore(const Tree* tree, double* out_score) const override {
    if (tree->num_leaves() <= 1) { return; }
    CHECK(tree->num_leaves() <= data_partition_->num_leaves());
    #pragma omp parallel for schedule(static)
    for (int i = 0; i < tree->num_leaves(); ++i) {
      double output = static_cast<double>(tree->LeafOutput(i));
      data_size_t cnt_leaf_data = 0;
      auto tmp_idx = data_partition_->GetIndexOnLeaf(i, &cnt_leaf_data);
      for (data_size_t j = 0; j < cnt_leaf_data; ++j) {
        out_score[tmp_idx[j]] += output;
      }
    }
  }
  void RenewTreeOutput(Tree* tree, const ObjectiveFunction* obj, std::function<double(const label_t*, int)> residual_getter,
                       data_size_t total_num_data, const data_size_t* bag_indices, data_size_t bag_cnt) const override;
 protected:
  /*! \brief Sample the features to consider (per tree or per node level) */
  virtual std::vector<int8_t> GetUsedFeatures(bool is_tree_level);
  /*!
  * \brief Some initial works before training
  */
  virtual void BeforeTrain();
  /*!
  * \brief Some initial works before FindBestSplit
  */
  virtual bool BeforeFindBestSplit(const Tree* tree, int left_leaf, int right_leaf);
  virtual void FindBestSplits();
  virtual void ConstructHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);
  virtual void FindBestSplitsFromHistograms(const std::vector<int8_t>& is_feature_used, bool use_subtract);
  /*!
  * \brief Partition tree and data according best split.
  * \param tree Current tree, will be splitted on this function.
  * \param best_leaf The index of leaf that will be splitted.
  * \param left_leaf The index of left leaf after splitted.
  * \param right_leaf The index of right leaf after splitted.
  */
  virtual void Split(Tree* tree, int best_leaf, int* left_leaf, int* right_leaf);
  /* Force splits with forced_split_json dict and then return num splits forced.*/
  virtual int32_t ForceSplits(Tree* tree, const Json& forced_split_json, int* left_leaf,
                              int* right_leaf, int* cur_depth,
                              bool *aborted_last_force_split);
  /*!
  * \brief Get the number of data in a leaf
  * \param leaf_idx The index of leaf
  * \return The number of data in the leaf_idx leaf
  */
  inline virtual data_size_t GetGlobalDataCountInLeaf(int leaf_idx) const;
  /* NOTE(review): presumably a cost model for splitting on a feature at a
     leaf; semantics not visible here — confirm against the .cpp */
  double CalculateOndemandCosts(int feature_index, int leaf_index);
  /*! \brief number of data */
  data_size_t num_data_;
  /*! \brief number of features */
  int num_features_;
  /*! \brief training data */
  const Dataset* train_data_;
  /*! \brief gradients of current iteration */
  const score_t* gradients_;
  /*! \brief hessians of current iteration */
  const score_t* hessians_;
  /*! \brief training data partition on leaves */
  std::unique_ptr<DataPartition> data_partition_;
  /*! \brief used for generate used features */
  Random random_;
  /*! \brief used for sub feature training, is_feature_used_[i] = false means don't used feature i */
  std::vector<int8_t> is_feature_used_;
  /*! \brief used feature indices in current tree */
  std::vector<int> used_feature_indices_;
  /*! \brief pointer to histograms array of parent of current leaves */
  FeatureHistogram* parent_leaf_histogram_array_;
  /*! \brief pointer to histograms array of smaller leaf */
  FeatureHistogram* smaller_leaf_histogram_array_;
  /*! \brief pointer to histograms array of larger leaf */
  FeatureHistogram* larger_leaf_histogram_array_;
  /*! \brief store best split points for all leaves */
  std::vector<SplitInfo> best_split_per_leaf_;
  /*! \brief store best split per feature for all leaves */
  std::vector<SplitInfo> splits_per_leaf_;
  /*! \brief stores best thresholds for all feature for smaller leaf */
  std::unique_ptr<LeafSplits> smaller_leaf_splits_;
  /*! \brief stores best thresholds for all feature for larger leaf */
  std::unique_ptr<LeafSplits> larger_leaf_splits_;
  /*! \brief indices of features that are valid split candidates */
  std::vector<int> valid_feature_indices_;
#ifdef USE_GPU
  /*! \brief gradients of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized, aligned to 4K page */
  std::vector<score_t, boost::alignment::aligned_allocator<score_t, 4096>> ordered_hessians_;
#else
  /*! \brief gradients of current iteration, ordered for cache optimized */
  std::vector<score_t> ordered_gradients_;
  /*! \brief hessians of current iteration, ordered for cache optimized */
  std::vector<score_t> ordered_hessians_;
#endif
  /*! \brief Store ordered bin */
  std::vector<std::unique_ptr<OrderedBin>> ordered_bins_;
  /*! \brief True if has ordered bin */
  bool has_ordered_bin_ = false;
  /*! \brief is_data_in_leaf_[i] != 0 means i-th data is marked */
  std::vector<char> is_data_in_leaf_;
  /*! \brief used to cache historical histogram to speed up*/
  HistogramPool histogram_pool_;
  /*! \brief config of tree learner*/
  const Config* config_;
  /*! \brief number of OpenMP threads used by this learner */
  int num_threads_;
  /*! \brief feature indices that have an ordered bin */
  std::vector<int> ordered_bin_indices_;
  /*! \brief cached flag: hessian is constant across data this iteration */
  bool is_constant_hessian_;
  /*! \brief per-feature flag: feature has already been used in a split */
  std::vector<bool> is_feature_used_in_split_;
  /*! \brief per-feature usage counters (note: missing trailing underscore
      vs. the file's member-naming convention) */
  std::vector<uint32_t> feature_used_in_data;
};
// Number of training rows currently assigned to leaf_idx.
// A negative index denotes "no such leaf" and reports zero rows.
inline data_size_t BenchmarkTreeLearner::GetGlobalDataCountInLeaf(int leaf_idx) const {
  return leaf_idx >= 0 ? data_partition_->leaf_count(leaf_idx) : 0;
}
} // namespace LightGBM
|
mrg8_vec.h | /*
* mrg8.h
*
* Created on: Apr 6, 2015
* Author: aghasemi
* Updated on: June 29, 2017
* Author: Yusuke
* Updated on: April 9, 2018
* Author: Yusuke
*/
#ifndef MRG8_VEC_H
#define MRG8_VEC_H
#include <vector>
#include <stdint.h>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <cstdlib>
#include <cmath>
#include <sys/time.h>
#include <ctime>
#include <omp.h>
#include <emmintrin.h>
#include <immintrin.h>
#include <zmmintrin.h>
#include "mrg8.h"
using namespace std;
#define AVX512
// Vectorized MRG8 random number generator (AVX-512 path plus a scalar
// fallback). Extends the base mrg8 generator with two batched formulations
// of the 8-lag recurrence: an "inner-product" kernel (one new state element
// per matrix row) and an "outer-product" kernel (8 new elements at once),
// plus OpenMP-threaded drivers of each.
class mrg8_vec : public mrg8
{
public:
    mrg8_vec();
    mrg8_vec(const uint32_t seed_val);
    ~mrg8_vec()
    {
    }
    // Batched generation into ran[0..n), inner-product formulation.
    void mrg8_vec_inner(double *ran, int n);
    // Single draw using the generator's own state.
    double mrg8_vec_inner();
    // Single draw advancing a caller-supplied 8-element state.
    double mrg8_vec_inner(uint32_t *new_state);
    // Batched generation into ran[0..n), outer-product formulation.
    void mrg8_vec_outer(double * ran, int n);
    double mrg8_vec_outer();
    double mrg8_vec_outer(uint32_t *new_state);
    // Functor interface draws via the outer-product kernel.
    double operator() ()
    {
        return mrg8_vec_outer();
    }
    // OpenMP-parallel drivers; each thread jumps ahead to its own substream.
    void mrg8_vec_inner_tp(double *ran, int n);
    void mrg8_vec_outer_tp(double * ran, int n);
    void mrg8_vec_outer_tp_small(double * ran, int each_n, int it);
private:
    // 8x8 jump matrix A^8 laid out for the inner-product kernel
    // (row- and column-reversed relative to JUMP_MATRIX).
    int64_t A8_IP_MATRIX[64];
    // A^8 fully reversed, used by the scalar outer-product kernel.
    int64_t A8_OP_MATRIX[64];
    // Column-rotated copy of A8_OP_MATRIX consumed by the AVX-512
    // outer-product kernel together with lane permutations.
    int64_t A8_OP_SH_MATRIX[64];
    // NOTE(review): declared but never initialized or read in this header —
    // possibly for a 16-wide variant; confirm before relying on it.
    uint32_t A816_OP_SH_MATRIX[128];
    void mrg8_vec_inner(double *ran, int n, uint32_t *each_state);
    void mrg8_vec_outer(double * ran, int n, uint32_t *each_state);
};
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Default constructor: seed via the base class, then derive the three 8x8
// jump-matrix layouts used by the vector kernels from JUMP_MATRIX.
mrg8_vec::mrg8_vec(): mrg8()
{
    read_jump_matrix();
    // A^8 for the inner-product kernel: rows and columns reversed.
    for (int row = 0; row < 8; ++row) {
        for (int col = 0; col < 8; ++col) {
            A8_IP_MATRIX[(7 - row) * 8 + (7 - col)] =
                (uint64_t)(JUMP_MATRIX[8 * 8 * 3 + row + col * 8]);
        }
    }
    // A^8 for the outer-product kernel: fully reversed element order.
    for (int idx = 0; idx < 64; ++idx) {
        A8_OP_MATRIX[idx] = (uint64_t)(JUMP_MATRIX[8 * 8 * 4 - 1 - idx]);
    }
    // Column-rotated variant consumed by the AVX-512 outer kernel.
    for (int row = 0; row < 8; ++row) {
        for (int col = 0; col < 8; ++col) {
            A8_OP_SH_MATRIX[row + col * 8] =
                A8_OP_MATRIX[row + ((row + col) % 8) * 8];
        }
    }
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Seeded constructor: identical matrix setup to the default constructor,
// but the base generator is initialized from seed_val.
mrg8_vec::mrg8_vec(const uint32_t seed_val): mrg8(seed_val)
{
    read_jump_matrix();
    // A^8 for the inner-product kernel: rows and columns reversed.
    for (int row = 0; row < 8; ++row) {
        for (int col = 0; col < 8; ++col) {
            A8_IP_MATRIX[(7 - row) * 8 + (7 - col)] =
                (uint64_t)(JUMP_MATRIX[8 * 8 * 3 + row + col * 8]);
        }
    }
    // A^8 for the outer-product kernel: fully reversed element order.
    for (int idx = 0; idx < 64; ++idx) {
        A8_OP_MATRIX[idx] = (uint64_t)(JUMP_MATRIX[8 * 8 * 4 - 1 - idx]);
    }
    // Column-rotated variant consumed by the AVX-512 outer kernel.
    for (int row = 0; row < 8; ++row) {
        for (int col = 0; col < 8; ++col) {
            A8_OP_SH_MATRIX[row + col * 8] =
                A8_OP_MATRIX[row + ((row + col) % 8) * 8];
        }
    }
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Fill ran[0..n) with uniforms and advance each_state by n steps using the
// inner-product formulation: each new state element is the dot product of
// one A8_IP_MATRIX row with the previous 8-lag state. each_state holds the
// 8 lags most-recent-first; it is updated in place on exit.
inline void mrg8_vec::mrg8_vec_inner(double * ran, int n, uint32_t *each_state)
{
#ifdef AVX512
    int i, j, k;
    double rnorm = 1.0 / static_cast<double>(MASK);  // scales state into (0,1)
    uint64_t r_state[8];
    __m512i mone_m = _mm512_set1_epi64(-1);
    // state1_m/state2_m double-buffer the 8-lag state between iterations.
    __m512i state1_m, state2_m, s1_m, s2_m, s_m, mask_m, a_m;
    __m512d ran_m, rnorm_m;
    __m256i s_32m;
    uint64_t s;
    // Widen the lags to 64-bit, oldest element in lane 0.
    for (i = 0; i < 8; ++i) {
        r_state[i] = (uint64_t)(each_state[7 - i]);
    }
    // NOTE(review): _mm512_load_epi64 is an *aligned* load but r_state is an
    // ordinary stack array — consider _mm512_loadu_si512; confirm alignment.
    state1_m = _mm512_load_epi64(r_state);
    mask_m = _mm512_set1_epi64(MASK);
    rnorm_m = _mm512_set1_pd(rnorm);
    // Main loop: full blocks of 8 outputs; the final (possibly partial)
    // block is handled after the loop. The buffer ping-pongs each block.
    for (i = 0; i < n - 8; i+=8) {
        if (((i >> 3) & 1) == 0) {
            // Compute 8 new state elements, one per matrix row.
            for (k = 0; k < 8; ++k) {
                a_m = _mm512_load_epi64(A8_IP_MATRIX + k * 8);
                s1_m = _mm512_mul_epu32(a_m, state1_m);
                // Partial modular reduction: (x & MASK) + (x >> 31).
                s_m = _mm512_and_epi64(s1_m, mask_m);
                s2_m = _mm512_srli_epi64(s1_m, 31);
                s_m = _mm512_add_epi64(s_m, s2_m);
                s = _mm512_reduce_add_epi64(s_m);
                // Lane subscripting on __m512i is a GCC/Clang vector extension.
                state2_m[k] = s;
            }
            // Two more reduction rounds bring each lane below 2^31 - 1.
            s_m = _mm512_and_epi64(state2_m, mask_m);
            state2_m = _mm512_srli_epi64(state2_m, 31);
            state2_m = _mm512_add_epi64(s_m, state2_m);
            s_m = _mm512_and_epi64(state2_m, mask_m);
            state2_m = _mm512_srli_epi64(state2_m, 31);
            state2_m = _mm512_add_epi64(s_m, state2_m);
            // (state - 1) * rnorm maps the state into [0,1).
            s_m = _mm512_add_epi64(state2_m, mone_m);
            s_32m = _mm512_cvtepi64_epi32(s_m);
            ran_m = _mm512_cvtepi32_pd(s_32m);
            ran_m = _mm512_mul_pd(ran_m, rnorm_m)
;
            // NOTE(review): aligned store — requires ran to be 64-byte
            // aligned; _mm512_storeu_pd would be safe for arbitrary buffers.
            _mm512_store_pd(ran + i, ran_m);
        }
        else {
            // Same computation with the buffers swapped.
            for (k = 0; k < 8; ++k) {
                a_m = _mm512_load_epi64(A8_IP_MATRIX + k * 8);
                s1_m = _mm512_mul_epu32(a_m, state2_m);
                s_m = _mm512_and_epi64(s1_m, mask_m);
                s2_m = _mm512_srli_epi64(s1_m, 31);
                s_m = _mm512_add_epi64(s_m, s2_m);
                s = _mm512_reduce_add_epi64(s_m);
                state1_m[k] = s;
            }
            s_m = _mm512_and_epi64(state1_m, mask_m);
            state1_m = _mm512_srli_epi64(state1_m, 31);
            state1_m = _mm512_add_epi64(s_m, state1_m);
            s_m = _mm512_and_epi64(state1_m, mask_m);
            state1_m = _mm512_srli_epi64(state1_m, 31);
            state1_m = _mm512_add_epi64(s_m, state1_m);
            s_m = _mm512_add_epi64(state1_m, mone_m);
            s_32m = _mm512_cvtepi64_epi32(s_m);
            ran_m = _mm512_cvtepi32_pd(s_32m);
            ran_m = _mm512_mul_pd(ran_m, rnorm_m);
            _mm512_store_pd(ran + i, ran_m);
        }
    }
    // Tail: last n - i (1..8) outputs, stored elementwise; then write the
    // final 8-lag state back to each_state, newest-first.
    if (((i >> 3) & 1) == 0) {
        for (k = 0; k < (n - i); ++k) {
            a_m = _mm512_load_epi64(A8_IP_MATRIX + k * 8);
            s1_m = _mm512_mul_epu32(a_m, state1_m);
            s_m = _mm512_and_epi64(s1_m, mask_m);
            s2_m = _mm512_srli_epi64(s1_m, 31);
            s_m = _mm512_add_epi64(s_m, s2_m);
            s = _mm512_reduce_add_epi64(s_m);
            state2_m[k] = s;
        }
        s_m = _mm512_and_epi64(state2_m, mask_m);
        state2_m = _mm512_srli_epi64(state2_m, 31);
        state2_m = _mm512_add_epi64(s_m, state2_m);
        s_m = _mm512_and_epi64(state2_m, mask_m);
        state2_m = _mm512_srli_epi64(state2_m, 31);
        state2_m = _mm512_add_epi64(s_m, state2_m);
        s_m = _mm512_add_epi64(state2_m, mone_m);
        s_32m = _mm512_cvtepi64_epi32(s_m);
        ran_m = _mm512_cvtepi32_pd(s_32m);
        ran_m = _mm512_mul_pd(ran_m, rnorm_m);
        for (k = 0; k < n - i; ++k) {
            ran[i + k] = ran_m[k];
        }
        // Merge the k newly-produced elements with the 8-k surviving lags.
        for (j = k; j < 8; ++j) {
            each_state[7 - (j - k)] = (uint32_t)(state1_m[j]);
        }
        for (j = 0; j < k; ++j) {
            each_state[k - j - 1] = (uint32_t)(state2_m[j]);
        }
    }
    else {
        for (k = 0; k < (n - i); ++k) {
            a_m = _mm512_load_epi64(A8_IP_MATRIX + k * 8);
            s1_m = _mm512_mul_epu32(a_m, state2_m);
            s_m = _mm512_and_epi64(s1_m, mask_m);
            s2_m = _mm512_srli_epi64(s1_m, 31);
            s_m = _mm512_add_epi64(s_m, s2_m);
            s = _mm512_reduce_add_epi64(s_m);
            state1_m[k] = s;
        }
        s_m = _mm512_and_epi64(state1_m, mask_m);
        state1_m = _mm512_srli_epi64(state1_m, 31);
        state1_m = _mm512_add_epi64(s_m, state1_m);
        s_m = _mm512_and_epi64(state1_m, mask_m);
        state1_m = _mm512_srli_epi64(state1_m, 31);
        state1_m = _mm512_add_epi64(s_m, state1_m);
        s_m = _mm512_add_epi64(state1_m, mone_m);
        s_32m = _mm512_cvtepi64_epi32(s_m);
        ran_m = _mm512_cvtepi32_pd(s_32m);
        ran_m = _mm512_mul_pd(ran_m, rnorm_m);
        for (k = 0; k < n - i; ++k) {
            ran[i + k] = ran_m[k];
        }
        for (j = k; j < 8; ++j) {
            each_state[7 - (j - k)] = (uint32_t)(state2_m[j]);
        }
        for (j = 0; j < k; ++j) {
            each_state[k - j - 1] = (uint32_t)(state1_m[j]);
        }
    }
#else
    // Scalar fallback: same recurrence with a two-buffer state array.
    int i, j, k;
    uint32_t r_state[2][8];
    uint64_t s1, s2, s;
    double rnorm = 1.0 / static_cast<double>(MASK);
    int target;
    for (i = 0; i < 8; ++i) {
        r_state[0][i] = each_state[7 - i];
    }
    for (i = 0; i < n; i+=8) {
        target = (i >> 3) & 1;
        for (k = 0; k < 8 && i + k < n; ++k) {
            s1 = 0;
            s2 = 0;
            // Split the 8-term dot product so each half fits 64 bits safely.
            for (j = 0; j < 4; ++j) {
                s1 += (uint64_t)(A8_IP_MATRIX[k * 8 + j]) * r_state[target][j];
                s2 += (uint64_t)(A8_IP_MATRIX[k * 8 + j + 4]) * r_state[target][j + 4];
            }
            s = (s1 & MASK) + (s1 >> 31) + (s2 & MASK) + (s2 >> 31);
            s = (s & MASK) + (s >> 31);
            r_state[1 - target][k] = (s & MASK) + (s >> 31);
            ran[i + k] = static_cast<double>(r_state[1 - target][k] - 1) * rnorm;
        }
    }
    // NOTE(review): k and target are read here from the last loop iteration;
    // if n == 0 they are uninitialized (undefined behavior) — confirm that
    // callers never pass n <= 0.
    for (i = k; i < 8; ++i) {
        each_state[7 - (i - k)] = r_state[target][i];
    }
    for (i = 0; i < k; ++i) {
        each_state[k - i - 1] = r_state[1 - target][i];
    }
#endif
}
// Batched draw into ran[0..n) using (and advancing) the generator's own state.
inline void mrg8_vec::mrg8_vec_inner(double * ran, int n)
{
    mrg8_vec_inner(ran, n, state);
}
// Single draw from the generator's own state (n = 1 batch).
inline double mrg8_vec::mrg8_vec_inner()
{
    double r;
    mrg8_vec_inner(&r, 1, state);
    return r;
}
// Single draw advancing a caller-supplied 8-element state instead.
inline double mrg8_vec::mrg8_vec_inner(uint32_t *new_state)
{
    double r;
    mrg8_vec_inner(&r, 1, new_state);
    return r;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Fill ran[0..n) with uniforms and advance each_state by n steps using the
// outer-product formulation: all 8 new state elements are accumulated at
// once from the column-rotated matrix A8_OP_SH_MATRIX while the state vector
// is rotated across lanes. each_state is updated in place on exit.
inline void mrg8_vec::mrg8_vec_outer(double * ran, int n, uint32_t *each_state)
{
#ifdef AVX512
    int i, j;
    uint64_t r_state[8];
    const __m512i one_m = _mm512_set1_epi64(1);
    // Permutation that rotates the 8 state lanes by one position.
    const __m512i idx_m = _mm512_set_epi64(0, 7, 6, 5, 4, 3, 2, 1);
    __m256i state_32m;
    __m512i state_m, s_m, s1_m, s2_m, mask_m, a_m[8];
    __m512d ran_m, rnorm_m;
    double rnorm = 1.0 / static_cast<double>(MASK);  // scales state into (0,1)
    // Widen the lags (oldest in lane 0) and preload all 8 matrix rows.
    for (i = 0; i < 8; ++i) {
        r_state[i] = each_state[7 - i];
        a_m[i] = _mm512_load_epi64(A8_OP_SH_MATRIX + i * 8);
    }
    mask_m = _mm512_set1_epi64(MASK);
    rnorm_m = _mm512_set1_pd(rnorm);
    // NOTE(review): aligned load of a plain stack array — see inner kernel.
    state_m = _mm512_load_epi64(r_state);
    i = 0;  // redundant: the for-init below reassigns i
    // Main loop: full blocks of 8; the final (possibly partial) block is
    // produced after the loop.
    for (i = 0; i < n - 8; i+=8) {
        s1_m = _mm512_setzero_si512();
        s2_m = _mm512_setzero_si512();
        // Accumulate the matrix-vector product in two 4-term halves so the
        // 64-bit partial sums cannot overflow before reduction.
        for (j = 0; j < 4; ++j) {
            s_m = _mm512_mul_epu32(a_m[j], state_m);
            s1_m = _mm512_add_epi64(s1_m, s_m);
            state_m = _mm512_permutexvar_epi64(idx_m, state_m);
        }
        for (j = 0; j < 4; ++j) {
            s_m = _mm512_mul_epu32(a_m[j + 4], state_m);
            s2_m = _mm512_add_epi64(s2_m, s_m);
            state_m = _mm512_permutexvar_epi64(idx_m, state_m);
        }
        // Partial modular reductions: (x & MASK) + (x >> 31), then combine.
        s_m = _mm512_and_epi64(s1_m, mask_m);
        s1_m = _mm512_srli_epi64(s1_m, 31);
        s1_m = _mm512_add_epi64(s_m, s1_m);
        s_m = _mm512_and_epi64(s2_m, mask_m);
        s2_m = _mm512_srli_epi64(s2_m, 31);
        s2_m = _mm512_add_epi64(s_m, s2_m);
        s_m = _mm512_add_epi64(s1_m, s2_m);
        state_m = _mm512_and_epi64(s_m, mask_m);
        s_m = _mm512_srli_epi64(s_m, 31);
        s_m = _mm512_add_epi64(s_m, state_m);
        state_m = _mm512_and_epi64(s_m, mask_m);
        s_m = _mm512_srli_epi64(s_m, 31);
        state_m = _mm512_add_epi64(s_m, state_m);
        // (state - 1) * rnorm maps the state into [0,1).
        s_m = _mm512_sub_epi64(state_m, one_m);
        state_32m = _mm512_cvtepi64_epi32(s_m);
        ran_m = _mm512_cvtepi32_pd(state_32m);
        ran_m = _mm512_mul_pd(ran_m, rnorm_m);
        // NOTE(review): aligned store — requires ran 64-byte aligned;
        // _mm512_storeu_pd would be safe for arbitrary buffers.
        _mm512_store_pd(ran + i, ran_m);
    }
    // Snapshot the pre-tail state; needed to rebuild each_state below.
    _mm512_store_epi64(r_state, state_m);
    /* Fraction */
    s1_m = _mm512_setzero_si512();
    s2_m = _mm512_setzero_si512();
    for (j = 0; j < 4; ++j) {
        s_m = _mm512_mul_epu32(a_m[j], state_m);
        s1_m = _mm512_add_epi64(s1_m, s_m);
        state_m = _mm512_permutexvar_epi64(idx_m, state_m);
    }
    for (j = 0; j < 4; ++j) {
        s_m = _mm512_mul_epu32(a_m[j + 4], state_m);
        s2_m = _mm512_add_epi64(s2_m, s_m);
        state_m = _mm512_permutexvar_epi64(idx_m, state_m);
    }
    s_m = _mm512_and_epi64(s1_m, mask_m);
    s1_m = _mm512_srli_epi64(s1_m, 31);
    s1_m = _mm512_add_epi64(s_m, s1_m);
    s_m = _mm512_and_epi64(s2_m, mask_m);
    s2_m = _mm512_srli_epi64(s2_m, 31);
    s2_m = _mm512_add_epi64(s_m, s2_m);
    s_m = _mm512_add_epi64(s1_m, s2_m);
    state_m = _mm512_and_epi64(s_m, mask_m);
    s_m = _mm512_srli_epi64(s_m, 31);
    state_m = _mm512_add_epi64(s_m, state_m);
    s_m = _mm512_sub_epi64(state_m, one_m);
    state_32m = _mm512_cvtepi64_epi32(s_m);
    ran_m = _mm512_cvtepi32_pd(state_32m);
    ran_m = _mm512_mul_pd(ran_m, rnorm_m);
    // Store the last n - i outputs elementwise (lane subscripting is a
    // GCC/Clang vector extension), then rebuild each_state newest-first
    // from j fresh elements plus 8 - j saved lags.
    for (j = 0; j < n - i; ++j) {
        ran[i + j] = ran_m[j];
    }
    for (i = 0; i < j; ++i) {
        each_state[j - 1 - i] = (uint32_t)(state_m[i]);
    }
    for (i = j; i < 8; ++i) {
        each_state[j + 7 - i] = (uint32_t)(r_state[i]);
    }
#else
    // Scalar fallback: unrolled outer-product accumulation over 8 outputs.
    int i, j, k;
    uint32_t r_state[8];
    uint64_t s1[8], s2[8], s[8];
    double rnorm = 1.0 / static_cast<double>(MASK);
    for (i = 0; i < 8; ++i) {
        r_state[i] = each_state[7 - i];
    }
    for (i = 0; i < n; i+=8) {
        for (k = 0; k < 8; ++k) {
            s1[k] = 0;
            s2[k] = 0;
        }
        for (j = 0; j < 4; ++j) {
            for (k = 0; k < 8; ++k) {
                s1[k] += (uint64_t)(A8_OP_MATRIX[j * 8 + k]) * r_state[j];
                s2[k] += (uint64_t)(A8_OP_MATRIX[(j + 4) * 8 + k]) * r_state[j + 4];
            }
        }
        for (k = 0; k < 8 && i + k < n; ++k) { //only unroll not vectorized
            s[k] = (s1[k] & MASK) + (s1[k] >> 31) + (s2[k] & MASK) + (s2[k] >> 31);
            r_state[k] = (s[k] & MASK) + (s[k] >> 31);
            ran[i + k] = static_cast<double>(r_state[k] - 1) * rnorm;
        }
    }
    // NOTE(review): k is read here from the last loop iteration; if n == 0
    // it is uninitialized (undefined behavior) — confirm callers pass n > 0.
    for (i = 0; i < k; ++i) {
        each_state[k - 1 - i] = r_state[i];
    }
    for (i = k; i < 8; ++i) {
        each_state[k + 7 - i] = r_state[i];
    }
#endif
}
// Batched draw into ran[0..n) using (and advancing) the generator's own state.
// (Top-level const on n is a definition detail; matches the declared API.)
inline void mrg8_vec::mrg8_vec_outer(double * ran, const int n)
{
    mrg8_vec_outer(ran, n, state);
}
// Single draw from the generator's own state (n = 1 batch).
inline double mrg8_vec::mrg8_vec_outer()
{
    double r;
    mrg8_vec_outer(&r, 1, state);
    return r;
}
// Single draw advancing a caller-supplied 8-element state instead.
inline double mrg8_vec::mrg8_vec_outer(uint32_t *new_state)
{
    double r;
    mrg8_vec_outer(&r, 1, new_state);
    return r;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// OpenMP-parallel batched generation (inner-product kernel). The stream is
// split into per-thread chunks of ((n/tnum)/8)*8 numbers; each thread jumps
// ahead to the start of its chunk and generates independently. The last
// thread takes the remainder and its final state becomes the generator's
// state, so the sequence continues as if generated serially.
// Fix: each thread's 8-element scratch state is now a stack array instead of
// a `new[]`/`delete[]` pair — the heap allocation was unnecessary and would
// leak if anything between new and delete threw.
inline void mrg8_vec::mrg8_vec_inner_tp(double * ran, int n)
{
    int tnum = omp_get_max_threads();
    uint32_t next_state[8];
#pragma omp parallel
    {
        int each_n = ((n / tnum) / 8) * 8;
        int tid = omp_get_thread_num();
        int start = each_n * tid;
        uint32_t each_state[8];  // per-thread scratch state (stack-allocated)
        if (tid == (tnum - 1)) {
            each_n = n - each_n * tid;  // last thread absorbs the remainder
        }
        jump_ahead(start, each_state);
        mrg8_vec_inner(ran + start, each_n, each_state);
        if (tid == tnum - 1) {
            for (int j = 0; j < 8; ++j) {
                next_state[j] = each_state[j];
            }
        }
    }
    // After the implicit barrier, adopt the last thread's final state.
    for (int i = 0; i < 8; ++i) {
        state[i] = next_state[i];
    }
}
// OpenMP-parallel batched generation (outer-product kernel); same chunking
// and state-handoff scheme as mrg8_vec_inner_tp.
// Fix: per-thread scratch state is a stack array instead of `new[]`/`delete[]`
// (no heap traffic, no leak if an exception escapes between new and delete).
inline void mrg8_vec::mrg8_vec_outer_tp(double * ran, int n)
{
    int tnum = omp_get_max_threads();
    uint32_t next_state[8];
#pragma omp parallel
    {
        int each_n = ((n / tnum) / 8) * 8;
        int tid = omp_get_thread_num();
        int start = each_n * tid;
        uint32_t each_state[8];  // per-thread scratch state (stack-allocated)
        if (tid == (tnum - 1)) {
            each_n = n - each_n * tid;  // last thread absorbs the remainder
        }
        jump_ahead(start, each_state);
        mrg8_vec_outer(ran + start, each_n, each_state);
        if (tid == tnum - 1) {
            for (int j = 0; j < 8; ++j) {
                next_state[j] = each_state[j];
            }
        }
    }
    // After the implicit barrier, adopt the last thread's final state.
    for (int i = 0; i < 8; ++i) {
        state[i] = next_state[i];
    }
}
// OpenMP-parallel generation in `it` repeated small batches of each_n
// numbers per thread. Each thread jumps ahead by each_n * tid * it, then
// performs `it` successive batches. Note: every iteration writes into the
// same ran + offset region — presumably intentional for benchmarking
// throughput rather than collecting all outputs (TODO confirm).
// Fix: per-thread scratch state is a stack array instead of `new[]`/`delete[]`
// (no heap traffic, no leak if an exception escapes between new and delete).
inline void mrg8_vec::mrg8_vec_outer_tp_small(double * ran, int each_n, int it)
{
    int tnum = omp_get_max_threads();
    uint32_t next_state[8];
#pragma omp parallel
    {
        int tid = omp_get_thread_num();
        int offset = each_n * tid;
        int start = offset * it;
        uint32_t each_state[8];  // per-thread scratch state (stack-allocated)
        jump_ahead(start, each_state);
        for (int i = 0; i < it; ++i) {
            mrg8_vec_outer(ran + offset, each_n, each_state);
        }
        if (tid == tnum - 1) {
            for (int j = 0; j < 8; ++j) {
                next_state[j] = each_state[j];
            }
        }
    }
    // After the implicit barrier, adopt the last thread's final state.
    for (int i = 0; i < 8; ++i) {
        state[i] = next_state[i];
    }
}
#endif
|
inference.c | #include <stdlib.h>
#include <ctype.h>
#include <math.h>
#include <stdio.h>
#include <sys/time.h>
#include <assert.h>
#include <string.h>
/* Optionally include OpenMP with the -fopenmp flag */
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "include/lbfgs.h"
#include "include/plm.h"
#include "include/inference.h"
#define PI 3.14159265358979323846
/* Numerical bounds for ZeroAPCPriors */
#define LAMBDA_J_MIN 1E-2
#define LAMBDA_J_MAX 1E4
#define REGULARIZATION_GROUP_EPS 0.001
/* Internal to InferPairModel:
MAP estimation of parameters by L-BFGS */
void EstimatePairModelMAP(numeric_t *x, numeric_t *lambdas, alignment_t *ali,
options_t *options);
/* Internal to EstimatePairModelMAP:
Objective functions for point parameter estimates (MAP) */
static lbfgsfloatval_t PLMNegLogPosterior(void *instance,
const lbfgsfloatval_t *x, lbfgsfloatval_t *g, const int n,
const lbfgsfloatval_t step);
static lbfgsfloatval_t PLMNegLogPosteriorGapReduce(void *instance,
const lbfgsfloatval_t *x, lbfgsfloatval_t *g, const int n,
const lbfgsfloatval_t step);
static lbfgsfloatval_t PLMNegLogPosteriorBlock(void *instance,
const lbfgsfloatval_t *x, lbfgsfloatval_t *g, const int n,
const lbfgsfloatval_t step);
static lbfgsfloatval_t PLMNegLogPosteriorDO(void *instance,
const lbfgsfloatval_t *x, lbfgsfloatval_t *g, const int n,
const lbfgsfloatval_t step);
/* Internal to EstimatePairModelMAP: progress reporting */
static int ReportProgresslBFGS(void *instance, const lbfgsfloatval_t *x,
const lbfgsfloatval_t *g, const lbfgsfloatval_t fx,
const lbfgsfloatval_t xnorm, const lbfgsfloatval_t gnorm,
const lbfgsfloatval_t step, int n, int k, int ls);
/* Internal to EstimatePairModelMAP: parameter processing */
void PreCondition(const lbfgsfloatval_t *x, lbfgsfloatval_t *g,
alignment_t *ali, options_t *options);
lbfgsfloatval_t PostCondition(const lbfgsfloatval_t *x, lbfgsfloatval_t *g, lbfgsfloatval_t fx,
alignment_t *ali, options_t *options);
void ZeroAPCPriors(alignment_t *ali, options_t *options, numeric_t *lambdas,
lbfgsfloatval_t *x);
/* Internal to EstimatePairModelMAP: utility functions to L-BFGS */
const char *LBFGSErrorString(int ret);
numeric_t *InferPairModel(alignment_t *ali, options_t *options) {
    /* Estimate the parameters (sitewise fields hi and pairwise couplings
       eij) of a maximum entropy model for a multiple sequence alignment.
       Returns a freshly malloc'd vector of ali->nParams parameters that the
       caller owns and must free. */

    /* Initialize the regularization parameters: one lambda per site plus
       one per site pair (accessed through the lambdaHi/lambdaEij macros) */
    numeric_t *lambdas =
        (numeric_t *) malloc((ali->nSites + ali->nSites * (ali->nSites - 1) / 2)
                             * sizeof(numeric_t));
    /* Fix: allocation was previously unchecked, unlike x below */
    if (lambdas == NULL) {
        fprintf(stderr,
            "ERROR: Failed to allocate a memory block for hyperparameters.\n");
        exit(1);
    }
    for (int i = 0; i < ali->nSites; i++) lambdaHi(i) = options->lambdaH;
    for (int i = 0; i < ali->nSites - 1; i++)
        for (int j = i + 1; j < ali->nSites; j++)
            lambdaEij(i, j) = options->lambdaE;

    /* For gap-reduced problems, eliminate the gaps and reduce the alphabet */
    if (options->estimatorMAP == INFER_MAP_PLM_GAPREDUCE) {
        ali->nCodes = strlen(ali->alphabet) - 1;
        for (int i = 0; i < ali->nSites; i++)
            for (int s = 0; s < ali->nSeqs; s++)
                seq(s, i) -= 1;
    }

    /* Initialize parameters: nCodes fields per site plus an nCodes x nCodes
       coupling block per site pair */
    ali->nParams = ali->nSites * ali->nCodes
        + ali->nSites * (ali->nSites - 1) / 2 * ali->nCodes * ali->nCodes;
    numeric_t *x = (numeric_t *) malloc(sizeof(numeric_t) * ali->nParams);
    if (x == NULL) {
        fprintf(stderr,
            "ERROR: Failed to allocate a memory block for variables.\n");
        exit(1);
    }
    for (int i = 0; i < ali->nParams; i++) x[i] = 0.0;

    /* Initialize site parameters with the ML estimates
            hi = log(fi) + C
       A single pseudocount is added for stability
       (Laplace's rule or Morcos et al. with lambda = nCodes) */
    if (options->zeroAPC != 1) {
        for (int i = 0; i < ali->nSites; i++)
            for (int ai = 0; ai < ali->nCodes; ai++)
                xHi(i, ai) = log(fi(i, ai) * ali->nEff + 1.0);

        /* Zero-sum gauge: shift each site's fields to mean zero */
        for (int i = 0; i < ali->nSites; i++) {
            numeric_t hSum = 0.0;
            for (int ai = 0; ai < ali->nCodes; ai++) hSum += xHi(i, ai);
            numeric_t hShift = hSum / (numeric_t) ali->nCodes;
            for (int ai = 0; ai < ali->nCodes; ai++)
                xHi(i, ai) -= hShift;
        }
    }

    switch (options->estimator) {
        /* Point estimates */
        case INFER_MAP:
            /* Maximum a posteriori estimates of model parameters */
            EstimatePairModelMAP(x, lambdas, ali, options);
            break;
        /* For: future alternative estimators */
        default:
            /* Maximum a posteriori estimates of model parameters */
            EstimatePairModelMAP(x, lambdas, ali, options);
    }

    /* Restore the alignment encoding after inference */
    if (options->estimatorMAP == INFER_MAP_PLM_GAPREDUCE) {
        for (int i = 0; i < ali->nSites; i++)
            for (int s = 0; s < ali->nSeqs; s++)
                seq(s, i) += 1;
    }

    /* Fix: lambdas is only needed during estimation and was previously
       leaked; no callee visible here retains the pointer */
    free(lambdas);
    return x;
}
void EstimatePairModelMAP(numeric_t *x, numeric_t *lambdas, alignment_t *ali,
    options_t *options) {
    /* Computes Maximum a posteriori (MAP) estimates for the parameters of
       an undirected graphical model by L-BFGS. x is updated in place;
       lambdas supplies (and, under zeroAPC, receives updated) per-site and
       per-pair regularization weights. */

    /* Start timer (read by the progress callback for elapsed-time output) */
    gettimeofday(&ali->start, NULL);

    /* Initialize L-BFGS */
    lbfgs_parameter_t param;
    lbfgs_parameter_init(&param);
    param.epsilon = 1E-3;
    param.max_iterations = options->maxIter; /* 0 is unbounded */

    /* Array of void pointers provides relevant data structures to the
       objective callback through the lbfgs instance argument */
    void *d[3] = {(void *)ali, (void *)options, (void *)lambdas};

    /* Select the objective for the chosen MAP estimator.
       Fix: `algo` was declared function-local `static` for no benefit,
       making this function needlessly non-reentrant and thread-unsafe */
    lbfgs_evaluate_t algo;
    switch (options->estimatorMAP) {
        case INFER_MAP_PLM:
            algo = PLMNegLogPosterior;
            break;
        case INFER_MAP_PLM_GAPREDUCE:
            algo = PLMNegLogPosteriorGapReduce;
            break;
        case INFER_MAP_PLM_BLOCK:
            algo = PLMNegLogPosteriorBlock;
            break;
        case INFER_MAP_PLM_DROPOUT:
            algo = PLMNegLogPosteriorDO;
            break;
        default:
            algo = PLMNegLogPosterior;
    }

    if (options->zeroAPC == 1) fprintf(stderr,
        "Estimating coupling hyperparameters le = 1/2 inverse variance\n");

    /* Estimate parameters by optimization */
    int ret = 0;
    lbfgsfloatval_t fx;
    ret = lbfgs(ali->nParams, x, &fx, algo, ReportProgresslBFGS,
        (void*)d, &param);
    fprintf(stderr, "Gradient optimization: %s\n", LBFGSErrorString(ret));

    /* Optionally re-estimate parameters with adjusted hyperparameters */
    if (options->zeroAPC == 1) {
        /* Form new priors on the variances */
        ZeroAPCPriors(ali, options, lambdas, x);

        /* Reinitialize coupling parameters */
        for (int i = 0; i < ali->nSites - 1; i++)
            for (int j = i + 1; j < ali->nSites; j++)
                for (int ai = 0; ai < ali->nCodes; ai++)
                    for (int aj = 0; aj < ali->nCodes; aj++)
                        xEij(i, j, ai, aj) = 0.0;

        /* Iterate estimation with new hyperparameter estimates */
        options->zeroAPC = 2;
        ret = lbfgs(ali->nParams, x, &fx, algo,
            ReportProgresslBFGS, (void*)d, &param);
        fprintf(stderr, "Gradient optimization: %s\n", LBFGSErrorString(ret));
    }
}
static lbfgsfloatval_t PLMNegLogPosterior(void *instance,
    const lbfgsfloatval_t *x, lbfgsfloatval_t *g, const int n,
    const lbfgsfloatval_t step) {
    /* Compute the negative log posterior, which is the negative
       penalized log-(pseudo)likelihood and the objective for MAP inference.
       Called by lbfgs(); writes the gradient into g and returns the
       objective value. (n and step are part of the lbfgs_evaluate_t
       signature and are unused here.)
     */
    /* Unpack the instance array packed by EstimatePairModelMAP */
    void **d = (void **)instance;
    alignment_t *ali = (alignment_t *) d[0];
    options_t *options = (options_t *) d[1];
    numeric_t *lambdas = (numeric_t *) d[2];

    /* Initialize log-likelihood and gradient */
    lbfgsfloatval_t fx = 0.0;
    for (int i = 0; i < ali->nParams; i++) g[i] = 0;

    /* Negative log-pseudolikelihood: one conditional per site i, each
       thread accumulating into thread-local blocks Xi (parameters) and
       Di (gradient) before merging under the critical section.
       NOTE(review): siteE/siteH/siteDE/siteDH are presumably plm.h macros
       indexing into the local Xi/Di buffers — confirm against plm.h. */
    #pragma omp parallel for
    for (int i = 0; i < ali->nSites; i++) {
        numeric_t *H = (numeric_t *) malloc(ali->nCodes * sizeof(numeric_t));
        numeric_t *P = (numeric_t *) malloc(ali->nCodes * sizeof(numeric_t));
        numeric_t siteFx = 0.0;

        /* Reshape site parameters and gradient into local blocks */
        numeric_t *Xi = (numeric_t *) malloc(ali->nCodes * ali->nCodes
                                             * ali->nSites * sizeof(numeric_t));
        for (int j = 0; j < i; j++)
            for (int a = 0; a < ali->nCodes; a++)
                for (int b = 0; b < ali->nCodes; b++)
                    siteE(j, a, b) = xEij(i, j, a, b);
        for (int j = i + 1; j < ali->nSites; j++)
            for (int a = 0; a < ali->nCodes; a++)
                for (int b = 0; b < ali->nCodes; b++)
                    siteE(j, a, b) = xEij(i, j, a, b);
        for (int a = 0; a < ali->nCodes; a++) siteH(i, a) = xHi(i, a);
        numeric_t *Di = (numeric_t *) malloc(ali->nCodes * ali->nCodes
                                             * ali->nSites * sizeof(numeric_t));
        for (int d = 0; d < ali->nCodes * ali->nCodes * ali->nSites; d++)
            Di[d] = 0.0;

        /* Site negative conditional log likelihoods */
        for (int s = 0; s < ali->nSeqs; s++) {
            /* Compute potentials: field plus couplings to every other site */
            for (int a = 0; a < ali->nCodes; a++) H[a] = siteH(i, a);
            for (int j = 0; j < i; j++)
                for (int a = 0; a < ali->nCodes; a++)
                    H[a] += siteE(j, a, seq(s, j));
            for (int j = i + 1; j < ali->nSites; j++)
                for (int a = 0; a < ali->nCodes; a++)
                    H[a] += siteE(j, a, seq(s, j));

            /* Conditional distribution given sequence background:
               softmax of H, max-shifted for numerical stability */
            numeric_t scale = H[0];
            for (int a = 1; a < ali->nCodes; a++)
                scale = (scale >= H[a] ? scale : H[a]);
            for (int a = 0; a < ali->nCodes; a++) P[a] = exp(H[a] - scale);
            numeric_t Z = 0;
            for (int a = 0; a < ali->nCodes; a++) Z += P[a];
            numeric_t Zinv = 1.0 / Z;
            for (int a = 0; a < ali->nCodes; a++) P[a] *= Zinv;

            /* Log-likelihood contributions are scaled by sequence weight */
            numeric_t w = ali->weights[s];
            siteFx -= w * log(P[seq(s, i)]);

            /* Field gradient: observed count minus expected probability */
            siteDH(i, seq(s, i)) -= w;
            for (int a = 0; a < ali->nCodes; a++)
                siteDH(i, a) -= -w * P[a];

            /* Couplings gradient */
            int ix = seq(s, i);
            for (int j = 0; j < i; j++)
                siteDE(j, ix, seq(s, j)) -= w;
            for (int j = i + 1; j < ali->nSites; j++)
                siteDE(j, ix, seq(s, j)) -= w;
            for (int j = 0; j < i; j++)
                for (int a = 0; a < ali->nCodes; a++)
                    siteDE(j, a, seq(s, j)) -= -w * P[a];
            for (int j = i + 1; j < ali->nSites; j++)
                for (int a = 0; a < ali->nCodes; a++)
                    siteDE(j, a, seq(s, j)) -= -w * P[a];
        }

        /* Contribute local loglk and gradient to global */
        #pragma omp critical
        {
            fx += siteFx;
            for (int j = 0; j < i; j++)
                for (int a = 0; a < ali->nCodes; a++)
                    for (int b = 0; b < ali->nCodes; b++)
                        dEij(i, j, a, b) += siteDE(j, a, b);
            for (int j = i + 1; j < ali->nSites; j++)
                for (int a = 0; a < ali->nCodes; a++)
                    for (int b = 0; b < ali->nCodes; b++)
                        dEij(i, j, a, b) += siteDE(j, a, b);
            for (int a = 0; a < ali->nCodes; a++) dHi(i, a) += siteDH(i, a);
            /* NOTE(review): these frees are correct but need not be inside
               the critical section — could be moved out to shorten it */
            free(Xi);
            free(Di);
        }
        free(H);
        free(P);
    }
    ali->negLogLk = fx;

    /* Gaussian priors: quadratic penalties lambda * theta^2 on fields
       and couplings, with matching 2 * lambda * theta gradient terms */
    for (int i = 0; i < ali->nSites; i++)
        for (int ai = 0; ai < ali->nCodes; ai++) {
            dHi(i, ai) += lambdaHi(i) * 2.0 * xHi(i, ai);
            fx += lambdaHi(i) * xHi(i, ai) * xHi(i, ai);
        }
    for (int i = 0; i < ali->nSites-1; i++)
        for (int j = i + 1; j < ali->nSites; j++)
            for (int ai = 0; ai < ali->nCodes; ai++)
                for (int aj = 0; aj < ali->nCodes; aj++) {
                    dEij(i, j, ai, aj) += lambdaEij(i, j)
                                          * 2.0 * xEij(i, j, ai, aj);
                    fx += lambdaEij(i, j)
                          * xEij(i, j, ai, aj) * xEij(i, j, ai, aj);
                }

    fx = PostCondition(x, g, fx, ali, options);
    return fx;
}
static lbfgsfloatval_t PLMNegLogPosteriorGapReduce(void *instance,
    const lbfgsfloatval_t *x, lbfgsfloatval_t *g, const int n,
    const lbfgsfloatval_t step) {
    /* Compute the negative log posterior, which is the negative
       penalized log-(pseudo)likelihood and the objective for MAP inference.
       Gap-reduced variant of PLMNegLogPosterior: gapped positions (encoded
       as negative after InferPairModel's seq(s,i) -= 1 shift) contribute
       neither to the conditionals nor to the gradient.
     */
    /* Unpack the instance array packed by EstimatePairModelMAP */
    void **d = (void **)instance;
    alignment_t *ali = (alignment_t *) d[0];
    options_t *options = (options_t *) d[1];
    numeric_t *lambdas = (numeric_t *) d[2];

    /* Initialize log-likelihood and gradient */
    lbfgsfloatval_t fx = 0.0;
    for (int i = 0; i < ali->nParams; i++) g[i] = 0;

    /* Negative log-pseudolikelihood: one conditional per site i, each
       thread working in local blocks Xi/Di before the critical-section merge
       (siteE/siteH/siteDE/siteDH presumably index into Xi/Di via plm.h
       macros — confirm against plm.h) */
    #pragma omp parallel for
    for (int i = 0; i < ali->nSites; i++) {
        numeric_t *H = (numeric_t *) malloc(ali->nCodes * sizeof(numeric_t));
        numeric_t *P = (numeric_t *) malloc(ali->nCodes * sizeof(numeric_t));
        numeric_t siteFx = 0.0;

        /* Reshape site parameters and gradient into local blocks */
        numeric_t *Xi = (numeric_t *) malloc(ali->nCodes * ali->nCodes
                                             * ali->nSites * sizeof(numeric_t));
        for (int j = 0; j < i; j++)
            for (int a = 0; a < ali->nCodes; a++)
                for (int b = 0; b < ali->nCodes; b++)
                    siteE(j, a, b) = xEij(i, j, a, b);
        for (int j = i + 1; j < ali->nSites; j++)
            for (int a = 0; a < ali->nCodes; a++)
                for (int b = 0; b < ali->nCodes; b++)
                    siteE(j, a, b) = xEij(i, j, a, b);
        for (int a = 0; a < ali->nCodes; a++) siteH(i, a) = xHi(i, a);
        numeric_t *Di = (numeric_t *) malloc(ali->nCodes * ali->nCodes
                                             * ali->nSites * sizeof(numeric_t));
        for (int d = 0; d < ali->nCodes * ali->nCodes * ali->nSites; d++)
            Di[d] = 0.0;

        /* Site negative conditional log likelihoods */
        for (int s = 0; s < ali->nSeqs; s++) {
            /* Only ungapped sites are considered in the model */
            if (seq(s, i) >= 0) {
                /* Compute potentials, skipping gapped background sites */
                for (int a = 0; a < ali->nCodes; a++) H[a] = siteH(i, a);
                for (int j = 0; j < i; j++)
                    for (int a = 0; a < ali->nCodes; a++)
                        if (seq(s, j) >= 0)
                            H[a] += siteE(j, a, seq(s, j));
                for (int j = i + 1; j < ali->nSites; j++)
                    for (int a = 0; a < ali->nCodes; a++)
                        if (seq(s, j) >= 0)
                            H[a] += siteE(j, a, seq(s, j));

                /* Conditional distribution given sequence background:
                   max-shifted softmax of H for numerical stability */
                numeric_t scale = H[0];
                for (int a = 1; a < ali->nCodes; a++)
                    scale = (scale >= H[a] ? scale : H[a]);
                for (int a = 0; a < ali->nCodes; a++) P[a] = exp(H[a] - scale);
                numeric_t Z = 0;
                for (int a = 0; a < ali->nCodes; a++) Z += P[a];
                numeric_t Zinv = 1.0 / Z;
                for (int a = 0; a < ali->nCodes; a++) P[a] *= Zinv;

                /* Log-likelihood contributions are scaled by sequence weight */
                numeric_t w = ali->weights[s];
                siteFx -= w * log(P[seq(s, i)]);

                /* Field gradient */
                siteDH(i, seq(s, i)) -= w;
                for (int a = 0; a < ali->nCodes; a++)
                    siteDH(i, a) -= -w * P[a];

                /* Couplings gradient, gapped partner sites excluded */
                int ix = seq(s, i);
                for (int j = 0; j < i; j++)
                    if (seq(s, j) >= 0)
                        siteDE(j, ix, seq(s, j)) -= w;
                for (int j = i + 1; j < ali->nSites; j++)
                    if (seq(s, j) >= 0)
                        siteDE(j, ix, seq(s, j)) -= w;
                for (int j = 0; j < i; j++)
                    if (seq(s, j) >= 0)
                        for (int a = 0; a < ali->nCodes; a++)
                            siteDE(j, a, seq(s, j)) -= -w * P[a];
                for (int j = i + 1; j < ali->nSites; j++)
                    if (seq(s, j) >= 0)
                        for (int a = 0; a < ali->nCodes; a++)
                            siteDE(j, a, seq(s, j)) -= -w * P[a];
            }
        }

        /* Contribute local loglk and gradient to global */
        #pragma omp critical
        {
            fx += siteFx;
            for (int j = 0; j < i; j++)
                for (int a = 0; a < ali->nCodes; a++)
                    for (int b = 0; b < ali->nCodes; b++)
                        dEij(i, j, a, b) += siteDE(j, a, b);
            for (int j = i + 1; j < ali->nSites; j++)
                for (int a = 0; a < ali->nCodes; a++)
                    for (int b = 0; b < ali->nCodes; b++)
                        dEij(i, j, a, b) += siteDE(j, a, b);
            for (int a = 0; a < ali->nCodes; a++) dHi(i, a) += siteDH(i, a);
            free(Xi);
            free(Di);
        }
        free(H);
        free(P);
    }
    ali->negLogLk = fx;

    /* Gaussian priors: quadratic penalties on fields and couplings with
       matching gradient terms */
    for (int i = 0; i < ali->nSites; i++)
        for (int ai = 0; ai < ali->nCodes; ai++) {
            dHi(i, ai) += lambdaHi(i) * 2.0 * xHi(i, ai);
            fx += lambdaHi(i) * xHi(i, ai) * xHi(i, ai);
        }
    for (int i = 0; i < ali->nSites-1; i++)
        for (int j = i + 1; j < ali->nSites; j++)
            for (int ai = 0; ai < ali->nCodes; ai++)
                for (int aj = 0; aj < ali->nCodes; aj++) {
                    dEij(i, j, ai, aj) += lambdaEij(i, j)
                                          * 2.0 * xEij(i, j, ai, aj);
                    fx += lambdaEij(i, j)
                          * xEij(i, j, ai, aj) * xEij(i, j, ai, aj);
                }

    fx = PostCondition(x, g, fx, ali, options);
    return fx;
}
static lbfgsfloatval_t PLMNegLogPosteriorBlock(void *instance,
    const lbfgsfloatval_t *x, lbfgsfloatval_t *g, const int n,
    const lbfgsfloatval_t step) {
    /* Compute the negative log posterior, i.e. the negative penalized
       log-(pseudo)likelihood, the objective for MAP inference.
       "Block" variant: the couplings are first expanded into a dense,
       symmetrized block matrix (Eij) so the potentials for all sites of a
       sequence can be accumulated with contiguous vector sweeps.
       liblbfgs evaluate-callback signature: x = parameters, g = gradient
       output (overwritten), n = parameter count, step = line-search step
       (unused here). Also stores the data term in ali->negLogLk.
     */
    void **d = (void **)instance;
    alignment_t *ali = (alignment_t *) d[0];
    options_t *options = (options_t *) d[1];
    numeric_t *lambdas = (numeric_t *) d[2];
    /* Initialize log-likelihood and gradient */
    lbfgsfloatval_t fx = 0.0;
    for (int i = 0; i < ali->nParams; i++) g[i] = 0;
    /* Block fields hi and their gradient accumulator */
    numeric_t *hi = (numeric_t *)
        malloc(ali->nSites * ali->nCodes * sizeof(numeric_t));
    numeric_t *gHi = (numeric_t *)
        malloc(ali->nSites * ali->nCodes * sizeof(numeric_t));
    for (int i = 0; i < ali->nSites; i++)
        for (int ai = 0; ai < ali->nCodes; ai++) Hi(i, ai) = xHi(i, ai);
    for (int i = 0; i < ali->nSites * ali->nCodes; i++) gHi[i] = 0;
    /* Block couplings eij, symmetrized: Eij(j,aj,i,ai) == Eij(i,ai,j,aj) */
    numeric_t *eij = (numeric_t *) malloc(ali->nSites * ali->nSites
        * ali->nCodes * ali->nCodes * sizeof(numeric_t));
    numeric_t *gEij = (numeric_t *) malloc(ali->nSites * ali->nSites
        * ali->nCodes * ali->nCodes * sizeof(numeric_t));
    for (int i = 0; i < ali->nSites * ali->nSites * ali->nCodes * ali->nCodes;
        i++) eij[i] = 0.0;
    for (int i = 0; i < ali->nSites * ali->nSites * ali->nCodes * ali->nCodes;
        i++) gEij[i] = 0.0;
    for (int i = 0; i < ali->nSites - 1; i++)
        for (int j = i + 1; j < ali->nSites; j++)
            for (int ai = 0; ai < ali->nCodes; ai++)
                for (int aj = 0; aj < ali->nCodes; aj++)
                    Eij(j, aj, i, ai) = Eij(i, ai, j, aj) = xEij(i, j, ai, aj);
    /* Negative log-pseudolikelihood */
    for (int s = 0; s < ali->nSeqs; s++) {
        /* Form potential for conditional log likelihoods at every site */
        numeric_t *H = (numeric_t *)
            malloc(ali->nCodes * ali->nSites * sizeof(numeric_t));
        numeric_t *Z = (numeric_t *) malloc(ali->nSites * sizeof(numeric_t));
        /* Initialize potentials with fields */
        // memcpy(H, hi, ali->nSites * ali->nCodes * sizeof(numeric_t));
        for(int jx = 0; jx < ali->nSites * ali->nCodes; jx++) H[jx] = hi[jx];
        /* Contribute coupling block due to i, ai.
           NOTE(review): assumes seq(s, i) >= 0 (no gaps); a negative code
           would index before the start of eij -- confirm upstream
           filtering. */
        for (int i = 0; i < ali->nSites; i++) {
            const letter_t ai = seq(s, i);
            const numeric_t *jB = &(Eij(i, ai, 0, 0));
            for(int jx = 0; jx < ali->nSites * ali->nCodes; jx++)
                H[jx] += jB[jx];
        }
        /* Conditional distributions: exponentiate and normalize each site
           over its nCodes states.
           BUG FIX: the two normalization loops below previously ran
           ai < ali->nSites instead of ai < ali->nCodes, reading and
           writing past the end of each site's block of H whenever
           nSites > nCodes (the typical case).
           NOTE(review): no max-subtraction before exp(); assumes the
           potentials stay in a safe range -- confirm for large parameter
           magnitudes. */
        for (int i = 0; i < ali->nSites * ali->nCodes; i++) H[i] = exp(H[i]);
        for (int i = 0; i < ali->nSites; i++) Z[i] = 0;
        for (int i = 0; i < ali->nSites; i++)
            for (int ai = 0; ai < ali->nCodes; ai++) Z[i] += Hp(i, ai);
        for (int i = 0; i < ali->nSites; i++)
            for (int ai = 0; ai < ali->nCodes; ai++) Hp(i, ai) /= Z[i];
        /* Weighted negative conditional log likelihoods for this sequence */
        numeric_t seqFx = 0;
        for (int i = 0; i < ali->nSites; i++)
            seqFx -= ali->weights[s] * log(Hp(i, seq(s, i)));
        /* Reuse H as -w * P for the gradient accumulations below */
        for(int jx = 0; jx < ali->nSites * ali->nCodes; jx++)
            H[jx] *= -ali->weights[s];
        /* Field gradient: w * (P - delta(observed)) */
        for (int i = 0; i < ali->nSites; i++)
            gHi(i, seq(s, i)) -= ali->weights[s];
        for(int jx = 0; jx < ali->nSites * ali->nCodes; jx++) gHi[jx] -= H[jx];
        /* Coupling gradient, data term. Only the off-diagonal blocks are
           read back when folding gEij into dEij below.
           BUG FIX: the inner loop previously started at j = i, needlessly
           touching the (never-read) diagonal block. */
        for (int i = 0; i < ali->nSites - 1; i++)
            for (int j = i + 1; j < ali->nSites; j++)
                gEij(i, seq(s, i), j, seq(s, j)) -= ali->weights[s];
        for (int i = 0; i < ali->nSites; i++) {
            const letter_t ai = seq(s, i);
            numeric_t *jgBlock = &(gEij(i, ai, 0, 0));
            for (int jx = 0; jx < ali->nSites * ali->nCodes; jx++)
                jgBlock[jx] -= H[jx];
        }
        free(H);
        free(Z);
        fx += seqFx;
    }
    /* Fold the block-layout gradients into the parameter-layout gradient */
    for (int i = 0; i < ali->nSites; i++)
        for (int ai = 0; ai < ali->nCodes; ai++)
            dHi(i, ai) += gHi(i, ai);
    for (int i = 0; i < ali->nSites - 1; i++)
        for (int j = i + 1; j < ali->nSites; j++)
            for (int ai = 0; ai < ali->nCodes; ai++)
                for (int aj = 0; aj < ali->nCodes; aj++)
                    dEij(i, j, ai, aj) += gEij(j, aj, i, ai) + gEij(i, ai, j, aj);
    free(hi);
    free(gHi);
    free(eij);
    free(gEij);
    ali->negLogLk = fx;
    /* Gaussian priors: quadratic penalties on fields and couplings */
    for (int i = 0; i < ali->nSites; i++)
        for (int ai = 0; ai < ali->nCodes; ai++) {
            dHi(i, ai) += lambdaHi(i) * 2.0 * xHi(i, ai);
            fx += lambdaHi(i) * xHi(i, ai) * xHi(i, ai);
        }
    for (int i = 0; i < ali->nSites-1; i++)
        for (int j = i + 1; j < ali->nSites; j++)
            for (int ai = 0; ai < ali->nCodes; ai++)
                for (int aj = 0; aj < ali->nCodes; aj++) {
                    dEij(i, j, ai, aj) += lambdaEij(i, j)
                        * 2.0 * xEij(i, j, ai, aj);
                    fx += lambdaEij(i, j)
                        * xEij(i, j, ai, aj) * xEij(i, j, ai, aj);
                }
    fx = PostCondition(x, g, fx, ali, options);
    return fx;
}
static lbfgsfloatval_t PLMNegLogPosteriorDO(void *instance,
    const lbfgsfloatval_t *x, lbfgsfloatval_t *g, const int n,
    const lbfgsfloatval_t step) {
    /* Compute the negative log posterior (negative penalized
       log-(pseudo)likelihood), the objective for MAP inference, with
       per-sequence parameter dropout ("DO"): for every sequence a fresh
       0/1 mask over all parameters is drawn into drop_mask, and the
       bitHi/bitEij factors gate the corresponding parameters in both the
       potentials and the gradient -- presumably those macros (defined
       elsewhere) read drop_mask; confirm against the macro definitions.
       liblbfgs evaluate-callback signature: x = parameters, g = gradient
       output (overwritten), n = parameter count, step = line-search step
       (unused here). Also stores the data term in ali->negLogLk. */
    void **d = (void **)instance;
    alignment_t *ali = (alignment_t *) d[0];
    options_t *options = (options_t *) d[1];
    numeric_t *lambdas = (numeric_t *) d[2];
    /* Initialize log-likelihood and gradient */
    lbfgsfloatval_t fx = 0.0;
    for (int i = 0; i < ali->nParams; i++) g[i] = 0;
    /* Scratch buffers: per-site potential H and conditional distribution P */
    numeric_t *H = (numeric_t *) malloc(ali->nCodes * sizeof(numeric_t));
    numeric_t *P = (numeric_t *) malloc(ali->nCodes * sizeof(numeric_t));
    int *drop_mask = (int *) malloc(ali->nParams * sizeof(int));
    for (int s = 0; s < ali->nSeqs; s++) {
        /* Generate random bit mask over parameters.
           NOTE(review): uses global rand() state (seeding not visible
           here; not thread-safe) -- verify reproducibility requirements. */
        for (int p = 0; p < ali->nParams; p ++)
            drop_mask[p] = (int) rand() % 2;
        /* Pseudolikelihood objective: one conditional per (sequence, site) */
        for (int i = 0; i < ali->nSites; i++) {
            /* Potential H[a]: gated field plus gated couplings to the
               observed letters at every other site j != i */
            for (int a = 0; a < ali->nCodes; a++) H[a] = bitHi(i, a)
                * xHi(i, a);
            for (int a = 0; a < ali->nCodes; a++)
                for (int j = 0; j < i; j++)
                    H[a] += bitEij(i, j, a, seq(s, j))
                        * xEij(i, j, a, seq(s, j));
            for (int a = 0; a < ali->nCodes; a++)
                for (int j = i + 1; j < ali->nSites; j++)
                    H[a] += bitEij(i, j, a, seq(s, j))
                        * xEij(i, j, a, seq(s, j));
            /* Compute distribution from potential.
               NOTE(review): no max-subtraction before exp(), so large
               potentials could overflow -- confirm expected parameter
               range. */
            for (int a = 0; a < ali->nCodes; a++) P[a] = exp(H[a]);
            numeric_t Z = 0;
            for (int a = 0; a < ali->nCodes; a++) Z += P[a];
            numeric_t Zinv = 1.0 / Z;
            for (int a = 0; a < ali->nCodes; a++) P[a] *= Zinv;
            /* Log-likelihood contribution, scaled by the sequence weight */
            fx -= ali->weights[s] * log(P[seq(s, i)]);
            /* Field gradient: gated w * (P[a] - delta(a, observed)) */
            dHi(i, seq(s, i)) -= bitHi(i, seq(s, i)) * ali->weights[s];
            for (int a = 0; a < ali->nCodes; a++)
                dHi(i, a) -= -bitHi(i, a) * ali->weights[s] * P[a];
            /* Couplings gradient: same form, gated by bitEij */
            for (int j = 0; j < i; j++)
                dEij(i, j, seq(s, i), seq(s, j)) -=
                    bitEij(i, j, seq(s, i), seq(s, j)) * ali->weights[s];
            for (int j = i + 1; j < ali->nSites; j++)
                dEij(i, j, seq(s, i), seq(s, j)) -=
                    bitEij(i, j, seq(s, i), seq(s, j)) * ali->weights[s];
            for (int j = 0; j < i; j++)
                for (int a = 0; a < ali->nCodes; a++)
                    dEij(i, j, a, seq(s, j)) -=
                        -bitEij(i, j, a, seq(s, j)) * ali->weights[s] * P[a];
            for (int j = i + 1; j < ali->nSites; j++)
                for (int a = 0; a < ali->nCodes; a++)
                    dEij(i, j, a, seq(s, j)) -=
                        -bitEij(i, j, a, seq(s, j)) * ali->weights[s] * P[a];
        }
    }
    free(H);
    free(P);
    free(drop_mask);
    ali->negLogLk = fx;
    /* Gaussian priors: quadratic penalties on fields and couplings */
    for (int i = 0; i < ali->nSites; i++)
        for (int ai = 0; ai < ali->nCodes; ai++) {
            dHi(i, ai) += lambdaHi(i) * 2.0 * xHi(i, ai);
            fx += lambdaHi(i) * xHi(i, ai) * xHi(i, ai);
        }
    for (int i = 0; i < ali->nSites-1; i++)
        for (int j = i + 1; j < ali->nSites; j++)
            for (int ai = 0; ai < ali->nCodes; ai++)
                for (int aj = 0; aj < ali->nCodes; aj++) {
                    dEij(i, j, ai, aj) += lambdaEij(i, j)
                        * 2.0 * xEij(i, j, ai, aj);
                    fx += lambdaEij(i, j)
                        * xEij(i, j, ai, aj) * xEij(i, j, ai, aj);
                }
    fx = PostCondition(x, g, fx, ali, options);
    return fx;
}
static int ReportProgresslBFGS(void *instance, const lbfgsfloatval_t *x,
    const lbfgsfloatval_t *g, const lbfgsfloatval_t fx,
    const lbfgsfloatval_t xnorm, const lbfgsfloatval_t gnorm,
    const lbfgsfloatval_t step, int n, int k, int ls) {
    /* liblbfgs progress callback: prints one tab-separated status line per
       iteration k to stderr (elapsed wall-clock time, gnorm/xnorm,
       objective fx, the stored data term ali->negLogLk, and the L2 norms
       of the field and coupling parameter blocks). Returning 0 tells
       liblbfgs to continue the minimization. */
    void **d = (void **)instance;
    alignment_t *ali = (alignment_t *)d[0];
    /* Compute norms of relevant parameters. Parameter layout: fields
       occupy the first nSites * nCodes entries of x/g, couplings the
       remainder up to nParams. */
    lbfgsfloatval_t hNorm = 0.0, eNorm = 0.0, hGNorm = 0.0, eGNorm = 0.0;
    for (int i = 0; i < ali->nSites * ali->nCodes; i++)
        hNorm += x[i]*x[i];
    for (int i = 0; i < ali->nSites * ali->nCodes; i++)
        hGNorm += g[i]*g[i];
    for (int i = ali->nSites * ali->nCodes; i < ali->nParams; i++)
        eNorm += x[i]*x[i];
    for (int i = ali->nSites * ali->nCodes; i < ali->nParams; i++)
        eGNorm += g[i]*g[i];
    hNorm = sqrt(hNorm);
    hGNorm = sqrt(hGNorm);  /* computed but not printed below */
    eNorm = sqrt(eNorm);
    eGNorm = sqrt(eGNorm);  /* computed but not printed below */
    /* Retrieve elapsed time since ali->start. The two branches below
       normalize ali->start in place (borrowing/carrying whole seconds
       through tv_usec) so the field-wise subtraction is well-behaved; the
       represented instant is unchanged, but the mutation persists across
       calls. */
    static struct timeval now;  /* NOTE(review): static looks unnecessary
                                   and is not thread-safe; verify intent */
    gettimeofday(&now, NULL);
    if (now.tv_usec < ali->start.tv_usec) {
        int nsec = (ali->start.tv_usec - now.tv_usec) / 1000000 + 1;
        ali->start.tv_usec -= 1000000 * nsec;
        ali->start.tv_sec += nsec;
    }
    if (now.tv_usec - ali->start.tv_usec > 1000000) {
        int nsec = (now.tv_usec - ali->start.tv_usec) / 1000000;
        ali->start.tv_usec += 1000000 * nsec;
        ali->start.tv_sec -= nsec;
    }
    numeric_t elapsed = (numeric_t) (now.tv_sec - ali->start.tv_sec)
        + ((numeric_t) (now.tv_usec - ali->start.tv_usec)) / 1E6;
    /* Emit the column header once, on the first iteration */
    if (k == 1) fprintf(stderr,
        "iter\ttime\tcond\tfx\t-loglk"
        "\t||h||\t||e||\n");
    fprintf(stderr, "%d\t%.1f\t%.2f\t%.1f\t%.1f\t%.1f\t%.1f\n",
        k, elapsed, gnorm / xnorm, fx, ali->negLogLk, hNorm, eNorm);
    return 0;
}
void PreCondition(const lbfgsfloatval_t *x, lbfgsfloatval_t *g, alignment_t *ali, options_t *options) {
    /* Hook invoked before objective evaluation; currently a no-op kept as
       an extension point (mirrors PostCondition). Explicitly discard the
       parameters to silence -Wunused-parameter. */
    (void)x;
    (void)g;
    (void)ali;
    (void)options;
}
lbfgsfloatval_t PostCondition(const lbfgsfloatval_t *x, lbfgsfloatval_t *g, lbfgsfloatval_t fx, alignment_t *ali, options_t *options) {
    /* Post-processing hook applied after each objective evaluation:
       optionally clears the field gradient (zeroAPC mode) and adds the
       group-sparsity (L1-of-L2) penalty over each coupling block.
       Returns the adjusted objective value. */
    /* zeroAPC mode: zero out the field gradient entirely */
    if (options->zeroAPC == 1) {
        for (int si = 0; si < ali->nSites; si++) {
            for (int a = 0; a < ali->nCodes; a++) {
                dHi(si, a) = 0.0;
            }
        }
    }
    /* Group (L1/L2) regularization: penalize the Frobenius norm of each
       coupling block e_ij; EPS keeps the gradient finite at zero. */
    if (options->lambdaGroup > 0) {
        for (int si = 0; si < ali->nSites - 1; si++) {
            for (int sj = si + 1; sj < ali->nSites; sj++) {
                double sumSq = REGULARIZATION_GROUP_EPS;
                for (int a = 0; a < ali->nCodes; a++) {
                    for (int b = 0; b < ali->nCodes; b++) {
                        sumSq += xEij(si, sj, a, b) * xEij(si, sj, a, b);
                    }
                }
                double blockNorm = sqrt(sumSq);
                fx += options->lambdaGroup * blockNorm;
                for (int a = 0; a < ali->nCodes; a++) {
                    for (int b = 0; b < ali->nCodes; b++) {
                        dEij(si, sj, a, b) +=
                            options->lambdaGroup * xEij(si, sj, a, b) / blockNorm;
                    }
                }
            }
        }
    }
    return fx;
}
void ZeroAPCPriors(alignment_t *ali, options_t *options, numeric_t *lambdas,
    lbfgsfloatval_t *x) {
    /* Estimate pair-specific Gaussian prior widths from the sample
       variances of a first-pass coupling estimate x: compute Var(e_ij)
       per pair, subtract its leading (APC-style) rank-one component,
       invert into lambda hyperparameters (lambda = 1 / (2 * var)),
       truncate to [LAMBDA_J_MIN, LAMBDA_J_MAX], and print summary
       statistics to stderr. Writes results through lambdaEij(i, j). */
    /* Compute the variances of the couplings for each pair */
    for (int i = 0; i < ali->nSites - 1; i++)
        for (int j = i + 1; j < ali->nSites; j++) {
            /* Mean(eij) over ai, aj */
            numeric_t mean = 0.0;
            for (int ai = 0; ai < ali->nCodes; ai++)
                for (int aj = 0; aj < ali->nCodes; aj++)
                    mean += xEij(i, j, ai, aj);
            mean *= 1.0 / ((numeric_t) ali->nCodes * ali->nCodes);
            /* Var(eij) over ai, aj */
            numeric_t ssq = 0.0;
            for (int ai = 0; ai < ali->nCodes; ai++)
                for (int aj = 0; aj < ali->nCodes; aj++)
                    ssq += (xEij(i, j, ai, aj) - mean)
                        * (xEij(i, j, ai, aj) - mean);
            /* Use N rather than N-1 since N has better MSE */
            numeric_t var = ssq / ((numeric_t) (ali->nCodes * ali->nCodes));
            lambdaEij(i, j) = var;
        }
    /* Determine the site-wise statistics of the variances */
    numeric_t nPairs = ((numeric_t) ((ali->nSites) * (ali->nSites - 1))) / 2.0;
    numeric_t V_avg = 0.0;
    numeric_t *V_pos_avg = (numeric_t *) malloc(ali->nSites * sizeof(numeric_t));
    for (int i = 0; i < ali->nSites; i++) {
        V_pos_avg[i] = 0.0;
    }
    for (int i = 0; i < ali->nSites - 1; i++) {
        for (int j = i + 1; j < ali->nSites; j++) {
            V_pos_avg[i] += lambdaEij(i, j) / (numeric_t) (ali->nSites - 1);
            V_pos_avg[j] += lambdaEij(i, j) / (numeric_t) (ali->nSites - 1);
            V_avg += lambdaEij(i, j) / nPairs;
        }
    }
    /* Remove the first (rank-one, APC-style) component of the variances */
    for (int i = 0; i < ali->nSites - 1; i++)
        for (int j = i + 1; j < ali->nSites; j++)
            lambdaEij(i, j) =
                lambdaEij(i, j) - V_pos_avg[i] * V_pos_avg[j] / V_avg;
    free(V_pos_avg);  /* BUG FIX: was leaked on every call */
    /* Transform and truncate variances into lambda hyperparameters */
    numeric_t pcount = 0.0;
    numeric_t psum = 0.0;
    numeric_t inbounds = 0;
    numeric_t min = LAMBDA_J_MAX;
    numeric_t max = LAMBDA_J_MIN;
    for (int i = 0; i < ali->nSites - 1; i++) {
        for (int j = i + 1; j < ali->nSites; j++) {
            /* Lambda coefficients are 1/2 the inverse variance */
            if (lambdaEij(i, j) > 0) {
                lambdaEij(i, j) = 1.0 / (2.0 * lambdaEij(i, j));
                psum += lambdaEij(i, j);
                pcount += 1.0;
            } else {
                /* Non-positive variance: force truncation to the maximum */
                lambdaEij(i, j) = LAMBDA_J_MAX + 1.0;
            }
            /* Truncate lambda for numerical stability */
            if (lambdaEij(i, j) >= LAMBDA_J_MIN && lambdaEij(i, j) <= LAMBDA_J_MAX)
                inbounds += 1.0 / (numeric_t) ((ali->nSites)*(ali->nSites - 1) / 2.0);
            if (lambdaEij(i, j) < 0 || !isfinite(lambdaEij(i, j)))
                lambdaEij(i, j) = LAMBDA_J_MAX;
            if (lambdaEij(i, j) < LAMBDA_J_MIN) lambdaEij(i, j) = LAMBDA_J_MIN;
            if (lambdaEij(i, j) > LAMBDA_J_MAX) lambdaEij(i, j) = LAMBDA_J_MAX;
            /* Track extremes (post-truncation) */
            if (lambdaEij(i, j) > max) max = lambdaEij(i, j);
            if (lambdaEij(i, j) < min) min = lambdaEij(i, j);
        }
    }
    /* NOTE(review): the "(min < L < max)" bounds printed below are the
       observed post-truncation extremes, while the in-bounds fraction was
       tested against LAMBDA_J_MIN/MAX -- confirm the intended wording. */
    fprintf(stderr, "Raw coupling hyperparameter statistics:\n"
        "\tMean positive lambda: %f\n"
        "\tPercent of ij's positive: %f\n"
        "\tPercent in bounds (%f < L < %f): %f\n",
        psum / pcount,
        pcount / nPairs,
        min, max, inbounds);
}
const char *LBFGSErrorString(int ret) {
    /* Map a liblbfgs return code to a short human-readable name.
       The names mirror the LBFGSERR_* enumerators with the prefix
       dropped; ret == 0 reports success, and any unrecognized code
       (e.g. positive non-error statuses) falls through to the default.
       Returns a pointer to a string literal; the caller must not free. */
    const char *p;
    switch(ret) {
    case LBFGSERR_UNKNOWNERROR:
        p = "UNKNOWNERROR";
        break;
    /** Logic error. */
    case LBFGSERR_LOGICERROR:
        p = "LOGICERROR";
        break;
    /** Insufficient memory. */
    case LBFGSERR_OUTOFMEMORY:
        p = "OUTOFMEMORY";
        break;
    /** The minimization process has been canceled. */
    case LBFGSERR_CANCELED:
        p = "CANCELED";
        break;
    /** Invalid number of variables specified. */
    case LBFGSERR_INVALID_N:
        p = "INVALID_N";
        break;
    /** Invalid number of variables (for SSE) specified. */
    case LBFGSERR_INVALID_N_SSE:
        p = "INVALID_N_SSE";
        break;
    /** The array x must be aligned to 16 (for SSE). */
    case LBFGSERR_INVALID_X_SSE:
        p = "INVALID_X_SSE";
        break;
    /** Invalid parameter lbfgs_parameter_t::epsilon specified. */
    case LBFGSERR_INVALID_EPSILON:
        p = "INVALID_EPSILON";
        break;
    /** Invalid parameter lbfgs_parameter_t::past specified. */
    case LBFGSERR_INVALID_TESTPERIOD:
        p = "INVALID_TESTPERIOD";
        break;
    /** Invalid parameter lbfgs_parameter_t::delta specified. */
    case LBFGSERR_INVALID_DELTA:
        p = "INVALID_DELTA";
        break;
    /** Invalid parameter lbfgs_parameter_t::linesearch specified. */
    case LBFGSERR_INVALID_LINESEARCH:
        p = "INVALID_LINESEARCH";
        break;
    /** Invalid parameter lbfgs_parameter_t::min_step specified. */
    case LBFGSERR_INVALID_MINSTEP:
        p = "INVALID_MINSTEP";
        break;
    /** Invalid parameter lbfgs_parameter_t::max_step specified. */
    case LBFGSERR_INVALID_MAXSTEP:
        p = "INVALID_MAXSTEP";
        break;
    /** Invalid parameter lbfgs_parameter_t::ftol specified. */
    case LBFGSERR_INVALID_FTOL:
        p = "INVALID_FTOL";
        break;
    /** Invalid parameter lbfgs_parameter_t::wolfe specified. */
    case LBFGSERR_INVALID_WOLFE:
        p = "INVALID_WOLFE";
        break;
    /** Invalid parameter lbfgs_parameter_t::gtol specified. */
    case LBFGSERR_INVALID_GTOL:
        p = "INVALID_GTOL";
        break;
    /** Invalid parameter lbfgs_parameter_t::xtol specified. */
    case LBFGSERR_INVALID_XTOL:
        p = "INVALID_XTOL";
        break;
    /** Invalid parameter lbfgs_parameter_t::max_linesearch specified. */
    case LBFGSERR_INVALID_MAXLINESEARCH:
        p = "INVALID_MAXLINESEARCH";
        break;
    /** Invalid parameter lbfgs_parameter_t::orthantwise_c specified. */
    case LBFGSERR_INVALID_ORTHANTWISE:
        p = "INVALID_ORTHANTWISE";
        break;
    /** Invalid parameter lbfgs_parameter_t::orthantwise_start specified. */
    case LBFGSERR_INVALID_ORTHANTWISE_START:
        p = "INVALID_ORTHANTWISE_START";
        break;
    /** Invalid parameter lbfgs_parameter_t::orthantwise_end specified. */
    case LBFGSERR_INVALID_ORTHANTWISE_END:
        p = "INVALID_ORTHANTWISE_END"; /* BUG FIX: was "ORTHANTWISE_END" */
        break;
    /** The line-search step went out of the interval of uncertainty. */
    case LBFGSERR_OUTOFINTERVAL:
        p = "OUTOFINTERVAL";
        break;
    /** A logic error occurred; alternatively: the interval of uncertainty
        became too small. */
    case LBFGSERR_INCORRECT_TMINMAX:
        p = "INCORRECT_TMINMAX";
        break;
    /** A rounding error occurred; alternatively: no line-search step
        satisfies the sufficient decrease and curvature conditions. */
    case LBFGSERR_ROUNDING_ERROR:
        p = "ROUNDING_ERROR";
        break;
    /** The line-search step became smaller than lbfgs_parameter_t::min_step. */
    case LBFGSERR_MINIMUMSTEP:
        p = "MINIMUMSTEP";
        break;
    /** The line-search step became larger than lbfgs_parameter_t::max_step. */
    case LBFGSERR_MAXIMUMSTEP:
        p = "MAXIMUMSTEP"; /* BUG FIX: was the garbled "MAXILBFGSERR_MUMSTEP" */
        break;
    /** The line-search routine reaches the maximum number of evaluations. */
    case LBFGSERR_MAXIMUMLINESEARCH:
        p = "MAXIMUMLINESEARCH";
        break;
    /** The algorithm routine reaches the maximum number of iterations. */
    case LBFGSERR_MAXIMUMITERATION:
        p = "MAXIMUMITERATION";
        break;
    /** Relative width of the interval of uncertainty is at most
        lbfgs_parameter_t::xtol. */
    case LBFGSERR_WIDTHTOOSMALL:
        p = "WIDTHTOOSMALL";
        break;
    /** A logic error (negative line-search step) occurred. */
    case LBFGSERR_INVALIDPARAMETERS:
        p = "INVALIDPARAMETERS";
        break;
    /** The current search direction increases the objective function value. */
    case LBFGSERR_INCREASEGRADIENT:
        p = "INCREASEGRADIENT";
        break;
    case 0:
        p = "Minimization success";
        break;
    default:
        p = "No detected error";
        break;
    }
    return p;
}
zSchCompUdt-cuda.c | /*! \file
Copyright (c) 2003, The Regents of the University of California, through
Lawrence Berkeley National Laboratory (subject to receipt of any required
approvals from U.S. Dept. of Energy)
All rights reserved.
The source code is distributed under BSD license, see the file License.txt
at the top-level directory.
*/
/*! @file
* \brief This file contains the main loop of pzgstrf which involves
* rank k update of the Schur complement.
* Uses CUDA GPU.
*
* <pre>
* -- Distributed SuperLU routine (version 4.0) --
* Lawrence Berkeley National Lab, Univ. of California Berkeley.
* October 1, 2014
*
 * </pre>
 */
#define SCHEDULE_STRATEGY dynamic
/* Check a cuBLAS call's status; print diagnostics and abort on failure.
 * BUG FIX: removed the trailing ';' after while(0) -- the do { } while (0)
 * wrapper must expand to a single statement (callers supply the ';'),
 * otherwise `if (c) cublasCheckErrors(f); else ...` fails to compile. */
#define cublasCheckErrors(fn) \
    do { \
        cublasStatus_t cublas_err_ = fn; \
        if (cublas_err_ != CUBLAS_STATUS_SUCCESS) { \
            fprintf(stderr, "Fatal cublas error: %d (at %s:%d)\n", \
                (int)(cublas_err_), \
                __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)
int full;
double gemm_timer = 0.0;
double scatter_timer = 0.0;
if ( msg0 && msg2 ) { /* L(:,k) and U(k,:) are not empty. */
ldu =0;
full =1;
int cum_nrow;
int temp_nbrow;
lptr = lptr0;
luptr = luptr0;
nbrow= lsub[1];
if (myrow==krow) nbrow = lsub[1]-lsub[3];
if (nbrow>0) {
int ncol_max = SUPERLU_MIN(buffer_size/nbrow,bigu_size/ldt);
int num_streams_used, /*number of streams that will be used*/
ncpu_blks; /*Number of CPU dgemm blks*/
int jjj, jjj_st,jjj_global;
for (j = jj0; j < nub; ++j) {
arrive_at_ublock( j,&iukp,&rukp,&jb,&ljb,&nsupc,
iukp0,rukp0,usub,perm_u,xsup,grid );
ncols =0 ; //initialize at 0
jj = iukp;
int temp_ldu=0;
for (; jj < iukp+nsupc; ++jj) {
segsize = klst - usub[jj];
if ( segsize ) {
++ncols;
}
temp_ldu = SUPERLU_MAX(temp_ldu, segsize);
}
full_u_cols[j] = ncols;
blk_ldu[j] = temp_ldu;
} /* end for j = jj0..nub */
jjj = jj0; /* initialization */
// #pragma omp barrier
while ( jjj < nub ) {
jjj_st=jjj;
#ifdef _OPENMP
#pragma omp single
#endif
{
ldu = blk_ldu[jjj_st];
for (j = jjj_st; j < nub ; ++j) {
/* prefix sum */
if (j != jjj_st) full_u_cols[j] += full_u_cols[j-1];
ldu = SUPERLU_MAX(ldu, blk_ldu[j]);
/* break condition */
/* the number of columns that can be processed is limited by buffer size*/
if (full_u_cols[j]+((j+1==nub)?0:full_u_cols[j+1]) > ncol_max) {
break;
}
} /* end for j=jjj_st to nub */
jjj_global = SUPERLU_MIN(nub, j+1); /* Maximum value of jjj will be nub */
// TAU_STATIC_TIMER_START("work_divison");
/* Divide CPU-GPU gemm here */
gemm_division_cpu_gpu(
&num_streams_used, /*number of streams that will be used*/
stream_end_col, /*array holding last column blk for each partition*/
&ncpu_blks, /*Number of CPU gemm blks*/
/*input*/
nbrow, /*number of row in A matrix*/
ldu, /*number of k in dgemm*/
nstreams,
full_u_cols + jjj_st, /*array containing prefix sum of work load*/
jjj_global-jjj_st /*Number of work load */
);
// TAU_STATIC_TIMER_STOP("work_divison");
} /* pragma omp single */
jjj = jjj_global;
// printf("thread_id %d, jjj %d \n",thread_id,jjj );
if (jjj == jjj_st+1 && full_u_cols[jjj_st] > ncol_max) {
printf("allocate more memory for buffer !!!!\n");
if(nbrow * full_u_cols[jjj_st] > buffer_size)
printf("%d buffer_size %d\n",nbrow*full_u_cols[jjj_st],buffer_size );
}
// #pragma omp barrier
/* gathering circuit */
assert(jjj_st<nub);
assert(jjj-1<nub);
// TAU_STATIC_TIMER_START("GATHER_U");
#ifdef _OPENMP
#pragma omp for schedule( SCHEDULE_STRATEGY )
#endif
for (j = jjj_st; j < jjj; ++j) {
if (j==jjj_st) tempu = bigU;
else tempu = bigU + ldu*full_u_cols[j-1];
/* == processing each of the remaining columns == */
arrive_at_ublock(j,&iukp,&rukp,&jb,&ljb,&nsupc,
iukp0,rukp0,usub,perm_u,xsup,grid);
// tempu = tempU2d;
for (jj = iukp; jj < iukp+nsupc; ++jj) {
segsize = klst - usub[jj];
if ( segsize ) {
lead_zero = ldu - segsize;
for (i = 0; i < lead_zero; ++i) tempu[i] = zero;
tempu += lead_zero;
for (i = 0; i < segsize; ++i)
tempu[i] = uval[rukp+i];
rukp += segsize;
tempu += segsize;
}
}
rukp -= usub[iukp - 1]; /* Return to start of U(k,j). */
} /* end for j=jjj_st to jjj */
if ( num_streams_used > 0 ) {
#ifdef PI_DEBUG
printf("nbrow %d *ldu %d =%d < ldt %d * max_row_size %d =%d \n",nbrow,ldu,nbrow*ldu,ldt,max_row_size,ldt*max_row_size );
assert(nbrow*ldu<=ldt*max_row_size);
#endif
cudaMemcpy2DAsync(dA, nbrow*sizeof(doublecomplex),
&lusup[luptr+(knsupc-ldu)*nsupr],
nsupr*sizeof(doublecomplex), nbrow*sizeof(doublecomplex),
ldu, cudaMemcpyHostToDevice, streams[0]);
}
for (int i = 0; i < num_streams_used; ++i) {
int st = (i==0) ? ncpu_blks+jjj_st : jjj_st+stream_end_col[i-1];
int st_col = full_u_cols[st-1];
int num_col_stream = full_u_cols[jjj_st+stream_end_col[i]-1]-full_u_cols[st-1];
tempu = bigU;
doublecomplex *tempv1 = bigV + full_u_cols[st-1]*nbrow;
/* Following is for testing purpose */
#ifdef GPU_ACC
int stream_id = i;
int b_offset = ldu * st_col;
int c_offset = st_col * nbrow;
size_t B_stream_size = ldu * num_col_stream * sizeof(doublecomplex);
size_t C_stream_size = nbrow * num_col_stream * sizeof(doublecomplex);
assert(ldu*(st_col+num_col_stream) < bigu_size);
assert(nbrow*(st_col+num_col_stream) < buffer_size);
cudaMemcpyAsync(dB+b_offset, tempu+b_offset, B_stream_size,
cudaMemcpyHostToDevice, streams[stream_id]);
cublasCheckErrors(
cublasSetStream(handle[stream_id],
streams[stream_id])
);
cublasCheckErrors(
cublasZgemm(handle[stream_id],
CUBLAS_OP_N, CUBLAS_OP_N,
nbrow, num_col_stream, ldu,
(const cuDoubleComplex*) &alpha,
(const cuDoubleComplex*) dA,
nbrow,
(const cuDoubleComplex*) &dB[b_offset],
ldu,
(const cuDoubleComplex*) &beta,
(cuDoubleComplex*)&dC[c_offset],
nbrow)
);
checkCuda( cudaMemcpyAsync(tempv1, dC+c_offset,
C_stream_size,
cudaMemcpyDeviceToHost,
streams[stream_id]) );
#else
if ( num_col_stream > 0 ) {
my_zgemm_("N", "N", &nbrow, &num_col_stream, &ldu,
&alpha, &lusup[luptr+(knsupc-ldu)*nsupr],
&nsupr, tempu+ldu*st_col, &ldu, &beta,
tempv1, &nbrow, 1, 1);
}
#endif
} /* end for i = 1 to num_streams used */
int num_col = full_u_cols[jjj_st+ncpu_blks-1];
int st_col = 0; /*special case for cpu */
tempv = bigV + nbrow * st_col;
tempu = bigU;
double tstart = SuperLU_timer_();
#if defined (USE_VENDOR_BLAS)
zgemm_("N", "N", &nbrow, &num_col, &ldu, &alpha,
&lusup[luptr+(knsupc-ldu)*nsupr], &nsupr,
tempu+ldu*st_col, &ldu, &beta, tempv, &nbrow, 1, 1);
#else
zgemm_("N", "N", &nbrow, &num_col, &ldu, &alpha,
&lusup[luptr+(knsupc-ldu)*nsupr], &nsupr,
tempu+ldu*st_col, &ldu, &beta, tempv, &nbrow);
#endif
gemm_timer += SuperLU_timer_() -tstart;
stat->ops[FACT] += 2 * nbrow * ldu * full_u_cols[jjj-1];
// printf("after zgemm \n");
/* Now scattering blocks handled by cpu */
int temp_ncol;
            /* Scatter the first blocks, which the CPU has computed */
tstart = SuperLU_timer_();
#ifdef _OPENMP
#pragma omp parallel \
private(j,iukp,rukp, tempu, tempv, cum_nrow, jb, nsupc,ljb, \
segsize,lead_zero, \
ib, temp_nbrow,ilst,lib,index, \
ijb,fnz,ucol,rel,ldv,lptrj,luptrj, \
nzval, lb , jj, i) \
firstprivate(luptr,lptr) default (shared)
#endif
{
int thread_id = omp_get_thread_num();
int* indirect_thread = indirect + ldt*thread_id;
int* indirect2_thread = indirect2 + ldt*thread_id;
doublecomplex* tempv1;
if (ncpu_blks< omp_get_num_threads()) {
// TAU_STATIC_TIMER_START("SPECIAL_CPU_SCATTER");
for (j = jjj_st; j < jjj_st+ncpu_blks; ++j) {
/* code */
#ifdef PI_DEBUG
printf("scattering %d block column\n",j);
#endif
/* == processing each of the remaining columns == */
if(j==jjj_st) tempv1 = bigV;
else tempv1 = bigV + full_u_cols[j-1]*nbrow;
arrive_at_ublock( j,&iukp,&rukp,&jb,&ljb,&nsupc,
iukp0,rukp0,usub,perm_u,xsup,grid );
cum_nrow =0 ;
/* do update with the kth column of L and (k,j)th block of U */
lptr = lptr0;
luptr = luptr0;
#ifdef _OPENMP
#pragma omp for schedule( SCHEDULE_STRATEGY ) nowait
#endif
for (lb = 0; lb < nlb; lb++ ) {
int cum_nrow = 0;
int temp_nbrow;
lptr = lptr0;
luptr = luptr0;
for (int i = 0; i < lb; ++i) {
ib = lsub[lptr]; /* Row block L(i,k). */
temp_nbrow = lsub[lptr+1]; /* Number of full rows. */
lptr += LB_DESCRIPTOR; /* Skip descriptor. */
lptr += temp_nbrow;
luptr += temp_nbrow;
cum_nrow +=temp_nbrow;
}
ib = lsub[lptr]; /* Row block L(i,k). */
temp_nbrow = lsub[lptr+1]; /* Number of full rows. */
assert(temp_nbrow<=nbrow);
lptr += LB_DESCRIPTOR; /* Skip descriptor. */
/* Now gather the result into the destination block. */
if ( ib < jb ) { /* A(i,j) is in U. */
#ifdef PI_DEBUG
printf("cpu scatter \n");
printf("A(%d,%d) goes to U block %d \n", ib,jb,ljb);
#endif
tempv = tempv1+cum_nrow;
zscatter_u (
ib,jb,
nsupc,iukp,xsup,
klst,nbrow,
lptr,temp_nbrow,lsub,
usub,tempv,
Ufstnz_br_ptr,
Unzval_br_ptr,
grid
);
} else { /* A(i,j) is in L. */
#ifdef PI_DEBUG
printf("cpu scatter \n");
printf("A(%d,%d) goes to L block %d \n", ib,jb,ljb);
#endif
tempv = tempv1+cum_nrow;
zscatter_l (
ib, ljb,nsupc,iukp,xsup,klst,nbrow,lptr,
temp_nbrow,usub,lsub,tempv,
indirect_thread,indirect2_thread,
Lrowind_bc_ptr,Lnzval_bc_ptr,grid
);
} /* if ib < jb ... */
lptr += temp_nbrow;
luptr += temp_nbrow;
cum_nrow += temp_nbrow;
} /* for lb ... */
luptr=luptr0;
} /* for j = jjj_st ... */
// TAU_STATIC_TIMER_STOP("SPECIAL_CPU_SCATTER");
} else {
#ifdef _OPENMP
#pragma omp for schedule(SCHEDULE_STRATEGY) nowait
#endif
for (j = jjj_st; j < jjj_st+ncpu_blks; ++j) {
/* code */
#ifdef PI_DEBUG
printf("scattering %d block column\n",j);
#endif
/* == processing each of the remaining columns == */
if(j==jjj_st) tempv1 = bigV;
else tempv1 = bigV + full_u_cols[j-1]*nbrow;
arrive_at_ublock( j,&iukp,&rukp,&jb,&ljb,&nsupc,
iukp0,rukp0,usub,perm_u,xsup,grid );
cum_nrow =0 ;
/* do update with the kth column of L and (k,j)th block of U */
lptr = lptr0;
luptr = luptr0;
for (lb = 0; lb < nlb; lb++ ) {
ib = lsub[lptr]; /* Row block L(i,k). */
temp_nbrow = lsub[lptr+1]; /* Number of full rows. */
assert(temp_nbrow<=nbrow);
lptr += LB_DESCRIPTOR; /* Skip descriptor. */
#ifdef DGEMM_STAT
if(j==jjj_st) {
temp_ncol = full_u_cols[j];
} else {
temp_ncol = full_u_cols[j]- full_u_cols[j-1];
}
printf("%d %d %d \n",temp_nbrow, temp_ncol,ldu);
#endif
/* Now gather the result into the destination block. */
if ( ib < jb ) { /* A(i,j) is in U. */
#ifdef PI_DEBUG
printf("cpu scatter \n");
printf("A(%d,%d) goes to U block %d \n", ib,jb,ljb);
#endif
tempv = tempv1+cum_nrow;
zscatter_u (
ib,jb,
nsupc,iukp,xsup,
klst,nbrow,
lptr,temp_nbrow,lsub,
usub,tempv,
Ufstnz_br_ptr,
Unzval_br_ptr,
grid
);
} else { /* A(i,j) is in L. */
#ifdef PI_DEBUG
printf("cpu scatter \n");
printf("A(%d,%d) goes to L block %d \n", ib,jb,ljb);
#endif
tempv = tempv1+cum_nrow;
zscatter_l (
ib, ljb,nsupc,iukp,xsup,klst,nbrow,lptr,
temp_nbrow,usub,lsub,tempv,
indirect_thread,indirect2_thread,
Lrowind_bc_ptr,Lnzval_bc_ptr,grid
);
} /* if ib < jb ... */
lptr += temp_nbrow;
luptr += temp_nbrow;
cum_nrow += temp_nbrow;
} /* for lb ... */
luptr=luptr0;
} /* for j = jjj_st ... */
} /* else if (ncpu_blks >= omp_get_num_threads()) */
} /* parallel region */
scatter_timer += SuperLU_timer_() - tstart;
#ifdef _OPENMP
#pragma omp parallel \
private(j,iukp,rukp, tempu, tempv, cum_nrow, jb, nsupc,ljb, \
segsize,lead_zero, \
ib, temp_nbrow,ilst,lib,index, \
ijb,fnz,ucol,rel,ldv,lptrj,luptrj, \
nzval, lb , jj, i) \
firstprivate(luptr,lptr) default (shared)
#endif
{
int thread_id = omp_get_thread_num();
int* indirect_thread = indirect + ldt*thread_id;
int* indirect2_thread = indirect2 + ldt*thread_id;
doublecomplex* tempv1;
for(i = 0; i < num_streams_used; i++) { /* i is private variable */
checkCuda(cudaStreamSynchronize (streams[i]));
int jjj_st1 = (i==0) ? jjj_st + ncpu_blks : jjj_st + stream_end_col[i-1];
int jjj_end = jjj_st + stream_end_col[i];
assert(jjj_end-1<nub);
assert(jjj_st1>jjj_st) ;
/* now scatter it */
#pragma omp for schedule( SCHEDULE_STRATEGY ) nowait
for (j = jjj_st1; j < jjj_end; ++j) {
/* code */
#ifdef PI_DEBUG
printf("scattering %d block column\n",j);
#endif
/* == processing each of the remaining columns == */
if(j==jjj_st) tempv1 = bigV;
else tempv1 = bigV + full_u_cols[j-1]*nbrow;
arrive_at_ublock( j,&iukp,&rukp,&jb,&ljb,&nsupc,
iukp0,rukp0,usub,perm_u,xsup,grid );
cum_nrow =0 ;
/* do update with the kth column of L and (k,j)th block of U */
lptr = lptr0;
luptr = luptr0;
for (lb = 0; lb < nlb; lb++) {
ib = lsub[lptr]; /* Row block L(i,k). */
temp_nbrow = lsub[lptr+1]; /* Number of full rows. */
assert(temp_nbrow<=nbrow);
lptr += LB_DESCRIPTOR; /* Skip descriptor. */
#ifdef DGEMM_STAT
if(j==jjj_st) {
temp_ncol = full_u_cols[j];
} else {
temp_ncol = full_u_cols[j]- full_u_cols[j-1];
}
printf("%d %d %d \n",temp_nbrow, temp_ncol,ldu);
#endif
/* Now gather the result into the destination block. */
if ( ib < jb ) { /* A(i,j) is in U. */
#ifdef PI_DEBUG
printf("gpu scatter \n");
printf("A(%d,%d) goes to U block %d \n", ib,jb,ljb);
#endif
tempv = tempv1+cum_nrow;
zscatter_u (
ib,jb,
nsupc,iukp,xsup,
klst,nbrow,
lptr,temp_nbrow,lsub,
usub,tempv,
Ufstnz_br_ptr,
Unzval_br_ptr,
grid
);
} else { /* A(i,j) is in L. */
#ifdef PI_DEBUG
printf("gpu scatter \n");
printf("A(%d,%d) goes to L block %d \n", ib,jb,ljb);
#endif
tempv = tempv1+cum_nrow;
zscatter_l (
ib, ljb,nsupc,iukp,xsup,klst,nbrow,lptr,
temp_nbrow,usub,lsub,tempv,
indirect_thread,indirect2_thread,
Lrowind_bc_ptr,Lnzval_bc_ptr,grid
);
} /* if ib < jb ... */
lptr += temp_nbrow;
luptr += temp_nbrow;
cum_nrow += temp_nbrow;
} /* for lb ... */
luptr=luptr0;
} /* for j = jjj_st ... */
} /* end for i = 0 to nstreams */
// TAU_STATIC_TIMER_STOP("GPU_SCATTER");
// TAU_STATIC_TIMER_STOP("INSIDE_OMP");
} /* end pragma omp parallel */
// TAU_STATIC_TIMER_STOP("OUTSIDE_OMP");
} /* end while(jjj<nub) */
} /* if nbrow>0 */
} /* if msg1 and msg 2 */
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for two struct timeval values.
 * NOTE: normalizes *y in place as a side effect (same contract as the
 * glibc manual example this follows).
 * Returns 1 if the difference is negative, otherwise 0. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y when x's microsecond field is smaller,
   * so the later subtraction of tv_usec cannot go negative. */
  if (x->tv_usec < y->tv_usec)
  {
    int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow;
    y->tv_sec += borrow;
  }
  /* Move any surplus whole seconds out of the microsecond difference. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* At this point x->tv_usec - y->tv_usec is certainly non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative iff x precedes y after normalization. */
  return x->tv_sec < y->tv_sec;
}
// Driver for an order-1 3D 7-point stencil (PLUTO/Pochoir derived,
// time-tiled CLooG-generated sweep).  Usage: prog Nx Ny Nz [Nt].
// Runs the sweep TESTS times and reports per-run wall time; the best
// time is kept in min_tdiff for PRINT_RESULTS.
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  // Grid extents include +2 ghost cells per dimension.  The defaults
  // below fix the original bug where missing command-line arguments
  // left Nx/Ny/Nz/Nt uninitialized (undefined behavior when read).
  int Nx = 34, Ny = 34, Nz = 34, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);

  // Two time planes (ping-pong buffers) of an Nz x Ny x Nx grid,
  // allocated row by row.
  double ****A = (double ****) malloc(sizeof(double***)*2);
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 8;
  tile_size[3] = 512;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff = 1.e100;

  const int BASE = 1024;
  const double alpha = 0.0876;
  const double beta = 0.0765;

  // Initialize BOTH time planes over the full index range, including the
  // index-0 boundary plane.  The original loops ran from 1..N-1 and only
  // touched A[0], so the stencil's reads of boundary neighbors (index 0)
  // and of A[1]'s never-written halo cells hit uninitialized memory.
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code (generated; kept verbatim) */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,12);t1++) {
        lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
        ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(3*t1-1,2)),ceild(24*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(12*t1+Ny+21,8)),floord(24*t2+Ny+20,8)),floord(24*t1-24*t2+Nz+Ny+19,8));t3++) {
            for (t4=max(max(max(0,ceild(3*t1-127,128)),ceild(24*t2-Nz-508,512)),ceild(8*t3-Ny-508,512));t4<=min(min(min(min(floord(Nt+Nx-4,512),floord(12*t1+Nx+21,512)),floord(24*t2+Nx+20,512)),floord(8*t3+Nx+4,512)),floord(24*t1-24*t2+Nz+Nx+19,512));t4++) {
              for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),8*t3-Ny+2),512*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),8*t3+6),512*t4+510),24*t1-24*t2+Nz+21);t5++) {
                for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) {
                    lbv=max(512*t4,t5+1);
                    ubv=min(512*t4+511,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays (Causing performance degradation
  /* for(i=0; i<Nz; i++){
       for(j=0;j<Ny;j++){
         free(A[0][i][j]);
         free(A[1][i][j]);
       }
       free(A[0][i]);
       free(A[1][i]);
     }
     free(A[0]);
     free(A[1]);
  */
  return 0;
}
|
parallel_master_taskloop_simd_misc_messages.c | // RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=45 -verify=expected,omp45 -triple x86_64-unknown-unknown %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp -fopenmp-version=50 -verify=expected,omp50 -triple x86_64-unknown-unknown %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=45 -verify=expected,omp45 -triple x86_64-unknown-unknown %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -fopenmp-version=50 -verify=expected,omp50 -triple x86_64-unknown-unknown %s -Wuninitialized
// Verify -Wuninitialized fires for a read of an uninitialized variable
// inside a 'parallel master taskloop simd' loop body.
void xxx(int argc) {
  int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp parallel master taskloop simd
  for (int i = 0; i < 10; ++i)
    argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel master taskloop simd'}}
#pragma omp parallel master taskloop simd
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp parallel master taskloop simd'}}
#pragma omp parallel master taskloop simd foo
// The directive must be followed by a canonical for loop; any other
// statement is an error.
void test_no_clause() {
  int i;
#pragma omp parallel master taskloop simd
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{statement after '#pragma omp parallel master taskloop simd' must be a for loop}}
#pragma omp parallel master taskloop simd
  ++i;
}
// Branches may not enter or leave the taskloop region: goto into the
// region and return out of it are diagnosed; a jump that stays inside
// the region (to L2) is allowed.
void test_branch_protected_scope() {
  int i = 0;
L1:
  ++i;
  int x[24];
#pragma omp parallel
#pragma omp parallel master taskloop simd
  for (i = 0; i < 16; ++i) {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }
  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}
// Unknown trailing tokens are warned and ignored; duplicate 'nogroup'
// and the disallowed 'in_reduction' clause are hard errors.
void test_invalid_clause() {
  int i, a;
  // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel master taskloop simd' are ignored}}
#pragma omp parallel master taskloop simd foo bar
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{directive '#pragma omp parallel master taskloop simd' cannot contain more than one 'nogroup' clause}}
#pragma omp parallel master taskloop simd nogroup nogroup
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+1 {{unexpected OpenMP clause 'in_reduction' in directive '#pragma omp parallel master taskloop simd'}}
#pragma omp parallel master taskloop simd in_reduction(+:a)
  for (i = 0; i < 16; ++i)
    ;
}
// Stray semicolons or commas after the directive or a clause list are
// "extra tokens" warnings, not errors.
void test_non_identifiers() {
  int i, x;
#pragma omp parallel
  // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel master taskloop simd' are ignored}}
#pragma omp parallel master taskloop simd;
  for (i = 0; i < 16; ++i)
    ;
  // expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel master taskloop simd' are ignored}}
#pragma omp parallel
#pragma omp parallel master taskloop simd linear(x);
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel master taskloop simd' are ignored}}
#pragma omp parallel master taskloop simd private(x);
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-warning@+1 {{extra tokens at the end of '#pragma omp parallel master taskloop simd' are ignored}}
#pragma omp parallel master taskloop simd, private(x);
  for (i = 0; i < 16; ++i)
    ;
}
extern int foo();
// 'collapse' clause checks: malformed argument lists, non-constant or
// non-positive arguments, and the requirement that the collapse depth
// matches the number of perfectly nested loops.
void test_collapse() {
  int i;
#pragma omp parallel
  // expected-error@+1 {{expected '('}}
#pragma omp parallel master taskloop simd collapse
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel master taskloop simd collapse(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop simd collapse()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel master taskloop simd collapse(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel master taskloop simd collapse(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-warning@+2 {{extra tokens at the end of '#pragma omp parallel master taskloop simd' are ignored}}
  // expected-error@+1 {{expected '('}}
#pragma omp parallel master taskloop simd collapse 4)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop simd collapse(4
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop simd', but found only 1}}
#pragma omp parallel
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop simd collapse(4,
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop simd', but found only 1}}
#pragma omp parallel
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop simd collapse(4, )
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop simd', but found only 1}}
#pragma omp parallel
  // expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop simd collapse(4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop simd', but found only 1}}
#pragma omp parallel
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop simd collapse(4 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop simd', but found only 1}}
#pragma omp parallel
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop simd collapse(4, , 4)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop simd', but found only 1}}
#pragma omp parallel
#pragma omp parallel master taskloop simd collapse(4)
  for (int i1 = 0; i1 < 16; ++i1)
    for (int i2 = 0; i2 < 16; ++i2)
      for (int i3 = 0; i3 < 16; ++i3)
        for (int i4 = 0; i4 < 16; ++i4)
          foo();
#pragma omp parallel
  // expected-error@+2 {{expected ')'}}
  // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}}
#pragma omp parallel master taskloop simd collapse(4, 8)
  for (i = 0; i < 16; ++i)
    ; // expected-error {{expected 4 for loops after '#pragma omp parallel master taskloop simd', but found only 1}}
#pragma omp parallel
  // expected-error@+1 {{integer constant expression}}
#pragma omp parallel master taskloop simd collapse(2.5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{integer constant expression}}
#pragma omp parallel master taskloop simd collapse(foo())
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel master taskloop simd collapse(-5)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel master taskloop simd collapse(0)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}}
#pragma omp parallel master taskloop simd collapse(5 - 5)
  for (i = 0; i < 16; ++i)
    ;
}
// 'private' clause parsing: malformed lists, non-variable arguments,
// and valid one/two/three-variable lists.
void test_private() {
  int i;
#pragma omp parallel
  // expected-error@+2 {{expected expression}}
  // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel master taskloop simd private(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop simd private(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop simd private(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop simd private()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop simd private(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{expected variable name}}
#pragma omp parallel master taskloop simd private(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp parallel
#pragma omp parallel master taskloop simd private(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel master taskloop simd private(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel master taskloop simd private(x, y, z)
  for (i = 0; i < 16; ++i) {
    x = y * i + z;
  }
}
// 'lastprivate' clause parsing: malformed lists, non-variable
// arguments, and valid variable lists.
void test_lastprivate() {
  int i;
#pragma omp parallel
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop simd lastprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop simd lastprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop simd lastprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop simd lastprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop simd lastprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{expected variable name}}
#pragma omp parallel master taskloop simd lastprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp parallel
#pragma omp parallel master taskloop simd lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel master taskloop simd lastprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel master taskloop simd lastprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
// 'firstprivate' clause parsing, plus valid combinations of
// lastprivate+firstprivate on the same variables.
void test_firstprivate() {
  int i;
#pragma omp parallel
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop simd firstprivate(
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
  // expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop simd firstprivate(,
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop simd firstprivate(, )
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop simd firstprivate()
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop simd firstprivate(int)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
  // expected-error@+1 {{expected variable name}}
#pragma omp parallel master taskloop simd firstprivate(0)
  for (i = 0; i < 16; ++i)
    ;
  int x, y, z;
#pragma omp parallel
#pragma omp parallel master taskloop simd lastprivate(x) firstprivate(x)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel master taskloop simd lastprivate(x, y) firstprivate(x, y)
  for (i = 0; i < 16; ++i)
    ;
#pragma omp parallel
#pragma omp parallel master taskloop simd lastprivate(x, y, z) firstprivate(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
}
// Loop iteration variable type checks: floating-point induction
// variables are rejected; >64-bit integers are narrowed with a warning.
void test_loop_messages() {
  float a[100], b[100], c[100];
#pragma omp parallel
  // expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel master taskloop simd
  for (float fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
#pragma omp parallel
  // expected-error@+2 {{variable must be of integer or pointer type}}
#pragma omp parallel master taskloop simd
  for (double fi = 0; fi < 10.0; fi++) {
    c[(int)fi] = a[(int)fi] + b[(int)fi];
  }
  // expected-warning@+2 {{OpenMP loop iteration variable cannot have more than 64 bits size and will be narrowed}}
#pragma omp parallel master taskloop simd
  for (__int128 ii = 0; ii < 10; ii++) {
    c[ii] = a[ii] + b[ii];
  }
}
// 'nontemporal' clause (OpenMP 5.0): rejected entirely under -fopenmp-version=45
// (omp45-error), parse/semantic checks under 5.0 (omp50-error), including the
// one-nontemporal-clause-per-variable rule and mixing with data-sharing clauses.
void test_nontemporal() {
  int i;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel master taskloop simd nontemporal(
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} expected-error@+1 2 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel master taskloop simd nontemporal(,
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} expected-error@+1 2 {{expected expression}}
#pragma omp parallel master taskloop simd nontemporal(, )
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop simd nontemporal()
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} expected-error@+1 {{expected expression}}
#pragma omp parallel master taskloop simd nontemporal(int)
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} omp50-error@+1 {{expected variable name}}
#pragma omp parallel master taskloop simd nontemporal(0)
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} expected-error@+1 {{use of undeclared identifier 'x'}}
#pragma omp parallel master taskloop simd nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+2 {{use of undeclared identifier 'x'}}
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} expected-error@+1 {{use of undeclared identifier 'y'}}
#pragma omp parallel master taskloop simd nontemporal(x, y)
  for (i = 0; i < 16; ++i)
    ;
  // expected-error@+3 {{use of undeclared identifier 'x'}}
  // expected-error@+2 {{use of undeclared identifier 'y'}}
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} expected-error@+1 {{use of undeclared identifier 'z'}}
#pragma omp parallel master taskloop simd nontemporal(x, y, z)
  for (i = 0; i < 16; ++i)
    ;
  int x, y;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp parallel master taskloop simd nontemporal(x :)
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}}
#pragma omp parallel master taskloop simd nontemporal(x :, )
  for (i = 0; i < 16; ++i)
    ;
  // omp50-note@+2 {{defined as nontemporal}}
  // omp45-error@+1 2 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} omp50-error@+1 {{a variable cannot appear in more than one nontemporal clause}}
#pragma omp parallel master taskloop simd nontemporal(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}}
#pragma omp parallel master taskloop simd private(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}}
#pragma omp parallel master taskloop simd nontemporal(x) private(x)
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}} expected-note@+1 {{to match this '('}} expected-error@+1 {{expected ',' or ')' in 'nontemporal' clause}} expected-error@+1 {{expected ')'}}
#pragma omp parallel master taskloop simd nontemporal(x, y : 0)
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}}
#pragma omp parallel master taskloop simd nontemporal(x) lastprivate(x)
  for (i = 0; i < 16; ++i)
    ;
  // omp45-error@+1 {{unexpected OpenMP clause 'nontemporal' in directive '#pragma omp parallel master taskloop simd'}}
#pragma omp parallel master taskloop simd lastprivate(x) nontemporal(x)
  for (i = 0; i < 16; ++i)
    ;
}
|
rose_outer_only.c | /* Only the outmost loop can be parallelized
*/
#include "omp.h"
// Dependence-analysis fixture: only the outer i-loop is parallelizable;
// the inner j-loop carries a true dependence through b[i][j-1] (see the
// trailing comment in this file).
// NOTE(review): at j == 0 this reads b[i][-1], which is out of bounds,
// and b is never initialized before being read -- presumably intentional
// for the dependence demo, but confirm against the ROSE test suite.
void foo()
{
  int n = 100;
  int m = 100;
  double b[n][m];
  int i;
  int j;
#pragma omp parallel for private (i,j) firstprivate (n,m)
  for (i = 0; i <= n - 1; i += 1) {
    // Each j iteration reads the value written by the previous one,
    // so this inner loop must stay serial.
    for (j = 0; j <= m - 1; j += 1) {
      b[i][j] = b[i][j - 1];
    }
  }
}
/*
Unparallelizable loop at line:9 due to the following dependencies:
1*1 TRUE_DEP DATA_DEP; commonlevel = 1 CarryLevel = 0 Is precise SgPntrArrRefExp:(b[i])[j]@10:14->SgPntrArrRefExp:((b[i])[j - 1])@10:19 == -1;||::
*/
|
sp.c | //-------------------------------------------------------------------------//
// //
// This benchmark is an OpenMP C version of the NPB SP code. This OpenMP //
// C version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the OpenMP Fortran versions in //
// "NPB3.3-OMP" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this OpenMP C version to //
// cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
//---------------------------------------------------------------------
// program SP
//---------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "header.h"
#include "print_results.h"
/* common /global/ */
// Grid dimensions read from inputsp.data (or defaults); n{x,y,z}2 are
// the interior extents (grid_points[d] - 2), set in main().
int grid_points[3], nx2, ny2, nz2;
logical timeron;  // section timing enabled iff a "timer.flag" file exists
/* common /constants/ */
// Discretization constants and derived coefficients -- presumably
// initialized by set_constants() (called from main); confirm.
double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3,
       dx1, dx2, dx3, dx4, dx5, dy1, dy2, dy3, dy4,
       dy5, dz1, dz2, dz3, dz4, dz5, dssp, dt,
       ce[5][13], dxmax, dymax, dzmax, xxcon1, xxcon2,
       xxcon3, xxcon4, xxcon5, dx1tx1, dx2tx1, dx3tx1,
       dx4tx1, dx5tx1, yycon1, yycon2, yycon3, yycon4,
       yycon5, dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1,
       zzcon1, zzcon2, zzcon3, zzcon4, zzcon5, dz1tz1,
       dz2tz1, dz3tz1, dz4tz1, dz5tz1, dnxm1, dnym1,
       dnzm1, c1c2, c1c5, c3c4, c1345, conz1, c1, c2,
       c3, c4, c5, c4dssp, c5dssp, dtdssp, dttx1, bt,
       dttx2, dtty1, dtty2, dttz1, dttz2, c2dttx1,
       c2dtty1, c2dttz1, comz1, comz4, comz5, comz6,
       c3c4tx3, c3c4ty3, c3c4tz3, c2iv, con43, con16;
/* common /fields/ */
// Main state arrays over the KMAX x (JMAXP+1) x (IMAXP+1) grid;
// u holds the 5 solution components per cell, rhs/forcing the
// right-hand side and forcing terms of the same shape.
double u      [KMAX][JMAXP+1][IMAXP+1][5];
double us     [KMAX][JMAXP+1][IMAXP+1];
double vs     [KMAX][JMAXP+1][IMAXP+1];
double ws     [KMAX][JMAXP+1][IMAXP+1];
double qs     [KMAX][JMAXP+1][IMAXP+1];
double rho_i  [KMAX][JMAXP+1][IMAXP+1];
double speed  [KMAX][JMAXP+1][IMAXP+1];
double square [KMAX][JMAXP+1][IMAXP+1];
double rhs    [KMAX][JMAXP+1][IMAXP+1][5];
double forcing[KMAX][JMAXP+1][IMAXP+1][5];
/* common /work_1d/ */
// Per-thread 1D scratch vectors (threadprivate below).
double cv  [PROBLEM_SIZE];
double rhon[PROBLEM_SIZE];
double rhos[PROBLEM_SIZE];
double rhoq[PROBLEM_SIZE];
double cuf [PROBLEM_SIZE];
double q   [PROBLEM_SIZE];
double ue  [PROBLEM_SIZE][5];
double buf [PROBLEM_SIZE][5];
#pragma omp threadprivate(cv,rhon,rhos,rhoq,cuf,q,ue,buf)
/* common /work_lhs/ */
// Per-thread factorization scratch for the line solvers (threadprivate).
double lhs [IMAXP+1][IMAXP+1][5];
double lhsp[IMAXP+1][IMAXP+1][5];
double lhsm[IMAXP+1][IMAXP+1][5];
#pragma omp threadprivate(lhs,lhsp,lhsm)
//kai
// NOTE(review): counters registered via consistent_data() in main --
// presumably fault-tolerance/checkpointing instrumentation; confirm
// against the crucial_data/consistent_data API.
int k1,k2,k3,k4,k5,k6,k7,k8,k9,k10, k11, k12, k13, k14, k15, k16;
int main(int argc, char *argv[])
{
//kai
// crucial_data(grid_points, "int", 3);
// crucial_data(ce,"double", 13*5);
crucial_data(u, "double", KMAX*(JMAXP+1)*(IMAXP+1)*5);
crucial_data(us, "double", KMAX*(JMAXP+1)*(IMAXP+1));
crucial_data(vs, "double", KMAX*(JMAXP+1)*(IMAXP+1));
crucial_data(ws, "double", KMAX*(JMAXP+1)*(IMAXP+1));
crucial_data(qs, "double", KMAX*(JMAXP+1)*(IMAXP+1));
crucial_data(rho_i, "double", KMAX*(JMAXP+1)*(IMAXP+1));
crucial_data(speed, "double", KMAX*(JMAXP+1)*(IMAXP+1));
crucial_data(square, "double", KMAX*(JMAXP+1)*(IMAXP+1));
//crucial_data(forcing, "double", KMAX*(JMAXP+1)*(IMAXP+1)*5);
crucial_data(rhs, "double", KMAX*(JMAXP+1)*(IMAXP+1)*5);
crucial_data(cv, "double", PROBLEM_SIZE);
crucial_data(rhon, "double", PROBLEM_SIZE);
crucial_data(rhos, "double", PROBLEM_SIZE);
crucial_data(rhoq, "double", PROBLEM_SIZE);
//crucial_data(cuf, "double", PROBLEM_SIZE);
//crucial_data(q, "double", PROBLEM_SIZE);
//crucial_data(ue, "double", (PROBLEM_SIZE)*5);
//crucial_data(buf, "double", (PROBLEM_SIZE)*5);
crucial_data(lhs, "double", (IMAXP+1)*(IMAXP+1)*5);
crucial_data(lhsp, "double", (IMAXP+1)*(IMAXP+1)*5);
crucial_data(lhsm, "double", (IMAXP+1)*(IMAXP+1)*5);
//int k1,k2,k3,k4,k5,k6,k7,k8,k9,k10, k11;
consistent_data(&k1, "int", 1);
consistent_data(&k2, "int", 1);
consistent_data(&k3, "int", 1);
consistent_data(&k4, "int", 1);
consistent_data(&k5, "int", 1);
consistent_data(&k6, "int", 1);
consistent_data(&k7, "int", 1);
consistent_data(&k8, "int", 1);
consistent_data(&k9, "int", 1);
consistent_data(&k10, "int", 1);
consistent_data(&k11, "int", 1);
consistent_data(&k12, "int", 1);
consistent_data(&k13, "int", 1);
consistent_data(&k14, "int", 1);
consistent_data(&k15, "int", 1);
consistent_data(&k16, "int", 1);
int i, niter, step, n3;
double mflops, t, tmax, trecs[t_last+1];
logical verified;
char Class;
char *t_names[t_last+1];
//---------------------------------------------------------------------
// Read input file (if it exists), else take
// defaults from parameters
//---------------------------------------------------------------------
FILE *fp;
if ((fp = fopen("timer.flag", "r")) != NULL) {
timeron = true;
t_names[t_total] = "total";
t_names[t_rhsx] = "rhsx";
t_names[t_rhsy] = "rhsy";
t_names[t_rhsz] = "rhsz";
t_names[t_rhs] = "rhs";
t_names[t_xsolve] = "xsolve";
t_names[t_ysolve] = "ysolve";
t_names[t_zsolve] = "zsolve";
t_names[t_rdis1] = "redist1";
t_names[t_rdis2] = "redist2";
t_names[t_tzetar] = "tzetar";
t_names[t_ninvr] = "ninvr";
t_names[t_pinvr] = "pinvr";
t_names[t_txinvr] = "txinvr";
t_names[t_add] = "add";
fclose(fp);
} else {
timeron = false;
}
printf("\n\n NAS Parallel Benchmarks (NPB3.3-OMP-C) - SP Benchmark\n\n");
if ((fp = fopen("inputsp.data", "r")) != NULL) {
int result;
printf(" Reading from input file inputsp.data\n");
result = fscanf(fp, "%d", &niter);
while (fgetc(fp) != '\n');
result = fscanf(fp, "%lf", &dt);
while (fgetc(fp) != '\n');
result = fscanf(fp, "%d%d%d", &grid_points[0], &grid_points[1], &grid_points[2]);
fclose(fp);
} else {
printf(" No input file inputsp.data. Using compiled defaults\n");
niter = NITER_DEFAULT;
dt = DT_DEFAULT;
grid_points[0] = PROBLEM_SIZE;
grid_points[1] = PROBLEM_SIZE;
grid_points[2] = PROBLEM_SIZE;
}
printf(" Size: %4dx%4dx%4d\n",
grid_points[0], grid_points[1], grid_points[2]);
printf(" Iterations: %4d dt: %11.7f\n", niter, dt);
printf(" Number of available threads: %5d\n", omp_get_max_threads());
printf("\n");
if ((grid_points[0] > IMAX) ||
(grid_points[1] > JMAX) ||
(grid_points[2] > KMAX) ) {
printf(" %d, %d, %d\n", grid_points[0], grid_points[1], grid_points[2]);
printf(" Problem size too big for compiled array sizes\n");
return 0;
}
nx2 = grid_points[0] - 2;
ny2 = grid_points[1] - 2;
nz2 = grid_points[2] - 2;
set_constants();
for (i = 1; i <= t_last; i++) {
timer_clear(i);
}
exact_rhs();
initialize();
//---------------------------------------------------------------------
// do one time step to touch all code, and reinitialize
//---------------------------------------------------------------------
adi();
initialize();
for (i = 1; i <= t_last; i++) {
timer_clear(i);
}
timer_start(1);
//kai
consistent_data(&step, "int", 1);
flush_whole_cache();
//start_crash();
for (step = 1; step <= niter; step++) {
if ((step % 20) == 0 || step == 1) {
printf(" Time step %4d\n", step);
}
if(step == 5)
start_crash();
if(step == 11)
end_crash();
// if (step != 2) {
adi();
// }
}
//kai
//end_crash();
timer_stop(1);
tmax = timer_read(1);
verify(niter, &Class, &verified);
if (tmax != 0.0) {
n3 = grid_points[0]*grid_points[1]*grid_points[2];
t = (grid_points[0]+grid_points[1]+grid_points[2])/3.0;
mflops = (881.174 * (double)n3
- 4683.91 * (t * t)
+ 11484.5 * t
- 19272.4) * (double)niter / (tmax*1000000.0);
} else {
mflops = 0.0;
}
print_results("SP", Class, grid_points[0],
grid_points[1], grid_points[2], niter,
tmax, mflops, " floating point",
verified, NPBVERSION,COMPILETIME, CS1, CS2, CS3, CS4, CS5,
CS6, "(none)");
//---------------------------------------------------------------------
// More timers
//---------------------------------------------------------------------
if (timeron) {
for (i = 1; i <= t_last; i++) {
trecs[i] = timer_read(i);
}
if (tmax == 0.0) tmax = 1.0;
printf(" SECTION Time (secs)\n");
for (i = 1; i <= t_last; i++) {
printf(" %-8s:%9.3f (%6.2f%%)\n",
t_names[i], trecs[i], trecs[i]*100./tmax);
if (i == t_rhs) {
t = trecs[t_rhsx] + trecs[t_rhsy] + trecs[t_rhsz];
printf(" --> %8s:%9.3f (%6.2f%%)\n", "sub-rhs", t, t*100./tmax);
t = trecs[t_rhs] - t;
printf(" --> %8s:%9.3f (%6.2f%%)\n", "rest-rhs", t, t*100./tmax);
} else if (i == t_zsolve) {
t = trecs[t_zsolve] - trecs[t_rdis1] - trecs[t_rdis2];
printf(" --> %8s:%9.3f (%6.2f%%)\n", "sub-zsol", t, t*100./tmax);
} else if (i == t_rdis2) {
t = trecs[t_rdis1] + trecs[t_rdis2];
printf(" --> %8s:%9.3f (%6.2f%%)\n", "redist", t, t*100./tmax);
}
}
}
return 0;
}
|
j3d27pt.gold.h | #include <cstring>
using std::memcpy;
template <class T>
void jacobi_gold(T *fout, const T *fin, double h2inv, double a, double b, int L, int M, int N) {
double (*out)[M][N] = (double (*)[M][N]) fout;
double (*in)[M][N] = (double (*)[M][N]) fin;
auto ftemp1 = new T[L * M * N];
auto ftemp2 = new T[L * M * N];
memset(ftemp1, 0, sizeof(T)*L*M*N);
memset(ftemp2, 0, sizeof(T)*L*M*N);
double (*temp1)[M][N] = (T (*)[M][N]) ftemp1;
double (*temp2)[M][N] = (T (*)[M][N]) ftemp2;
memcpy(ftemp1, fin, sizeof(T)*L*M*N);
double c = b * h2inv;
double d = c * 0.5;
double e = c * 0.125;
double f = c * 0.3;
for (int t = 0; t < 6; t++) {
#pragma omp parallel for
for (int k = 1; k < L - 1; ++k) {
for (int j = 1; j < M - 1; ++j) {
for (int i = 1; i < N - 1; ++i) {
if (!(t%2)) {
temp2[k][j][i] = a*temp1[k][j][i] -
d*(temp1[k-1][j-1][i-1] +
temp1[k-1][j-1][i+1] +
temp1[k-1][j+1][i-1] +
temp1[k-1][j+1][i+1] +
temp1[k+1][j-1][i-1] +
temp1[k+1][j-1][i+1] +
temp1[k+1][j+1][i-1] +
temp1[k+1][j+1][i+1]) +
e*(temp1[k-1][j-1][i] +
temp1[k-1][j][i-1] +
temp1[k-1][j][i+1] +
temp1[k-1][j+1][i] +
temp1[k][j-1][i-1] +
temp1[k][j-1][i+1] +
temp1[k][j+1][i-1] +
temp1[k][j+1][i+1] +
temp1[k+1][j-1][i] +
temp1[k+1][j][i-1] +
temp1[k+1][j][i+1] +
temp1[k][j+1][i]) +
f*(temp1[k-1][j][i] +
temp1[k][j-1][i] +
temp1[k][j][i-1] +
temp1[k][j][i+1] +
temp1[k][j+1][i] +
temp1[k+1][j][i]) +
0.13*temp1[k][j][i];
} else {
temp1[k][j][i] = a*temp2[k][j][i] -
d*(temp2[k-1][j-1][i-1] +
temp2[k-1][j-1][i+1] +
temp2[k-1][j+1][i-1] +
temp2[k-1][j+1][i+1] +
temp2[k+1][j-1][i-1] +
temp2[k+1][j-1][i+1] +
temp2[k+1][j+1][i-1] +
temp2[k+1][j+1][i+1]) +
e*(temp2[k-1][j-1][i] +
temp2[k-1][j][i-1] +
temp2[k-1][j][i+1] +
temp2[k-1][j+1][i] +
temp2[k][j-1][i-1] +
temp2[k][j-1][i+1] +
temp2[k][j+1][i-1] +
temp2[k][j+1][i+1] +
temp2[k+1][j-1][i] +
temp2[k+1][j][i-1] +
temp2[k+1][j][i+1] +
temp2[k][j+1][i]) +
f*(temp2[k-1][j][i] +
temp2[k][j-1][i] +
temp2[k][j][i-1] +
temp2[k][j][i+1] +
temp2[k][j+1][i] +
temp2[k+1][j][i]) +
0.13*temp2[k][j][i];
}
}
}
}
}
memcpy(fout, ftemp1, sizeof(T)*L*M*N);
}
|
GxB_Descriptor_get.c | //------------------------------------------------------------------------------
// GxB_Descriptor_get: get a field in a descriptor
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Use GxB_Desc_get instead; this is kept for backward compatibility.
#include "GB.h"
GrB_Info GxB_Descriptor_get // get a parameter from a descriptor
(
    GrB_Desc_Value *val,        // value of the parameter
    GrB_Descriptor desc,        // descriptor to query; NULL is ok
    GrB_Desc_Field field        // parameter to query
)
{
    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------
    GB_WHERE1 ("GxB_Descriptor_get (&value, desc, field)") ;
    GB_RETURN_IF_NULL (val) ;
    GB_RETURN_IF_FAULTY (desc) ;
    //--------------------------------------------------------------------------
    // look up the requested field; a NULL descriptor means "all defaults"
    //--------------------------------------------------------------------------
    GrB_Desc_Value value ;
    switch (field)
    {
        case GrB_OUTP :
            value = (desc == NULL) ? GxB_DEFAULT : desc->out ; break ;
        case GrB_MASK :
            value = (desc == NULL) ? GxB_DEFAULT : desc->mask ; break ;
        case GrB_INP0 :
            value = (desc == NULL) ? GxB_DEFAULT : desc->in0 ; break ;
        case GrB_INP1 :
            value = (desc == NULL) ? GxB_DEFAULT : desc->in1 ; break ;
        case GxB_AxB_METHOD :
            value = (desc == NULL) ? GxB_DEFAULT : desc->axb ; break ;
        default :
            // unknown field: *val is left untouched
            return (GrB_INVALID_VALUE) ;
    }
    (*val) = value ;
    #pragma omp flush
    return (GrB_SUCCESS) ;
}
|
cpu_stream.h | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef ONEFLOW_CORE_EP_CPU_CPU_STREAM_H_
#define ONEFLOW_CORE_EP_CPU_CPU_STREAM_H_
#include "oneflow/core/ep/include/stream.h"
#include "oneflow/core/ep/cpu/cpu_device.h"
#define OF_RUNTIME_SEQ 0u
#define OF_RUNTIME_OMP 1u
#define OF_RUNTIME_TBB 2u
#if OF_CPU_THREADING_RUNTIME == OF_RUNTIME_OMP
#include <omp.h>
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_TBB
#include <tbb/blocked_range.h>
#include <tbb/parallel_for.h>
#include <tbb/global_control.h>
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_SEQ
// Nothing
#else
#error OF_CPU_THREADING_RUNTIME Error setting
#endif
#ifdef WITH_ONEDNN
#include <oneapi/dnnl/dnnl.hpp>
#endif
namespace oneflow {
namespace ep {
// RAII guard that pins the CPU worker-thread count for the lifetime of a
// scope. Behavior depends on the compile-time OF_CPU_THREADING_RUNTIME:
//  - TBB: limits parallelism via a tbb::global_control member.
//  - OpenMP: saves omp_get_max_threads() and calls omp_set_num_threads();
//    the destructor restores the saved value. Note this is a process-global
//    setting while the guard is alive.
//  - SEQ: a no-op.
class CpuNumThreadsGuard {
 public:
  OF_DISALLOW_COPY_AND_MOVE(CpuNumThreadsGuard);
#if OF_CPU_THREADING_RUNTIME == OF_RUNTIME_TBB
  explicit CpuNumThreadsGuard(size_t num_threads)
      : global_thread_limit(tbb::global_control::max_allowed_parallelism, num_threads) {}
  ~CpuNumThreadsGuard() {}
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_OMP
  explicit CpuNumThreadsGuard(size_t num_threads) : set_num_threads_(num_threads) {
    // Remember the current limit so the destructor can restore it.
    saved_num_threads_ = omp_get_max_threads();
    omp_set_num_threads(set_num_threads_);
  }
  ~CpuNumThreadsGuard() { omp_set_num_threads(saved_num_threads_); }
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_SEQ
  explicit CpuNumThreadsGuard(size_t num_threads) {}
  ~CpuNumThreadsGuard() {}
#else
#error OF_CPU_THREADING_RUNTIME Error setting
#endif
 private:
#if OF_CPU_THREADING_RUNTIME == OF_RUNTIME_TBB
  tbb::global_control global_thread_limit;
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_OMP
  size_t set_num_threads_;    // thread count requested at construction
  size_t saved_num_threads_;  // previous limit, restored on destruction
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_SEQ
#else
#error OF_CPU_THREADING_RUNTIME Error setting
#endif
};
#ifdef WITH_ONEDNN
class OneDnnExecutor;
#endif
// CPU execution stream. Work runs synchronously on the calling thread;
// ParallelFor fans loop iterations out across the threading runtime
// selected at compile time by OF_CPU_THREADING_RUNTIME (OpenMP, TBB, or
// purely sequential).
class CpuStream : public Stream {
 public:
  OF_DISALLOW_COPY_AND_MOVE(CpuStream);
  explicit CpuStream(CpuDevice* device) : device_(device) {
#ifdef WITH_ONEDNN
    // NOTE(review): OneDnnExecutor is only forward-declared above and defined
    // later in this header; confirm this in-class constructor instantiates
    // cleanly (std::make_unique needs the complete type at instantiation).
    onednn_executor_ = std::make_unique<ep::OneDnnExecutor>(this);
#endif
  }
  ~CpuStream() override = default;
  DeviceType device_type() const override;
  CpuDevice* device() const override;
  Maybe<void> Sync() override;
  void RecordEvent(Event* event) override;
  // Runs func over [begin, end) with the default grain size.
  template<typename F>
  void ParallelFor(int64_t begin, int64_t end, const F& func) {
    ParallelFor(begin, end, func, kParallelForDefaultGrain);
  }
  // Runs func over sub-ranges of [begin, end). `grain_size` is the minimum
  // amount of work per thread; func receives half-open [sub_begin, sub_end).
  template<typename F>
  void ParallelFor(int64_t begin, int64_t end, const F& func, size_t grain_size) {
#if OF_CPU_THREADING_RUNTIME != OF_RUNTIME_SEQ
    auto DivUp = [](int64_t x, int64_t y) { return (x + y - 1) / y; };
    size_t num_threads = device()->GetNumThreads();
#endif
    if (begin >= end) { return; }
#if OF_CPU_THREADING_RUNTIME == OF_RUNTIME_OMP
    // Cap the thread count so each thread gets at least grain_size work;
    // a non-positive grain forces single-threaded execution.
    if (grain_size > 0) {
      num_threads = std::min(num_threads, (size_t)(DivUp((end - begin), grain_size)));
    } else {
      num_threads = 1;
    }
#pragma omp parallel num_threads(num_threads)
    {
      // Static split: thread i takes the i-th contiguous chunk; trailing
      // threads whose chunk starts past `end` do nothing.
      int64_t omp_num_thread = omp_get_num_threads();
      int64_t chunk_size = DivUp((end - begin), omp_num_thread);
      int64_t omp_tid = omp_get_thread_num();
      int64_t thread_begin_index = begin + omp_tid * chunk_size;
      int64_t thread_end_index = std::min(end, chunk_size + thread_begin_index);
      if (thread_begin_index < end) { func(thread_begin_index, thread_end_index); }
    }
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_TBB
    // Limit TBB parallelism for the duration of this call; chunk size is at
    // least grain_size, distributed with a static partitioner.
    CpuNumThreadsGuard guard(num_threads);
    size_t tmp_chunk_size = DivUp((end - begin), num_threads);
    int64_t chunk_size = std::max(tmp_chunk_size, grain_size);
    tbb::parallel_for(
        tbb::blocked_range<int64_t>(begin, end, chunk_size),
        [func](const tbb::blocked_range<int64_t>& r) { func(r.begin(), r.end()); },
        tbb::static_partitioner{});
#elif OF_CPU_THREADING_RUNTIME == OF_RUNTIME_SEQ
    func(begin, end);
#else
#error OF_CPU_THREADING_RUNTIME Error setting
#endif
  }
#ifdef WITH_ONEDNN
  const std::unique_ptr<ep::OneDnnExecutor>& onednn_executor() const;
#endif
 private:
  CpuDevice* device_;
  // Default minimum number of iterations per worker thread.
  static constexpr size_t kParallelForDefaultGrain = 32768;
#ifdef WITH_ONEDNN
  std::unique_ptr<ep::OneDnnExecutor> onednn_executor_;
#endif
};
#ifdef WITH_ONEDNN
// Owns a oneDNN CPU engine/stream pair bound to a CpuStream, and runs
// caller-supplied closures against them.
class OneDnnExecutor {
 public:
  OF_DISALLOW_COPY_AND_MOVE(OneDnnExecutor);
  OneDnnExecutor() = delete;
  // Creates the cpu engine (index 0) and a stream on it. Member order
  // guarantees engine_ is constructed before stream_ dereferences it.
  explicit OneDnnExecutor(CpuStream* cpu_stream)
      : cpu_stream_(cpu_stream),
        engine_(std::make_unique<dnnl::engine>(dnnl::engine::kind::cpu, 0)),
        stream_(std::make_unique<dnnl::stream>(*engine_)) {}
  ~OneDnnExecutor() = default;
  // Invokes f(engine*, stream*) under the stream's thread-count guard and
  // blocks until all submitted oneDNN work has drained.
  template<typename F>
  void Launch(const F& f) {
    CpuNumThreadsGuard guard(cpu_stream_->device()->GetNumThreads());
    f(engine_.get(), stream_.get());
    stream_->wait();
  }
 private:
  CpuStream* cpu_stream_ = nullptr;
  std::unique_ptr<dnnl::engine> engine_;
  std::unique_ptr<dnnl::stream> stream_;
};
#endif
} // namespace ep
} // namespace oneflow
#endif // ONEFLOW_CORE_EP_CPU_CPU_STREAM_H_
|
cpu_ctc.h | #pragma once
#include <tuple>
#include <cmath>
#include <limits>
#include <algorithm>
#include <numeric>
#if !defined(CTC_DISABLE_OMP) && !defined(APPLE)
#include <omp.h>
#endif
#include "ctc_helper.h"
// CPU implementation of the CTC (Connectionist Temporal Classification)
// loss. All scratch memory is carved from a caller-provided workspace;
// nothing is allocated internally. ProbT is the floating-point type of
// the activations.
template<typename ProbT>
class CpuCTC {
public:
    // Noncopyable
    // alphabet_size: number of output symbols including the blank.
    // minibatch:     number of utterances per call.
    // workspace:     caller-owned scratch buffer (softmax probs + per-entry
    //                scratch; see cost_and_grad for the layout).
    // num_threads:   > 0 pins the OpenMP thread count (process-global side
    //                effect via omp_set_num_threads); otherwise the current
    //                omp_get_max_threads() is recorded.
    // blank_label:   index of the CTC blank symbol.
    CpuCTC(int alphabet_size, int minibatch, void* workspace, int num_threads,
           int blank_label) :
            alphabet_size_(alphabet_size), minibatch_(minibatch),
            num_threads_(num_threads), workspace_(workspace),
            blank_label_(blank_label) {
#if defined(CTC_DISABLE_OMP) || defined(APPLE)
#else
        if (num_threads > 0) {
            omp_set_num_threads(num_threads);
        } else {
            num_threads_ = omp_get_max_threads();
        }
#endif
    };
    CpuCTC(const CpuCTC&) = delete;
    CpuCTC& operator=(const CpuCTC&) = delete;
    // Full forward/backward pass: fills costs[mb] (negative log-likelihood)
    // and accumulates gradients into grads (same layout as activations;
    // assumed pre-zeroed, per compute_betas_and_grad's contract).
    ctcStatus_t cost_and_grad(const ProbT* const activations,
                              ProbT *grads,
                              ProbT* costs,
                              const int* const flat_labels,
                              const int* const label_lengths,
                              const int* const input_lengths);
    // Forward-only scoring: fills costs[mb] without computing gradients.
    ctcStatus_t score_forward(const ProbT* const activations,
                              ProbT* costs,
                              const int* const flat_labels,
                              const int* const label_lengths,
                              const int* const input_lengths);
private:
    // Per-utterance scratch views carved out of the shared workspace.
    class CpuCTC_metadata {
    private:
        int setup_labels(const int* const labels, int blank_label, int L, int S);
    public:
        CpuCTC_metadata(int L, int S, int T, int mb, int alphabet_size,
                        void* workspace, size_t bytes_used, int blank_label,
                        const int* const labels);
        ProbT* alphas;          // forward log-probabilities, S x T
        ProbT* betas;           // one backward column, length S
        int* labels_w_blanks;   // blank-interleaved labels, length S = 2L+1
        int* e_inc;             // per-step growth of the reachable band's end
        int* s_inc;             // per-step growth of the reachable band's start
        ProbT* output;          // per-symbol log-sum accumulator, alphabet_size
        int repeats;            // count of adjacent repeated labels
    };
    int alphabet_size_; // Number of characters plus blank
    int minibatch_;
    int num_threads_;
    int blank_label_;
    // NOTE(review): declared after blank_label_ but listed before it in the
    // ctor init list (-Wreorder); harmless since the two are independent.
    void* workspace_;
    // Numerically stable softmax over the alphabet axis, one column per
    // (minibatch entry, timestep).
    void softmax(const ProbT* const activations, ProbT* probs,
                 const int* const input_lengths);
    // Cost and gradient for one utterance; returns (-log-likelihood, flag)
    // where the flag signals a forward/backward likelihood mismatch.
    std::tuple<ProbT, bool>
    cost_and_grad_kernel(ProbT *grad, const ProbT* const probs,
                         const int* const labels, int T, int L,
                         int mb, size_t bytes_used);
    // Forward (alpha) dynamic program in log space; returns log-likelihood.
    ProbT compute_alphas(const ProbT* probs, int repeats, int S, int T,
                         const int* const e_inc,
                         const int* const s_inc,
                         const int* const labels,
                         ProbT* alphas);
    // Backward (beta) pass fused with gradient accumulation; returns the
    // backward log-likelihood (should match compute_alphas' result).
    ProbT compute_betas_and_grad(ProbT* grad, const ProbT* const probs,
                                 ProbT log_partition, int repeats,
                                 int S, int T, const int* const e_inc,
                                 const int* const s_inc,
                                 const int* const labels,
                                 ProbT* alphas,
                                 ProbT* betas,
                                 ProbT* output);
};
// Carves one utterance's scratch areas out of `workspace`, starting at byte
// offset `bytes_used`, in this order:
//   alphas  : ProbT[S*T]  (initialized to -inf)
//   betas   : ProbT[S]    (initialized to -inf)
//   labels_w_blanks, e_inc, s_inc : int[S] each
//   output  : ProbT[alphabet_size]
// `bytes_used` is advanced only locally; the caller must have reserved the
// same per-entry layout (see per_minibatch_bytes in cost_and_grad).
template<typename ProbT>
CpuCTC<ProbT>::CpuCTC_metadata::CpuCTC_metadata(int L, int S, int T, int mb,
                                                int alphabet_size,
                                                void* workspace, size_t bytes_used,
                                                int blank_label,
                                                const int* const labels) {
    alphas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
    bytes_used += sizeof(ProbT) * S * T;
    std::fill(alphas, alphas + S * T, ctc_helper::neg_inf<ProbT>());
    betas = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
    bytes_used += sizeof(ProbT) * S;
    std::fill(betas, betas + S, ctc_helper::neg_inf<ProbT>());
    labels_w_blanks = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
    bytes_used += sizeof(int) * S;
    e_inc = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
    bytes_used += sizeof(int) * S;
    s_inc = reinterpret_cast<int *>(static_cast<char *>(workspace) + bytes_used);
    bytes_used += sizeof(int) * S;
    output = reinterpret_cast<ProbT *>(static_cast<char *>(workspace) + bytes_used);
    bytes_used += sizeof(ProbT) * alphabet_size;
    // Build the blank-interleaved label sequence and band-increment tables;
    // also counts adjacent repeated labels.
    repeats = setup_labels(labels, blank_label, L, S);
}
// Fills labels_w_blanks with the blank-interleaved target sequence
// (blank, l1, blank, l2, ..., blank; length S = 2L+1) and builds the
// s_inc/e_inc tables describing how far the start/end of the reachable
// diagonal band advances per step. A repeated label forces two single
// steps (the separating blank cannot be skipped); a non-repeated label
// permits a double step. Returns the number of adjacent repeats.
template<typename ProbT>
int CpuCTC<ProbT>::CpuCTC_metadata::setup_labels(const int* const labels,
                                                 int blank_label, int L, int S) {
    int e_counter = 0;
    int s_counter = 0;
    s_inc[s_counter++] = 1;
    int repeats = 0;
    for (int i = 1; i < L; ++i) {
        if (labels[i-1] == labels[i]) {
            // Repeat: band moves by 1 twice (blank must be emitted between).
            s_inc[s_counter++] = 1;
            s_inc[s_counter++] = 1;
            e_inc[e_counter++] = 1;
            e_inc[e_counter++] = 1;
            ++repeats;
        }
        else {
            // Distinct labels: the blank between them can be skipped.
            s_inc[s_counter++] = 2;
            e_inc[e_counter++] = 2;
        }
    }
    e_inc[e_counter++] = 1;
    // Even positions are blanks, odd positions the original labels.
    for (int i = 0; i < L; ++i) {
        labels_w_blanks[2 * i] = blank_label;
        labels_w_blanks[2 * i + 1] = labels[i];
    }
    labels_w_blanks[S - 1] = blank_label;
    return repeats;
}
// Column-wise softmax over the alphabet axis, computed in the numerically
// stable max-subtracted form. One column per (minibatch entry, timestep);
// column mb at timestep t starts at (mb + minibatch_ * t) * alphabet_size_.
// Columns past input_lengths[mb] are left untouched.
template<typename ProbT>
void
CpuCTC<ProbT>::softmax(const ProbT* const activations, ProbT* probs,
                       const int* const input_lengths) {
#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        for (int t = 0; t < input_lengths[mb]; ++t) {
            const int offset = (mb + minibatch_ * t) * alphabet_size_;
            const ProbT* col_in = activations + offset;
            ProbT* col_out = probs + offset;
            // Subtract the column maximum before exponentiating to avoid
            // overflow in exp().
            ProbT col_max = -std::numeric_limits<ProbT>::infinity();
            for (int r = 0; r < alphabet_size_; ++r)
                col_max = std::max(col_max, col_in[r]);
            ProbT denom = ProbT(0.);
            for (int r = 0; r < alphabet_size_; ++r) {
                col_out[r] = std::exp(col_in[r] - col_max);
                denom += col_out[r];
            }
            for (int r = 0; r < alphabet_size_; ++r)
                col_out[r] /= denom;
        }
    }
}
// Cost and gradient for a single utterance: runs the forward (alpha) pass
// and the fused backward/gradient pass, returning (-log-likelihood, flag).
// The flag is set when the forward and backward log-likelihoods disagree by
// more than ctc_helper::threshold (a numerical-health check).
template<typename ProbT>
std::tuple<ProbT, bool>
CpuCTC<ProbT>::cost_and_grad_kernel(ProbT *grad, const ProbT* const probs,
                                    const int* const labels,
                                    int T, int L, int mb, size_t bytes_used) {
    const int S = 2*L + 1; // Number of labels with blanks
    CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_, bytes_used, blank_label_, labels);
    bool over_threshold = false;
    // Infeasible alignment: each adjacent repeat needs an extra blank, so
    // fewer than L + repeats timesteps cannot emit the whole label sequence.
    if (L + ctcm.repeats > T) {
        return std::make_tuple(ProbT(0), over_threshold); // TODO, not right to return 0
    }
    ProbT llForward = compute_alphas(probs, ctcm.repeats, S, T, ctcm.e_inc,
                                     ctcm.s_inc, ctcm.labels_w_blanks,
                                     ctcm.alphas);
    ProbT llBackward = compute_betas_and_grad(grad, probs, llForward, ctcm.repeats,
                                              S, T, ctcm.e_inc, ctcm.s_inc,
                                              ctcm.labels_w_blanks,
                                              ctcm.alphas,
                                              ctcm.betas,
                                              ctcm.output);
    // Forward and backward likelihoods should agree; a large gap indicates
    // numerical trouble for this utterance.
    ProbT diff = std::abs(llForward - llBackward);
    if (diff > ctc_helper::threshold) {
        over_threshold = true;
    }
    return std::make_tuple(-llForward, over_threshold);
}
// Computes forward probabilities (alphas) of the CTC lattice in log space
// over the S x T grid and returns the total forward log-likelihood.
// `start`/`end` bound the band of states reachable at each timestep;
// s_inc/e_inc (built by setup_labels) advance those bounds.
template<typename ProbT>
ProbT CpuCTC<ProbT>::compute_alphas(const ProbT* probs, int repeats, int S, int T,
                                    const int* const e_inc,
                                    const int* const s_inc,
                                    const int* const labels,
                                    ProbT* alphas) {
    // t = 0: only the leading blank (state 0) and the first label (state 1)
    // are reachable; start skips state 0 when there is no slack time.
    int start = (((S /2) + repeats - T) < 0) ? 0 : 1,
        end = S > 1 ? 2 : 1;
    for (int i = start; i < end; ++i) {
        alphas[i] = std::log(probs[labels[i]]);
    }
    for(int t = 1; t < T; ++t) {
        // remain >= 0: remaining timesteps constrain how many states must
        // still be traversed, so the band's start advances.
        int remain = (S / 2) + repeats - (T - t);
        if(remain >= 0)
            start += s_inc[remain];
        if(t <= (S / 2) + repeats)
            end += e_inc[t - 1];
        int startloop = start;
        // idx1/idx2: current/previous alpha columns; idx3: probs column at t
        // (timesteps are strided by alphabet_size_ * minibatch_).
        int idx1 = t * S, idx2 = (t - 1) * S, idx3 = t * (alphabet_size_ * minibatch_);
        if (start == 0) {
            // State 0 (leading blank) can only be reached from itself.
            alphas[idx1] = alphas[idx2] + std::log(probs[blank_label_ + idx3]);
            startloop += 1;
        }
        for(int i = startloop; i < end; ++i) {
            ProbT prev_sum = ctc_helper::log_plus<ProbT>()(alphas[i + idx2], alphas[(i-1) + idx2]);
            // Skip two if not on blank and not on repeat.
            if (labels[i] != blank_label_ && i != 1 && labels[i] != labels[i-2])
                prev_sum = ctc_helper::log_plus<ProbT>()(prev_sum, alphas[(i-2) + idx2]);
            alphas[i + idx1] = prev_sum + std::log(probs[labels[i] + idx3]);
        }
    }
    // Total likelihood: log-sum over the states still reachable at T-1.
    ProbT loglike = ctc_helper::neg_inf<ProbT>();
    for(int i = start; i < end; ++i) {
        loglike = ctc_helper::log_plus<ProbT>()(loglike, alphas[i + (T - 1) * S]);
    }
    return loglike;
}
// Starting from T, we sweep backward over the alpha array computing one column
// of betas as we go. At each position we can update product alpha * beta and then
// sum into the gradient associated with each label.
// NOTE computes gradient w.r.t UNNORMALIZED final layer activations.
// Assumed passed in grads are already zeroed!
// Returns the backward log-likelihood, which should match the forward one
// passed in as log_partition (used to normalize the gradient).
template<typename ProbT>
ProbT CpuCTC<ProbT>::compute_betas_and_grad(ProbT* grad, const ProbT* const probs,
                                            ProbT log_partition, int repeats,
                                            int S, int T, const int* const e_inc,
                                            const int* const s_inc,
                                            const int* const labels,
                                            ProbT* alphas,
                                            ProbT* betas,
                                            ProbT* output) {
    // Mirror image of the forward band: at t = T-1 only the trailing blank
    // and last label are reachable.
    int start = S > 1 ? (S - 2) : 0,
        end = (T > (S / 2) + repeats) ? S : S-1;
    std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>());
    //set the starting values in the beta column at the very right edge
    for (int i = start; i < end; ++i) {
        betas[i] = std::log(probs[labels[i] + (T - 1) * (alphabet_size_ * minibatch_)]);
        //compute alpha * beta in log space at this position in (S, T) space
        alphas[i + (T - 1) * S] += betas[i];
        //update the gradient associated with this label
        //essentially performing a reduce-by-key in a sequential manner
        output[labels[i]] =
            ctc_helper::log_plus<ProbT>()(alphas[i + (T - 1) * S], output[labels[i]]);
    }
    //update the gradient wrt to each unique label
    for (int i = 0; i < alphabet_size_; ++i) {
        int idx3 = (T - 1) * alphabet_size_ * minibatch_ + i;
        // Symbols never touched at this timestep (or with zero probability)
        // get grad = prob (the softmax-only term of the CTC gradient).
        if (/*output[i] == 0.0 || */output[i] == ctc_helper::neg_inf<ProbT>() ||
            probs[idx3] == 0.0) {
            grad[idx3] = probs[idx3];
        } else {
            grad[idx3] = probs[idx3] - std::exp(output[i] -
                                                std::log(probs[idx3]) - log_partition);
        }
    }
    //loop from the second to last column all the way to the left
    for(int t = T - 2; t >= 0; --t) {
        int remain = (S / 2) + repeats - (T - t);
        if(remain >= -1)
            start -= s_inc[remain + 1];
        if(t < (S / 2) + repeats)
            end -= e_inc[t];
        // The final state (trailing blank) is handled separately below, so
        // exclude it from the main loop when the band reaches it.
        int endloop = end == S ? end - 1 : end;
        int idx1 = t * S, idx3 = t * (alphabet_size_ * minibatch_);
        std::fill(output, output + alphabet_size_, ctc_helper::neg_inf<ProbT>());
        for(int i = start; i < endloop; ++i) {
            ProbT next_sum = ctc_helper::log_plus<ProbT>()(betas[i], betas[(i+1)]);
            // Skip two if not on blank and not on repeat.
            if (labels[i] != blank_label_ && i != (S-2) && labels[i] != labels[i+2]){
                next_sum = ctc_helper::log_plus<ProbT>()(next_sum, betas[(i+2)]);
            }
            betas[i] = next_sum + std::log(probs[labels[i] + idx3]);
            //compute alpha * beta in log space
            alphas[i + idx1] += betas[i];
            //update the gradient associated with this label
            output[labels[i]] =
                ctc_helper::log_plus<ProbT>()(alphas[i + idx1], output[labels[i]]);
        }
        if (end == S) {
            // Trailing blank: can only transition to itself going backward.
            betas[(S-1)] = betas[(S-1)] + std::log(probs[blank_label_ + idx3]);
            alphas[(S-1) + idx1] += betas[(S-1)];
            output[labels[S-1]] =
                ctc_helper::log_plus<ProbT>()(alphas[S-1 + idx1], output[labels[S-1]]);
        }
        //go over the unique labels and compute the final grad
        // wrt to each one at this time step
        for (int i = 0; i < alphabet_size_; ++i) {
            if (/*output[i] == 0.0 || */output[i] == ctc_helper::neg_inf<ProbT>() ||
                probs[idx3] == 0.0) {
                grad[idx3] = probs[idx3];
            } else {
                grad[idx3] = probs[idx3] - std::exp(output[i] -
                                                    std::log(probs[idx3]) - log_partition);
            }
            ++idx3;
        }
    }
    // Backward likelihood: log-sum over states reachable at t = 0.
    ProbT loglike = ctc_helper::neg_inf<ProbT>();
    for(int i = start; i < end; ++i) {
        loglike = ctc_helper::log_plus<ProbT>()(loglike, betas[i]);
    }
    return loglike;
}
// Full CTC forward/backward over a minibatch: fills costs[mb] with the
// negative log-likelihood of each utterance and accumulates gradients
// w.r.t. the unnormalized activations into grads. Returns
// CTC_STATUS_INVALID_VALUE on any null argument.
template<typename ProbT>
ctcStatus_t
CpuCTC<ProbT>::cost_and_grad(const ProbT* const activations,
                             ProbT *grads,
                             ProbT *costs,
                             const int* const flat_labels,
                             const int* const label_lengths,
                             const int* const input_lengths) {
    if (activations == nullptr ||
        grads == nullptr ||
        costs == nullptr ||
        flat_labels == nullptr ||
        label_lengths == nullptr ||
        input_lengths == nullptr
       )
        return CTC_STATUS_INVALID_VALUE;
    // The workspace begins with the softmaxed probabilities for the whole
    // minibatch; per-utterance scratch follows.
    ProbT* probs = static_cast<ProbT *>(workspace_);
    int maxT = *std::max_element(input_lengths, input_lengths + minibatch_);
    size_t bytes_used = sizeof(ProbT) * minibatch_ * alphabet_size_ * maxT;
    //per minibatch memory
    size_t per_minibatch_bytes = 0;
    int maxL = *std::max_element(label_lengths, label_lengths + minibatch_);
    int maxS = 2 * maxL + 1;
    // BUGFIX: these strides previously used sizeof(float), but
    // CpuCTC_metadata lays out alphas/betas/output as ProbT, so the scratch
    // regions of adjacent minibatch entries would overlap for ProbT == double.
    // NOTE(review): the external workspace-size query must reserve the same
    // amount — confirm it also accounts for sizeof(ProbT).
    //output
    per_minibatch_bytes += sizeof(ProbT) * alphabet_size_;
    //alphas
    per_minibatch_bytes += sizeof(ProbT) * maxS * maxT;
    //betas
    per_minibatch_bytes += sizeof(ProbT) * maxS;
    //labels w/blanks, e_inc, s_inc
    per_minibatch_bytes += 3 * sizeof(int) * maxS;
    softmax(activations, probs, input_lengths);
#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        const int T = input_lengths[mb]; // Length of utterance (time)
        const int L = label_lengths[mb]; // Number of labels in transcription
        bool mb_status;
        std::tie(costs[mb], mb_status) =
            cost_and_grad_kernel(grads + mb * alphabet_size_,
                                 probs + mb * alphabet_size_,
                                 flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0),
                                 T, L, mb,
                                 bytes_used + mb * per_minibatch_bytes);
    }
    return CTC_STATUS_SUCCESS;
}
// Forward-only CTC scoring: fills costs[mb] with the negative forward
// log-likelihood of each utterance; no gradients are computed. Infeasible
// alignments (L + repeats > T) yield a cost of 0.
template<typename ProbT>
ctcStatus_t CpuCTC<ProbT>::score_forward(const ProbT* const activations,
                                         ProbT* costs,
                                         const int* const flat_labels,
                                         const int* const label_lengths,
                                         const int* const input_lengths) {
    if (activations == nullptr ||
        costs == nullptr ||
        flat_labels == nullptr ||
        label_lengths == nullptr ||
        input_lengths == nullptr
       )
        return CTC_STATUS_INVALID_VALUE;
    // Workspace layout mirrors cost_and_grad: softmax probs first, then
    // per-utterance scratch.
    ProbT* probs = static_cast<ProbT *>(workspace_);
    int maxT = *std::max_element(input_lengths, input_lengths + minibatch_);
    size_t bytes_used = sizeof(ProbT) * minibatch_ * alphabet_size_ * maxT;
    //per minibatch memory
    size_t per_minibatch_bytes = 0;
    int maxL = *std::max_element(label_lengths, label_lengths + minibatch_);
    int maxS = 2 * maxL + 1;
    // BUGFIX: sizeof(ProbT), not sizeof(float) — CpuCTC_metadata carves
    // ProbT-typed buffers, so float strides overlap when ProbT is double.
    // NOTE(review): confirm the external workspace-size query matches.
    //output
    per_minibatch_bytes += sizeof(ProbT) * alphabet_size_;
    //alphas
    per_minibatch_bytes += sizeof(ProbT) * maxS * maxT;
    //betas
    per_minibatch_bytes += sizeof(ProbT) * maxS;
    //labels w/blanks, e_inc, s_inc
    per_minibatch_bytes += 3 * sizeof(int) * maxS;
    softmax(activations, probs, input_lengths);
#pragma omp parallel for
    for (int mb = 0; mb < minibatch_; ++mb) {
        const int T = input_lengths[mb]; // Length of utterance (time)
        const int L = label_lengths[mb]; // Number of labels in transcription
        const int S = 2*L + 1;           // Number of labels with blanks
        CpuCTC_metadata ctcm(L, S, T, mb, alphabet_size_, workspace_,
                             bytes_used + mb * per_minibatch_bytes, blank_label_,
                             flat_labels + std::accumulate(label_lengths, label_lengths + mb, 0));
        if (L + ctcm.repeats > T)
            costs[mb] = ProbT(0);
        else {
            costs[mb] = -compute_alphas(probs + mb * alphabet_size_, ctcm.repeats, S, T,
                                        ctcm.e_inc, ctcm.s_inc, ctcm.labels_w_blanks,
                                        ctcm.alphas);
        }
    }
    return CTC_STATUS_SUCCESS;
}
|
3d7pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Store X - Y in RESULT. Y is normalized in place (as in the original)
 * so that the usec difference lands in [0, 1000000]. Returns 1 when the
 * difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
    /* Borrow whole seconds into y until x's usec is not smaller. */
    if (x->tv_usec < y->tv_usec) {
        int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * borrow;
        y->tv_sec += borrow;
    }
    /* Carry surplus microseconds out of the difference. */
    if (x->tv_usec - y->tv_usec > 1000000) {
        int carry = (x->tv_usec - y->tv_usec) / 1000000;
        y->tv_usec += 1000000 * carry;
        y->tv_sec -= carry;
    }
    /* Componentwise difference; tv_usec is now guaranteed non-negative. */
    result->tv_sec = x->tv_sec - y->tv_sec;
    result->tv_usec = x->tv_usec - y->tv_usec;
    return x->tv_sec < y->tv_sec;
}
/* Driver for the tiled order-1 3D 7-point variable-coefficient stencil.
 * Usage: prog Nx Ny Nz Nt — grid extents (halo added internally) and the
 * number of timesteps. Runs the PLUTO/CLooG-tiled sweep TESTS times and
 * reports the best wall-clock time.
 *
 * Fixes vs. the previous revision:
 *  * Nx/Ny/Nz/Nt were read uninitialized when fewer than 4 arguments were
 *    given (undefined behavior) — now we fail fast with a usage message.
 *  * the initialization loops started at index 1 and only filled plane
 *    A[0], yet the stencil reads index 0 of both time planes — both planes
 *    are now fully initialized (this changes the rand() stream, and hence
 *    the numeric output, but removes reads of uninitialized memory).
 *  * A, coef, and tile_size themselves were leaked.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;
  if (argc <= 4) {
    fprintf(stderr, "Usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+2;   /* +2: one halo cell on each side */
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);

  /* Two time planes A[0..1][Nz][Ny][Nx]. */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* One coefficient field per stencil point (center + 6 neighbors). */
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* Tile size list, terminated by -1 (shape expected by the source-to-
   * source transformation tooling). */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 32;
  tile_size[3] = 256;
  tile_size[4] = -1;

  /* Timekeeping. */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;

  /* Deterministic initialization of every cell of both time planes and all
   * coefficient fields, including index-0 halo planes read by the stencil. */
  srand(42);
  for (m = 0; m < 2; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          A[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // Tiled time-space sweep - Addition: 6 && Multiplication: 2
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code */
    if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
      for (t1=-1;t1<=floord(Nt-2,12);t1++) {
        lbp=max(ceild(t1,2),ceild(24*t1-Nt+3,24));
        ubp=min(floord(Nt+Nz-4,24),floord(12*t1+Nz+9,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(0,ceild(3*t1-7,8)),ceild(24*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(12*t1+Ny+21,32)),floord(24*t2+Ny+20,32)),floord(24*t1-24*t2+Nz+Ny+19,32));t3++) {
            for (t4=max(max(max(0,ceild(3*t1-63,64)),ceild(24*t2-Nz-252,256)),ceild(32*t3-Ny-252,256));t4<=min(min(min(min(floord(Nt+Nx-4,256),floord(12*t1+Nx+21,256)),floord(24*t2+Nx+20,256)),floord(32*t3+Nx+28,256)),floord(24*t1-24*t2+Nz+Nx+19,256));t4++) {
              for (t5=max(max(max(max(max(0,12*t1),24*t1-24*t2+1),24*t2-Nz+2),32*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,12*t1+23),24*t2+22),32*t3+30),256*t4+254),24*t1-24*t2+Nz+21);t5++) {
                for (t6=max(max(24*t2,t5+1),-24*t1+24*t2+2*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+2*t5),t5+Nz-2);t6++) {
                  for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) {
                    lbv=max(256*t4,t5+1);
                    ubv=min(256*t4+255,t5+Nx-2);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free everything, including the outer arrays and the tile-size list
   * (previously leaked). */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
mpiCodeGenerator.h | /*
*
*
* V 0.2 using real frontend parser and dedicated OpenMP-like AST nodes for program representation
 * This is necessary to parse the complex extended map clause with dist_data info.
* The previous version's MPI_PragmaAttribute is no longer used.
*
* Liao 12/11/2015
*
* V 0.1
* Parsing pragmas and generating MPI code from input sequential code
* Pragma is OpenMP style, reusing OmpAttribute to store information
 * As an experiment, a lightweight recursive-descent parser is used to parse the pragmas
* Liao 9/22/2015
* */
#ifndef MPI_Code_Generator_h
#define MPI_Code_Generator_h
#include <vector>
#include <string>
namespace MPI_Code_Generator
{
//------------ v 0.2 interface: expects the extended ROSE frontend to parse pragmas and create OpenMP AST nodes.
// Use the -rose:openmp:ast_only command line option to activate the frontend support.
// Entry point of the v 0.2 translator: lowers the OpenMP AST nodes found in 'file' into MPI code.
void lower_xomp (SgSourceFile* file);
// Translate a "#pragma omp target device(mpi:master) begin ... end" region.
void transMPIDeviceMaster (SgOmpTargetStatement * t_stmt);
// Translate an omp-for loop nested under a target directive.
// NOTE(review): exact directive combination handled here is not visible from this header — confirm in the .cpp.
void transOmpTargetParallelLoop (SgOmpForStatement* loop);
// Process the map clause variables of a target directive; returns the set of mapped symbols.
std::set<SgSymbol* > transOmpMapVariables (SgOmpTargetStatement* );
// Convert a C data type into the corresponding MPI type name.
std::string C2MPITypeName (SgType*);
//--------------- v 0.1 interface; no longer used.
class MPI_PragmaAttribute;
//int generateMPI (SgSourceFile* sfile);
//! A prototype parser for directives guiding MPI code generation.
//! Fills MPI_Pragma_Attribute_List with one attribute per recognized pragma.
void parsePragmas(SgSourceFile* sfile, std::vector <MPI_PragmaAttribute*>& MPI_Pragma_Attribute_List);
//! Translate the pragma attributes previously produced by parsePragmas().
void translatePragmas (std::vector <MPI_PragmaAttribute*>& MPI_Pragma_Attribute_List);
//! Setup MPI initialization in the input file.
void setupMPIInit(SgSourceFile* sfile);
//! Setup MPI finalization in the input file.
void setupMPIFinalize(SgSourceFile* sfile);
// Pragma enum values.
// For quick prototyping, we use AstAttributes instead of dedicated AST nodes for storing parsed results.
enum mpi_pragma_enum {
// For the main function: what is the default semantics for code if no directives are present?
// Run by all processes (spmd) vs. run only by the master process, or must be explicitly declared ( device (mpi:all)).
// #pragma omp mpi_device_default(mpi:all|mpi:master|explicit)
e_mpi_all,
e_mpi_master,
e_semantics_explicit,
//#pragma omp mpi_device_default(mpi:all|mpi:master|explicit)
pragma_mpi_device_default,
//#pragma omp target device(mpi:all) begin
pragma_mpi_device_all_begin,
//#pragma omp target device(mpi:all) end
pragma_mpi_device_all_end,
// #pragma omp target device(mpi:master) begin
pragma_mpi_device_master_begin,
// #pragma omp target device(mpi:master) end
pragma_mpi_device_master_end,
// pragma omp target device(mpi:all) map ( dist_data)
pragma_mpi_device_all_map_dist,
//#pragma omp parallel for
pragma_parallel_for,
pragma_last // sentinel value; keep last
};
// Global settings for the code generation
extern mpi_pragma_enum mpi_device_default_choice;
// v 0.1 attribute attached to a pragma declaration, recording the parsed pragma kind (no longer used).
class MPI_PragmaAttribute: public AstAttribute
{
public:
SgPragmaDeclaration* pragma_node; // the associated AST node for the pragma
enum mpi_pragma_enum pragma_type; // which pragma kind this attribute represents
enum mpi_pragma_enum default_semantics; // device-default semantics; initialized to e_semantics_explicit
MPI_PragmaAttribute (SgPragmaDeclaration* n , mpi_pragma_enum p_type): pragma_node(n), pragma_type(p_type)
{ default_semantics = e_semantics_explicit; }
// convert the attribute back to string format
std::string toString();
}; // end class
// parse a single pragma declaration, internal use only
extern AstAttribute* parse_MPI_Pragma (SgPragmaDeclaration* pragmaDecl);
// parse pragmas in an input file
void parsePragmas(SgSourceFile* sfile);
} // end namespace
#endif //MPI_Code_Generator_h
|
ast-dump-openmp-target-parallel-for-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) {  // baseline: a single loop under the combined directive. NOTE: the CHECK lines below encode exact line:col positions — never add or remove source lines in this file.
#pragma omp target parallel for simd
  for (int i = 0; i < x; i++)
    ;
}
void test_two(int x, int y) {  // doubly nested loop, no collapse clause
#pragma omp target parallel for simd
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_three(int x, int y) {  // same loop nest as test_two but with an explicit collapse(1)
#pragma omp target parallel for simd collapse(1)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_four(int x, int y) {  // collapse(2) over a doubly nested loop
#pragma omp target parallel for simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      ;
}
void test_five(int x, int y, int z) {  // collapse(2) over a triply nested loop nest
#pragma omp target parallel for simd collapse(2)
  for (int i = 0; i < x; i++)
    for (int i = 0; i < y; i++)
      for (int i = 0; i < z; i++)
        ;
}
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-parallel-for-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:4:1, col:37>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:1) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:10:1, col:37>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:17:1, col:49>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:38, col:48>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:47> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:47> 'int' 1
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:24:1, col:49>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:38, col:48>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:47> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:47> 'int' 2
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:1) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPTargetParallelForSimdDirective {{.*}} <line:31:1, col:49>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:38, col:48>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:47> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:47> 'int' 2
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:1> col:1 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:1> col:1 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:1> col:1 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:1) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
spectral_sequence_reduction.h | /* Copyright 2013 IST Austria
Contributed by: Jan Reininghaus
This file is part of PHAT.
PHAT is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PHAT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with PHAT. If not, see <http://www.gnu.org/licenses/>. */
#pragma once
#include <phat/helpers/misc.h>
#include <phat/boundary_matrix.h>
namespace phat {
class spectral_sequence_reduction {
public:
    // Reduce a boundary matrix with the "spectral sequence" strategy:
    // the columns are partitioned into num_stripes contiguous blocks, and
    // in each pass every stripe reduces its still-unreduced columns against
    // one block of rows, sweeping diagonally across the matrix so stripes
    // can run in parallel without touching the same pivots.
    template< typename Representation >
    void operator () ( boundary_matrix< Representation >& boundary_matrix ) {

        const index nr_columns = boundary_matrix.get_num_cols();
        std::vector< index > lowest_one_lookup( nr_columns, -1 );

        // One stripe per available OpenMP thread.
        //const index num_stripes = (index) sqrt( (double)nr_columns );
        const index num_stripes = omp_get_max_threads();

        // Ceiling of nr_columns / num_stripes.
        // FIX: the original initializer contained a self-assignment inside
        // its own ternary ("index block_size = cond ? a : block_size = b;"),
        // i.e. it assigned to block_size while block_size was still being
        // initialized.  This form computes the same value without the
        // redundant (and confusing) inner assignment.
        const index block_size = ( nr_columns % num_stripes == 0 ) ?
            nr_columns / num_stripes :
            nr_columns / num_stripes + 1;

        std::vector< std::vector< index > > unreduced_cols_cur_pass( num_stripes );
        std::vector< std::vector< index > > unreduced_cols_next_pass( num_stripes );

        for( index cur_dim = boundary_matrix.get_max_dim(); cur_dim >= 1; cur_dim-- ) {
            // Collect the not-yet-reduced columns of the current dimension,
            // one work list per stripe.
            #pragma omp parallel for schedule( guided, 1 )
            for( index cur_stripe = 0; cur_stripe < num_stripes; cur_stripe++ ) {
                index col_begin = cur_stripe * block_size;
                index col_end = std::min( (cur_stripe+1) * block_size, nr_columns );
                for( index cur_col = col_begin; cur_col < col_end; cur_col++ )
                    if( boundary_matrix.get_dim( cur_col ) == cur_dim && boundary_matrix.get_max_index( cur_col ) != -1 )
                        unreduced_cols_cur_pass[ cur_stripe ].push_back( cur_col );
            }

            for( index cur_pass = 0; cur_pass < num_stripes; cur_pass++ ) {
                boundary_matrix.sync();

                // FIX: the loop variable was declared 'int' here (but 'index'
                // in the loop above); use 'index' consistently so the
                // comparison with num_stripes is between identical types.
                #pragma omp parallel for schedule( guided, 1 )
                for( index cur_stripe = 0; cur_stripe < num_stripes; cur_stripe++ ) {
                    // Row block this stripe may reduce against in this pass;
                    // row_begin can be negative for early stripes, in which
                    // case the range tests below simply never match.
                    index row_begin = (cur_stripe - cur_pass) * block_size;
                    index row_end = row_begin + block_size;

                    unreduced_cols_next_pass[ cur_stripe ].clear();
                    for( index idx = 0; idx < (index)unreduced_cols_cur_pass[ cur_stripe ].size(); idx++ ) {
                        index cur_col = unreduced_cols_cur_pass[ cur_stripe ][ idx ];
                        index lowest_one = boundary_matrix.get_max_index( cur_col );
                        // Standard column reduction, restricted to pivots
                        // inside [row_begin, row_end).
                        while( lowest_one != -1 && lowest_one >= row_begin && lowest_one < row_end && lowest_one_lookup[ lowest_one ] != -1 ) {
                            boundary_matrix.add_to( lowest_one_lookup[ lowest_one ], cur_col );
                            lowest_one = boundary_matrix.get_max_index( cur_col );
                        }
                        if( lowest_one != -1 ) {
                            if( lowest_one >= row_begin && lowest_one < row_end ) {
                                // Pivot found inside this row block: record it
                                // and clear / finalize the paired columns.
                                lowest_one_lookup[ lowest_one ] = cur_col;
                                boundary_matrix.clear( lowest_one );
                                boundary_matrix.finalize( cur_col );
                            } else {
                                // Pivot lies in another row block; the column
                                // is retried in a later pass.
                                unreduced_cols_next_pass[ cur_stripe ].push_back( cur_col );
                            }
                        }
                    }
                    unreduced_cols_next_pass[ cur_stripe ].swap( unreduced_cols_cur_pass[ cur_stripe ] );
                }
            }
        }
    }
};
}
|
msgmerge.c | /* GNU gettext - internationalization aids
Copyright (C) 1995-1998, 2000-2010, 2012, 2015-2016 Free Software
Foundation, Inc.
This file was written by Peter Miller <millerp@canb.auug.org.au>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
#include <alloca.h>
#include <getopt.h>
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <locale.h>
#include "closeout.h"
#include "dir-list.h"
#include "error.h"
#include "error-progname.h"
#include "progname.h"
#include "relocatable.h"
#include "basename.h"
#include "message.h"
#include "read-catalog.h"
#include "read-po.h"
#include "read-properties.h"
#include "read-stringtable.h"
#include "write-catalog.h"
#include "write-po.h"
#include "write-properties.h"
#include "write-stringtable.h"
#include "color.h"
#include "format.h"
#include "xalloc.h"
#include "xmalloca.h"
#include "obstack.h"
#include "c-strstr.h"
#include "c-strcase.h"
#include "po-charset.h"
#include "msgl-iconv.h"
#include "msgl-equal.h"
#include "msgl-fsearch.h"
#include "glthread/lock.h"
#include "lang-table.h"
#include "plural-exp.h"
#include "plural-count.h"
#include "msgl-check.h"
#include "po-xerror.h"
#include "backupfile.h"
#include "copy-file.h"
#include "propername.h"
#include "gettext.h"
/* Shorthand for gettext translation lookup.  */
#define _(str) gettext (str)
/* Allocation hooks required by obstack.h.  */
#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free
/* If true do not print unneeded messages. */
static bool quiet;
/* Verbosity level. */
static int verbosity_level;
/* Force output of PO file even if empty. */
static int force_po;
/* Apply the .pot file to each of the domains in the PO file. */
static bool multi_domain_mode = false;
/* Determines whether to use fuzzy matching. */
static bool use_fuzzy_matching = true;
/* Determines whether to keep old msgids as previous msgids. */
static bool keep_previous = false;
/* Language (ISO-639 code) and optional territory (ISO-3166 code). */
static const char *catalogname = NULL;
/* List of user-specified compendiums. */
static message_list_list_ty *compendiums;
/* List of corresponding filenames. */
static string_list_ty *compendium_filenames;
/* Update mode. */
static bool update_mode = false;
/* Argument of --backup (backup method), if given; only valid with --update.  */
static const char *version_control_string;
/* Argument of --suffix (simple-backup suffix), if given; only valid with
   --update.  */
static const char *backup_suffix_string;
/* Long options. */
/* Entries whose value is a plain character are synonyms for the
   corresponding short option handled in main()'s getopt loop; entries
   with CHAR_MAX + n values are long-only options.  */
static const struct option long_options[] =
{
{ "add-location", optional_argument, NULL, 'n' },
{ "backup", required_argument, NULL, CHAR_MAX + 1 },
{ "color", optional_argument, NULL, CHAR_MAX + 9 },
{ "compendium", required_argument, NULL, 'C', },
{ "directory", required_argument, NULL, 'D' },
{ "escape", no_argument, NULL, 'E' },
{ "force-po", no_argument, &force_po, 1 },  /* sets the flag directly */
{ "help", no_argument, NULL, 'h' },
{ "indent", no_argument, NULL, 'i' },
{ "lang", required_argument, NULL, CHAR_MAX + 8 },
{ "multi-domain", no_argument, NULL, 'm' },
{ "no-escape", no_argument, NULL, 'e' },
{ "no-fuzzy-matching", no_argument, NULL, 'N' },
{ "no-location", no_argument, NULL, CHAR_MAX + 11 },
{ "no-wrap", no_argument, NULL, CHAR_MAX + 4 },
{ "output-file", required_argument, NULL, 'o' },
{ "previous", no_argument, NULL, CHAR_MAX + 7 },
{ "properties-input", no_argument, NULL, 'P' },
{ "properties-output", no_argument, NULL, 'p' },
{ "quiet", no_argument, NULL, 'q' },
{ "sort-by-file", no_argument, NULL, 'F' },
{ "sort-output", no_argument, NULL, 's' },
{ "silent", no_argument, NULL, 'q' },  /* alias for --quiet */
{ "strict", no_argument, NULL, CHAR_MAX + 2 },
{ "stringtable-input", no_argument, NULL, CHAR_MAX + 5 },
{ "stringtable-output", no_argument, NULL, CHAR_MAX + 6 },
{ "style", required_argument, NULL, CHAR_MAX + 10 },
{ "suffix", required_argument, NULL, CHAR_MAX + 3 },
{ "update", no_argument, NULL, 'U' },
{ "verbose", no_argument, NULL, 'v' },
{ "version", no_argument, NULL, 'V' },
{ "width", required_argument, NULL, 'w', },
{ NULL, 0, NULL, 0 }
};
/* Counters for statistics about a merge run; the fields track merged,
   fuzzy-matched, missing and obsolete messages respectively.  */
struct statistics
{
size_t merged;
size_t fuzzied;
size_t missing;
size_t obsolete;
};
/* Forward declaration of local functions. */
static void usage (int status)
/* usage() terminates the program; tell GCC (>= 2.5) so it can warn about
   unreachable code after calls to it.  */
#if defined __GNUC__ && ((__GNUC__ == 2 && __GNUC_MINOR__ >= 5) || __GNUC__ > 2)
__attribute__ ((noreturn))
#endif
;
static void compendium (const char *filename);
static void msgdomain_list_stablesort_by_obsolete (msgdomain_list_ty *mdlp);
static msgdomain_list_ty *merge (const char *fn1, const char *fn2,
catalog_input_format_ty input_syntax,
msgdomain_list_ty **defp);
/* Program entry point: parse the command line, merge def.po with ref.pot,
   and either write the result to the output file / stdout or, with
   --update, back up and rewrite def.po in place.  */
int
main (int argc, char **argv)
{
int opt;
bool do_help;
bool do_version;
char *output_file;
char *color;
msgdomain_list_ty *def;
msgdomain_list_ty *result;
catalog_input_format_ty input_syntax = &input_format_po;
catalog_output_format_ty output_syntax = &output_format_po;
bool sort_by_filepos = false;
bool sort_by_msgid = false;
/* Set program name for messages. */
set_program_name (argv[0]);
error_print_progname = maybe_print_progname;
verbosity_level = 0;
quiet = false;
gram_max_allowed_errors = UINT_MAX;
#ifdef HAVE_SETLOCALE
/* Set locale via LC_ALL. */
setlocale (LC_ALL, "");
#endif
/* Set the text message domain. */
bindtextdomain (PACKAGE, relocate (LOCALEDIR));
bindtextdomain ("bison-runtime", relocate (BISON_LOCALEDIR));
textdomain (PACKAGE);
/* Ensure that write errors on stdout are detected. */
atexit (close_stdout);
/* Set default values for variables. */
do_help = false;
do_version = false;
output_file = NULL;
color = NULL;
/* Process the command-line options.  */
while ((opt = getopt_long (argc, argv, "C:D:eEFhimn:No:pPqsUvVw:",
long_options, NULL))
!= EOF)
switch (opt)
{
case '\0': /* Long option. */
break;
case 'C':
compendium (optarg);
break;
case 'D':
dir_list_append (optarg);
break;
case 'e':
message_print_style_escape (false);
break;
case 'E':
message_print_style_escape (true);
break;
case 'F':
sort_by_filepos = true;
break;
case 'h':
do_help = true;
break;
case 'i':
message_print_style_indent ();
break;
case 'm':
multi_domain_mode = true;
break;
case 'n':
if (handle_filepos_comment_option (optarg))
usage (EXIT_FAILURE);
break;
case 'N':
use_fuzzy_matching = false;
break;
case 'o':
output_file = optarg;
break;
case 'p':
output_syntax = &output_format_properties;
break;
case 'P':
input_syntax = &input_format_properties;
break;
case 'q':
quiet = true;
break;
case 's':
sort_by_msgid = true;
break;
case 'U':
update_mode = true;
break;
case 'v':
++verbosity_level;
break;
case 'V':
do_version = true;
break;
case 'w':
/* --width: set the output page width; silently ignore an argument
   that does not start with a number.  */
{
int value;
char *endp;
value = strtol (optarg, &endp, 10);
if (endp != optarg)
message_page_width_set (value);
}
break;
case CHAR_MAX + 1: /* --backup */
version_control_string = optarg;
break;
case CHAR_MAX + 2: /* --strict */
message_print_style_uniforum ();
break;
case CHAR_MAX + 3: /* --suffix */
backup_suffix_string = optarg;
break;
case CHAR_MAX + 4: /* --no-wrap */
message_page_width_ignore ();
break;
case CHAR_MAX + 5: /* --stringtable-input */
input_syntax = &input_format_stringtable;
break;
case CHAR_MAX + 6: /* --stringtable-output */
output_syntax = &output_format_stringtable;
break;
case CHAR_MAX + 7: /* --previous */
keep_previous = true;
break;
case CHAR_MAX + 8: /* --lang */
catalogname = optarg;
break;
case CHAR_MAX + 9: /* --color */
if (handle_color_option (optarg) || color_test_mode)
usage (EXIT_FAILURE);
color = optarg;
break;
case CHAR_MAX + 10: /* --style */
handle_style_option (optarg);
break;
case CHAR_MAX + 11: /* --no-location */
message_print_style_filepos (filepos_comment_none);
break;
default:
usage (EXIT_FAILURE);
break;
}
/* Version information is requested. */
if (do_version)
{
printf ("%s (GNU %s) %s\n", basename (program_name), PACKAGE, VERSION);
/* xgettext: no-wrap */
printf (_("Copyright (C) %s Free Software Foundation, Inc.\n\
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>\n\
This is free software: you are free to change and redistribute it.\n\
There is NO WARRANTY, to the extent permitted by law.\n\
"),
"1995-1998, 2000-2016");
printf (_("Written by %s.\n"), proper_name ("Peter Miller"));
exit (EXIT_SUCCESS);
}
/* Help is requested. */
if (do_help)
usage (EXIT_SUCCESS);
/* Test whether we have an .po file name as argument. */
if (optind >= argc)
{
error (EXIT_SUCCESS, 0, _("no input files given"));
usage (EXIT_FAILURE);
}
if (optind + 2 != argc)
{
error (EXIT_SUCCESS, 0, _("exactly 2 input files required"));
usage (EXIT_FAILURE);
}
/* Verify selected options. */
/* --output-file, --color and --style conflict with --update, which writes
   back to def.po; --backup and --suffix only make sense with --update.  */
if (update_mode)
{
if (output_file != NULL)
{
error (EXIT_FAILURE, 0, _("%s and %s are mutually exclusive"),
"--update", "--output-file");
}
if (color != NULL)
{
error (EXIT_FAILURE, 0, _("%s and %s are mutually exclusive"),
"--update", "--color");
}
if (style_file_name != NULL)
{
error (EXIT_FAILURE, 0, _("%s and %s are mutually exclusive"),
"--update", "--style");
}
}
else
{
if (version_control_string != NULL)
{
error (EXIT_SUCCESS, 0, _("%s is only valid with %s"),
"--backup", "--update");
usage (EXIT_FAILURE);
}
if (backup_suffix_string != NULL)
{
error (EXIT_SUCCESS, 0, _("%s is only valid with %s"),
"--suffix", "--update");
usage (EXIT_FAILURE);
}
}
if (sort_by_msgid && sort_by_filepos)
error (EXIT_FAILURE, 0, _("%s and %s are mutually exclusive"),
"--sort-output", "--sort-by-file");
/* In update mode, --properties-input implies --properties-output. */
if (update_mode && input_syntax == &input_format_properties)
output_syntax = &output_format_properties;
/* In update mode, --stringtable-input implies --stringtable-output. */
if (update_mode && input_syntax == &input_format_stringtable)
output_syntax = &output_format_stringtable;
/* Merge the two files. */
result = merge (argv[optind], argv[optind + 1], input_syntax, &def);
/* Sort the results. */
if (sort_by_filepos)
msgdomain_list_sort_by_filepos (result);
else if (sort_by_msgid)
msgdomain_list_sort_by_msgid (result);
if (update_mode)
{
/* Before comparing result with def, sort the result into the same order
as would be done implicitly by output_syntax->print. */
if (output_syntax->sorts_obsoletes_to_end)
msgdomain_list_stablesort_by_obsolete (result);
/* Do nothing if the original file and the result are equal. Also do
nothing if the original file and the result differ only by the
POT-Creation-Date in the header entry; this is needed for projects
which don't put the .pot file under CVS. */
if (!msgdomain_list_equal (def, result, true))
{
/* Back up def.po. */
enum backup_type backup_type;
char *backup_file;
output_file = argv[optind];
if (backup_suffix_string == NULL)
{
/* Fall back to $SIMPLE_BACKUP_SUFFIX; an empty value counts as
   unset.  */
backup_suffix_string = getenv ("SIMPLE_BACKUP_SUFFIX");
if (backup_suffix_string != NULL
&& backup_suffix_string[0] == '\0')
backup_suffix_string = NULL;
}
if (backup_suffix_string != NULL)
simple_backup_suffix = backup_suffix_string;
backup_type = xget_version (_("backup type"), version_control_string);
if (backup_type != none)
{
backup_file = find_backup_file_name (output_file, backup_type);
copy_file_preserving (output_file, backup_file);
}
/* Write the merged message list out. */
msgdomain_list_print (result, output_file, output_syntax, true,
false);
}
}
else
{
/* Write the merged message list out. */
msgdomain_list_print (result, output_file, output_syntax, force_po,
false);
}
exit (EXIT_SUCCESS);
}
/* Display usage information and exit with STATUS.  For a non-success
   STATUS only a short hint is written to stderr; for EXIT_SUCCESS the
   complete --help text is written to stdout.  */
static void
usage (int status)
{
  if (status != EXIT_SUCCESS)
    fprintf (stderr, _("Try '%s --help' for more information.\n"),
             program_name);
  else
    {
      printf (_("\
Usage: %s [OPTION] def.po ref.pot\n\
"), program_name);
      printf ("\n");
      /* xgettext: no-wrap */
      printf (_("\
Merges two Uniforum style .po files together. The def.po file is an\n\
existing PO file with translations which will be taken over to the newly\n\
created file as long as they still match; comments will be preserved,\n\
but extracted comments and file positions will be discarded. The ref.pot\n\
file is the last created PO file with up-to-date source references but\n\
old translations, or a PO Template file (generally created by xgettext);\n\
any translations or comments in the file will be discarded, however dot\n\
comments and file positions will be preserved. Where an exact match\n\
cannot be found, fuzzy matching is used to produce better results.\n\
"));
      printf ("\n");
      printf (_("\
Mandatory arguments to long options are mandatory for short options too.\n"));
      printf ("\n");
      printf (_("\
Input file location:\n"));
      printf (_("\
def.po translations referring to old sources\n"));
      printf (_("\
ref.pot references to new sources\n"));
      printf (_("\
-D, --directory=DIRECTORY add DIRECTORY to list for input files search\n"));
      printf (_("\
-C, --compendium=FILE additional library of message translations,\n\
may be specified more than once\n"));
      printf ("\n");
      printf (_("\
Operation mode:\n"));
      printf (_("\
-U, --update update def.po,\n\
do nothing if def.po already up to date\n"));
      printf ("\n");
      printf (_("\
Output file location:\n"));
      printf (_("\
-o, --output-file=FILE write output to specified file\n"));
      printf (_("\
The results are written to standard output if no output file is specified\n\
or if it is -.\n"));
      printf ("\n");
      printf (_("\
Output file location in update mode:\n"));
      printf (_("\
The result is written back to def.po.\n"));
      printf (_("\
--backup=CONTROL make a backup of def.po\n"));
      printf (_("\
--suffix=SUFFIX override the usual backup suffix\n"));
      printf (_("\
The version control method may be selected via the --backup option or through\n\
the VERSION_CONTROL environment variable. Here are the values:\n\
none, off never make backups (even if --backup is given)\n\
numbered, t make numbered backups\n\
existing, nil numbered if numbered backups exist, simple otherwise\n\
simple, never always make simple backups\n"));
      printf (_("\
The backup suffix is '~', unless set with --suffix or the SIMPLE_BACKUP_SUFFIX\n\
environment variable.\n\
"));
      printf ("\n");
      printf (_("\
Operation modifiers:\n"));
      printf (_("\
-m, --multi-domain apply ref.pot to each of the domains in def.po\n"));
      printf (_("\
-N, --no-fuzzy-matching do not use fuzzy matching\n"));
      printf (_("\
--previous keep previous msgids of translated messages\n"));
      printf ("\n");
      printf (_("\
Input file syntax:\n"));
      printf (_("\
-P, --properties-input input files are in Java .properties syntax\n"));
      printf (_("\
--stringtable-input input files are in NeXTstep/GNUstep .strings\n\
syntax\n"));
      printf ("\n");
      printf (_("\
Output details:\n"));
      printf (_("\
--lang=CATALOGNAME set 'Language' field in the header entry\n"));
      printf (_("\
--color use colors and other text attributes always\n\
--color=WHEN use colors and other text attributes if WHEN.\n\
WHEN may be 'always', 'never', 'auto', or 'html'.\n"));
      printf (_("\
--style=STYLEFILE specify CSS style rule file for --color\n"));
      printf (_("\
-e, --no-escape do not use C escapes in output (default)\n"));
      printf (_("\
-E, --escape use C escapes in output, no extended chars\n"));
      printf (_("\
--force-po write PO file even if empty\n"));
      printf (_("\
-i, --indent indented output style\n"));
      printf (_("\
--no-location suppress '#: filename:line' lines\n"));
      printf (_("\
-n, --add-location preserve '#: filename:line' lines (default)\n"));
      printf (_("\
--strict strict Uniforum output style\n"));
      printf (_("\
-p, --properties-output write out a Java .properties file\n"));
      printf (_("\
--stringtable-output write out a NeXTstep/GNUstep .strings file\n"));
      printf (_("\
-w, --width=NUMBER set output page width\n"));
      printf (_("\
--no-wrap do not break long message lines, longer than\n\
the output page width, into several lines\n"));
      printf (_("\
-s, --sort-output generate sorted output\n"));
      printf (_("\
-F, --sort-by-file sort output by file location\n"));
      printf ("\n");
      printf (_("\
Informative output:\n"));
      printf (_("\
-h, --help display this help and exit\n"));
      printf (_("\
-V, --version output version information and exit\n"));
      printf (_("\
-v, --verbose increase verbosity level\n"));
      printf (_("\
-q, --quiet, --silent suppress progress indicators\n"));
      printf ("\n");
      /* TRANSLATORS: The placeholder indicates the bug-reporting address
         for this package. Please add _another line_ saying
         "Report translation bugs to <...>\n" with the address for translation
         bugs (typically your translation team's web or email address). */
      fputs (_("Report bugs to <bug-gnu-gettext@gnu.org>.\n"),
             stdout);
    }
  exit (status);
}
/* Read the PO file FILENAME and register the message list of each of its
   domains as an additional compendium, remembering the file name
   alongside each registered list.  */
static void
compendium (const char *filename)
{
  msgdomain_list_ty *catalog = read_catalog_file (filename, &input_format_po);
  size_t i;

  /* Allocate the compendium containers on first use.  */
  if (compendiums == NULL)
    {
      compendiums = message_list_list_alloc ();
      compendium_filenames = string_list_alloc ();
    }

  for (i = 0; i < catalog->nitems; i++)
    {
      message_list_list_append (compendiums, catalog->item[i]->messages);
      string_list_append (compendium_filenames, filename);
    }
}
/* Sorts obsolete messages to the end, for every domain.  The relative
   order within the non-obsolete group and within the obsolete group is
   preserved (a stable partition).  */
static void
msgdomain_list_stablesort_by_obsolete (msgdomain_list_ty *mdlp)
{
  size_t d;

  for (d = 0; d < mdlp->nitems; d++)
    {
      message_list_ty *mlp = mdlp->item[d]->messages;
      message_ty **live;
      message_ty **dead;
      size_t nlive, ndead, i;

      if (mlp->nitems == 0)
        continue;

      /* Partition the messages: non-obsolete ones into 'live', obsolete
         ones into 'dead', keeping the original order in each group.  */
      live = XNMALLOC (mlp->nitems, message_ty *);
      dead = XNMALLOC (mlp->nitems, message_ty *);
      nlive = 0;
      ndead = 0;
      for (i = 0; i < mlp->nitems; i++)
        {
          message_ty *mp = mlp->item[i];

          if (mp->obsolete)
            dead[ndead++] = mp;
          else
            live[nlive++] = mp;
        }

      /* Copy back only when both groups are non-empty; otherwise the
         list is already in the desired order.  */
      if (nlive > 0 && ndead > 0)
        {
          memcpy (mlp->item, live, nlive * sizeof (message_ty *));
          memcpy (mlp->item + nlive, dead, ndead * sizeof (message_ty *));
        }

      free (dead);
      free (live);
    }
}
/* Data structure representing the messages with known translations.
   They are composed of
     - A message list from def.po,
     - The compendiums.
   The data structure is optimized for exact and fuzzy searches. */
typedef struct definitions_ty definitions_ty;
struct definitions_ty
{
  /* A list of message lists. The first comes from def.po, the other ones
     from the compendiums. Each message list has a built-in hash table,
     for speed when doing the exact searches. */
  message_list_list_ty *lists;

  /* A fuzzy index of the current list of non-compendium messages, for speed
     when doing fuzzy searches. Used only if use_fuzzy_matching is true.
     Built lazily: NULL until the first fuzzy search. */
  message_fuzzy_index_ty *curr_findex;
  /* A once-only execution guard for the initialization of the fuzzy index.
     Needed for OpenMP. */
  gl_lock_define(, curr_findex_init_lock)

  /* A fuzzy index of the compendiums, for speed when doing fuzzy searches.
     Used only if use_fuzzy_matching is true and compendiums != NULL.
     Built lazily: NULL until the first fuzzy search. */
  message_fuzzy_index_ty *comp_findex;
  /* A once-only execution guard for the initialization of the fuzzy index.
     Needed for OpenMP. */
  gl_lock_define(, comp_findex_init_lock)

  /* The canonical encoding of the definitions and the compendiums.
     Only used for fuzzy matching. */
  const char *canon_charset;
};
/* Initialize DEFINITIONS.  Slot 0 of the list of lists is reserved for
   the current non-compendium message list (filled in later via
   definitions_set_current_list); the compendium lists, if any, follow.
   CANON_CHARSET is remembered for fuzzy matching.  */
static inline void
definitions_init (definitions_ty *definitions, const char *canon_charset)
{
  definitions->canon_charset = canon_charset;

  /* Reserve slot 0 for the current message list, then add the
     compendium lists after it.  */
  definitions->lists = message_list_list_alloc ();
  message_list_list_append (definitions->lists, NULL);
  if (compendiums != NULL)
    message_list_list_append_list (definitions->lists, compendiums);

  /* Both fuzzy indices are created lazily.  */
  definitions->curr_findex = NULL;
  gl_lock_init (definitions->curr_findex_init_lock);
  definitions->comp_findex = NULL;
  gl_lock_init (definitions->comp_findex_init_lock);
}
/* Return the current list of non-compendium messages. */
static inline message_list_ty *
definitions_current_list (const definitions_ty *definitions)
{
return definitions->lists->item[0];
}
/* Replace the current list of non-compendium messages with MLP.  Any
   fuzzy index built for the previous list is now stale, so discard it;
   it will be rebuilt lazily on the next fuzzy search.  */
static inline void
definitions_set_current_list (definitions_ty *definitions, message_list_ty *mlp)
{
  message_fuzzy_index_ty *stale = definitions->curr_findex;

  definitions->lists->item[0] = mlp;
  if (stale != NULL)
    {
      definitions->curr_findex = NULL;
      message_fuzzy_index_free (stale);
    }
}
/* Create the fuzzy index for the current list of non-compendium messages.
   Safe to call from several threads: the lock guarantees the index is
   allocated at most once.  Used only if use_fuzzy_matching is true.  */
static inline void
definitions_init_curr_findex (definitions_ty *definitions)
{
  /* Protect against concurrent execution.  */
  gl_lock_lock (definitions->curr_findex_init_lock);
  /* Re-check under the lock: another thread may have built it already.  */
  if (definitions->curr_findex == NULL)
    {
      message_list_ty *current = definitions_current_list (definitions);

      definitions->curr_findex =
        message_fuzzy_index_alloc (current, definitions->canon_charset);
    }
  gl_lock_unlock (definitions->curr_findex_init_lock);
}
/* Create the fuzzy index for the compendium messages.
   Safe against concurrent execution; the index is built at most once.
   Used only if use_fuzzy_matching is true and compendiums != NULL.  */
static inline void
definitions_init_comp_findex (definitions_ty *definitions)
{
  /* Protect against concurrent execution.  */
  gl_lock_lock (definitions->comp_findex_init_lock);
  if (definitions->comp_findex == NULL)
    {
      /* Flatten all compendium message lists into a single list.
         Duplicates are harmless here, so don't bother filtering them.  */
      message_list_ty *combined = message_list_alloc (false);
      size_t li;

      for (li = 0; li < compendiums->nitems; li++)
        {
          message_list_ty *one = compendiums->item[li];
          size_t mi;

          for (mi = 0; mi < one->nitems; mi++)
            message_list_append (combined, one->item[mi]);
        }

      /* Build the fuzzy index from the combined list.  */
      definitions->comp_findex =
        message_fuzzy_index_alloc (combined, definitions->canon_charset);
    }
  gl_lock_unlock (definitions->comp_findex_init_lock);
}
/* Exact search. */
static inline message_ty *
definitions_search (const definitions_ty *definitions,
const char *msgctxt, const char *msgid)
{
return message_list_list_search (definitions->lists, msgctxt, msgid);
}
/* Fuzzy search: find the definition message that best matches the given
   MSGCTXT and MSGID.  Searches the current non-compendium list first,
   then - if compendiums were given - the compendium index, and returns
   the best match found, or NULL if none reaches FUZZY_THRESHOLD.
   Used only if use_fuzzy_matching is true.
   Improvement: the dead "if (false) ... old, slow code" branch calling
   message_list_search_fuzzy was unreachable and has been removed.  */
static inline message_ty *
definitions_search_fuzzy (definitions_ty *definitions,
                          const char *msgctxt, const char *msgid)
{
  message_ty *mp1;

  /* Speedup through early abort in fstrcmp(), combined with pre-sorting
     of the messages through a hashed index.  */
  /* Create the fuzzy index lazily.  */
  if (definitions->curr_findex == NULL)
    definitions_init_curr_findex (definitions);

  mp1 = message_fuzzy_index_search (definitions->curr_findex,
                                    msgctxt, msgid,
                                    FUZZY_THRESHOLD, false);

  if (compendiums != NULL)
    {
      double lower_bound_for_mp2;
      message_ty *mp2;

      /* A compendium match is only interesting if it beats mp1 (or
         FUZZY_THRESHOLD, when no mp1 was found).  */
      lower_bound_for_mp2 =
        (mp1 != NULL
         ? fuzzy_search_goal_function (mp1, msgctxt, msgid, 0.0)
         : FUZZY_THRESHOLD);
      /* This lower bound must be >= FUZZY_THRESHOLD. */
      if (!(lower_bound_for_mp2 >= FUZZY_THRESHOLD))
        abort ();

      /* Create the fuzzy index lazily. */
      if (definitions->comp_findex == NULL)
        definitions_init_comp_findex (definitions);

      mp2 = message_fuzzy_index_search (definitions->comp_findex,
                                        msgctxt, msgid,
                                        lower_bound_for_mp2, true);

      /* Choose the best among mp1, mp2. */
      if (mp1 == NULL
          || (mp2 != NULL
              && (fuzzy_search_goal_function (mp2, msgctxt, msgid,
                                              lower_bound_for_mp2)
                  > lower_bound_for_mp2)))
        mp1 = mp2;
    }

  return mp1;
}
/* Release the resources held by DEFINITIONS: the list of lists and any
   fuzzy indices that were built lazily.  */
static inline void
definitions_destroy (definitions_ty *definitions)
{
  message_fuzzy_index_ty *findex;

  message_list_list_free (definitions->lists, 2);

  findex = definitions->curr_findex;
  if (findex != NULL)
    message_fuzzy_index_free (findex);

  findex = definitions->comp_findex;
  if (findex != NULL)
    message_fuzzy_index_free (findex);
}
/* A silent error logger. We are only interested in knowing whether errors
   occurred at all; the message text itself is discarded.  Passed to
   check_msgid_msgstr_format_i, whose return value reports the errors.  */
static void
silent_error_logger (const char *format, ...)
__attribute__ ((__format__ (__printf__, 1, 2)));
static void
silent_error_logger (const char *format, ...)
{
  /* Deliberately empty: swallow the formatted message.  */
}
/* Another silent error logger, with the po_xerror signature.  Installed
   temporarily in place of po_xerror while evaluating the plural formula
   (see match_domain), so that check_plural_eval produces no output.  */
static void
silent_xerror (int severity,
               const struct message_ty *message,
               const char *filename, size_t lineno, size_t column,
               int multiline_p, const char *message_text)
{
  /* Deliberately empty: discard the diagnostic.  */
}
/* Merge the reference message REF (from ref.pot) with its matching
   definition message DEF (from def.po or a compendium) into a freshly
   allocated message:
     - msgctxt, msgid, msgid_plural come from REF,
     - msgstr comes from DEF (combined field by field for the header entry),
     - #. comments and #: file positions come from REF,
     - # translator comments come from DEF.
   If FORCE_FUZZY is true, the result is unconditionally marked fuzzy
   (used for fuzzy matches).  DISTRIBUTION is passed through to the
   format-string checker.  The result's 'used' field is set to 1 or 2
   when REF and DEF disagree about having a plural form; the caller
   postprocesses those cases afterwards.  */
static message_ty *
message_merge (message_ty *def, message_ty *ref, bool force_fuzzy,
               const struct plural_distribution *distribution)
{
  const char *msgstr;
  size_t msgstr_len;
  const char *prev_msgctxt;
  const char *prev_msgid;
  const char *prev_msgid_plural;
  message_ty *result;
  size_t j, i;

  /* Take the msgid from the reference. When fuzzy matches are made,
     the definition will not be unique, but the reference will be -
     usually because it has only been slightly changed. */

  /* Take the msgstr from the definition. The msgstr of the reference
     is usually empty, as it was generated by xgettext. If we currently
     process the header entry we have to merge the msgstr by using the
     Report-Msgid-Bugs-To and POT-Creation-Date fields from the reference. */
  if (is_header (ref))
    {
      /* Oh, oh. The header entry and we have something to fill in. */
      static const struct
      {
        const char *name;
        size_t len;
      } known_fields[] =
      {
        { "Project-Id-Version:", sizeof ("Project-Id-Version:") - 1 },
#define PROJECT_ID 0
        { "Report-Msgid-Bugs-To:", sizeof ("Report-Msgid-Bugs-To:") - 1 },
#define REPORT_MSGID_BUGS_TO 1
        { "POT-Creation-Date:", sizeof ("POT-Creation-Date:") - 1 },
#define POT_CREATION_DATE 2
        { "PO-Revision-Date:", sizeof ("PO-Revision-Date:") - 1 },
#define PO_REVISION_DATE 3
        { "Last-Translator:", sizeof ("Last-Translator:") - 1 },
#define LAST_TRANSLATOR 4
        { "Language-Team:", sizeof ("Language-Team:") - 1 },
#define LANGUAGE_TEAM 5
        { "Language:", sizeof ("Language:") - 1 },
#define LANGUAGE 6
        { "MIME-Version:", sizeof ("MIME-Version:") - 1 },
#define MIME_VERSION 7
        { "Content-Type:", sizeof ("Content-Type:") - 1 },
#define CONTENT_TYPE 8
        { "Content-Transfer-Encoding:",
          sizeof ("Content-Transfer-Encoding:") - 1 }
#define CONTENT_TRANSFER 9
      };
#define UNKNOWN 10
      /* Parsed header of DEF: for each known field, a pointer to the
         field's value and its length; slot UNKNOWN accumulates all
         unrecognized header lines, concatenated.  */
      struct
      {
        const char *string;
        size_t len;
      } header_fields[UNKNOWN + 1];
      struct obstack pool;
      const char *cp;
      char *newp;
      size_t len, cnt;

      /* Clear all fields. */
      memset (header_fields, '\0', sizeof (header_fields));

      /* Prepare a temporary memory pool. */
      obstack_init (&pool);

      /* Split DEF's header msgstr into newline-terminated lines and sort
         each one into header_fields[].  */
      cp = def->msgstr;
      while (*cp != '\0')
        {
          const char *endp = strchr (cp, '\n');
          int terminated = endp != NULL;

          if (!terminated)
            {
              /* Add a trailing newline. */
              char *copy;

              endp = strchr (cp, '\0');

              len = endp - cp + 1;

              copy = (char *) obstack_alloc (&pool, len + 1);
              stpcpy (stpcpy (copy, cp), "\n");
              cp = copy;
            }
          else
            {
              len = (endp - cp) + 1;
              ++endp;
            }

          /* Compare with any of the known fields. */
          for (cnt = 0;
               cnt < sizeof (known_fields) / sizeof (known_fields[0]);
               ++cnt)
            if (c_strncasecmp (cp, known_fields[cnt].name,
                               known_fields[cnt].len) == 0)
              break;

          if (cnt < sizeof (known_fields) / sizeof (known_fields[0]))
            {
              header_fields[cnt].string = &cp[known_fields[cnt].len];
              header_fields[cnt].len = len - known_fields[cnt].len;
            }
          else
            {
              /* It's an unknown field. Append content to what is already
                 known. */
              char *extended =
                (char *) obstack_alloc (&pool,
                                        header_fields[UNKNOWN].len + len + 1);

              if (header_fields[UNKNOWN].string)
                memcpy (extended, header_fields[UNKNOWN].string,
                        header_fields[UNKNOWN].len);
              memcpy (&extended[header_fields[UNKNOWN].len], cp, len);
              extended[header_fields[UNKNOWN].len + len] = '\0';
              header_fields[UNKNOWN].string = extended;
              header_fields[UNKNOWN].len += len;
            }

          cp = endp;
        }

      /* Set the Language field if specified on the command line. */
      if (catalogname != NULL)
        {
          /* Prepend a space and append a newline. */
          size_t len = strlen (catalogname);
          char *copy = (char *) obstack_alloc (&pool, 1 + len + 1 + 1);
          stpcpy (stpcpy (stpcpy (copy, " "), catalogname), "\n");
          header_fields[LANGUAGE].string = copy;
          header_fields[LANGUAGE].len = strlen (header_fields[LANGUAGE].string);
        }
      /* Add a Language field to PO files that don't have one. The Language
         field was introduced in gettext-0.18. */
      else if (header_fields[LANGUAGE].string == NULL)
        {
          /* Try to deduce the language code from the English language
             name in the Language-Team field.  */
          const char *language_team_ptr = header_fields[LANGUAGE_TEAM].string;

          if (language_team_ptr != NULL)
            {
              size_t language_team_len = header_fields[LANGUAGE_TEAM].len;

              /* Trim leading blanks. */
              while (language_team_len > 0
                     && (*language_team_ptr == ' '
                         || *language_team_ptr == '\t'))
                {
                  language_team_ptr++;
                  language_team_len--;
                }

              /* Trim trailing blanks. */
              while (language_team_len > 0
                     && (language_team_ptr[language_team_len - 1] == ' '
                         || language_team_ptr[language_team_len - 1] == '\t'))
                language_team_len--;

              /* Trim last word, if it looks like an URL or email address. */
              {
                size_t i;

                for (i = language_team_len; i > 0; i--)
                  if (language_team_ptr[i - 1] == ' '
                      || language_team_ptr[i - 1] == '\t')
                    break;
                /* The last word: language_team_ptr[i..language_team_len-1]. */
                if (i < language_team_len
                    && (language_team_ptr[i] == '<'
                        || language_team_ptr[language_team_len - 1] == '>'
                        || memchr (language_team_ptr, '@', language_team_len)
                           != NULL
                        || memchr (language_team_ptr, '/', language_team_len)
                           != NULL))
                  {
                    /* Trim last word and blanks before it. */
                    while (i > 0
                           && (language_team_ptr[i - 1] == ' '
                               || language_team_ptr[i - 1] == '\t'))
                      i--;
                    language_team_len = i;
                  }
              }

              /* The rest of the Language-Team field should be the english
                 name of the language. Convert to ISO 639 and ISO 3166
                 syntax. */
              {
                size_t i;

                for (i = 0; i < language_variant_table_size; i++)
                  if (strlen (language_variant_table[i].english)
                      == language_team_len
                      && memcmp (language_variant_table[i].english,
                                 language_team_ptr, language_team_len) == 0)
                    {
                      header_fields[LANGUAGE].string =
                        language_variant_table[i].code;
                      break;
                    }
              }
              if (header_fields[LANGUAGE].string == NULL)
                {
                  size_t i;

                  for (i = 0; i < language_table_size; i++)
                    if (strlen (language_table[i].english) == language_team_len
                        && memcmp (language_table[i].english,
                                   language_team_ptr, language_team_len) == 0)
                      {
                        header_fields[LANGUAGE].string = language_table[i].code;
                        break;
                      }
                }
              if (header_fields[LANGUAGE].string != NULL)
                {
                  /* Prepend a space and append a newline. */
                  const char *str = header_fields[LANGUAGE].string;
                  size_t len = strlen (str);
                  char *copy = (char *) obstack_alloc (&pool, 1 + len + 1 + 1);
                  stpcpy (stpcpy (stpcpy (copy, " "), str), "\n");
                  header_fields[LANGUAGE].string = copy;
                }
              else
                /* Deduction failed: emit an empty Language field.  */
                header_fields[LANGUAGE].string = " \n";
              header_fields[LANGUAGE].len =
                strlen (header_fields[LANGUAGE].string);
            }
        }

      /* Take the Report-Msgid-Bugs-To field from the reference header.  */
      {
        const char *msgid_bugs_ptr;

        msgid_bugs_ptr = c_strstr (ref->msgstr, "Report-Msgid-Bugs-To:");
        if (msgid_bugs_ptr != NULL)
          {
            size_t msgid_bugs_len;
            const char *endp;

            msgid_bugs_ptr += sizeof ("Report-Msgid-Bugs-To:") - 1;

            endp = strchr (msgid_bugs_ptr, '\n');
            if (endp == NULL)
              {
                /* Add a trailing newline. */
                char *extended;
                endp = strchr (msgid_bugs_ptr, '\0');
                msgid_bugs_len = (endp - msgid_bugs_ptr) + 1;
                extended = (char *) obstack_alloc (&pool, msgid_bugs_len + 1);
                stpcpy (stpcpy (extended, msgid_bugs_ptr), "\n");
                msgid_bugs_ptr = extended;
              }
            else
              msgid_bugs_len = (endp - msgid_bugs_ptr) + 1;

            header_fields[REPORT_MSGID_BUGS_TO].string = msgid_bugs_ptr;
            header_fields[REPORT_MSGID_BUGS_TO].len = msgid_bugs_len;
          }
      }

      /* Take the POT-Creation-Date field from the reference header.  */
      {
        const char *pot_date_ptr;

        pot_date_ptr = c_strstr (ref->msgstr, "POT-Creation-Date:");
        if (pot_date_ptr != NULL)
          {
            size_t pot_date_len;
            const char *endp;

            pot_date_ptr += sizeof ("POT-Creation-Date:") - 1;

            endp = strchr (pot_date_ptr, '\n');
            if (endp == NULL)
              {
                /* Add a trailing newline. */
                char *extended;
                endp = strchr (pot_date_ptr, '\0');
                pot_date_len = (endp - pot_date_ptr) + 1;
                extended = (char *) obstack_alloc (&pool, pot_date_len + 1);
                stpcpy (stpcpy (extended, pot_date_ptr), "\n");
                pot_date_ptr = extended;
              }
            else
              pot_date_len = (endp - pot_date_ptr) + 1;

            header_fields[POT_CREATION_DATE].string = pot_date_ptr;
            header_fields[POT_CREATION_DATE].len = pot_date_len;
          }
      }

      /* Concatenate all the various fields. */
      len = 0;
      for (cnt = 0; cnt < UNKNOWN; ++cnt)
        if (header_fields[cnt].string != NULL)
          len += known_fields[cnt].len + header_fields[cnt].len;
      len += header_fields[UNKNOWN].len;

      cp = newp = XNMALLOC (len + 1, char);
      newp[len] = '\0';

#define IF_FILLED(idx) \
      if (header_fields[idx].string) \
        newp = stpncpy (stpcpy (newp, known_fields[idx].name), \
                        header_fields[idx].string, header_fields[idx].len)

      /* Known fields are emitted in this fixed, canonical order; the
         unknown lines are appended last, in their original order.  */
      IF_FILLED (PROJECT_ID);
      IF_FILLED (REPORT_MSGID_BUGS_TO);
      IF_FILLED (POT_CREATION_DATE);
      IF_FILLED (PO_REVISION_DATE);
      IF_FILLED (LAST_TRANSLATOR);
      IF_FILLED (LANGUAGE_TEAM);
      IF_FILLED (LANGUAGE);
      IF_FILLED (MIME_VERSION);
      IF_FILLED (CONTENT_TYPE);
      IF_FILLED (CONTENT_TRANSFER);
      if (header_fields[UNKNOWN].string != NULL)
        stpcpy (newp, header_fields[UNKNOWN].string);

#undef IF_FILLED

      /* Free the temporary memory pool. */
      obstack_free (&pool, NULL);

      msgstr = cp;
      msgstr_len = strlen (cp) + 1;

      prev_msgctxt = NULL;
      prev_msgid = NULL;
      prev_msgid_plural = NULL;
    }
  else
    {
      msgstr = def->msgstr;
      msgstr_len = def->msgstr_len;

      /* The "previous msgid" of the result: if DEF is itself fuzzy,
         propagate its previous msgid; otherwise DEF's own msgid is the
         one that led to its msgstr.  */
      if (def->is_fuzzy)
        {
          prev_msgctxt = def->prev_msgctxt;
          prev_msgid = def->prev_msgid;
          prev_msgid_plural = def->prev_msgid_plural;
        }
      else
        {
          prev_msgctxt = def->msgctxt;
          prev_msgid = def->msgid;
          prev_msgid_plural = def->msgid_plural;
        }
    }

  result = message_alloc (ref->msgctxt != NULL ? xstrdup (ref->msgctxt) : NULL,
                          xstrdup (ref->msgid), ref->msgid_plural,
                          msgstr, msgstr_len, &def->pos);

  /* Take the comments from the definition file. There will be none at
     all in the reference file, as it was generated by xgettext. */
  if (def->comment)
    for (j = 0; j < def->comment->nitems; ++j)
      message_comment_append (result, def->comment->item[j]);

  /* Take the dot comments from the reference file, as they are
     generated by xgettext. Any in the definition file are old ones
     collected by previous runs of xgettext and msgmerge. */
  if (ref->comment_dot)
    for (j = 0; j < ref->comment_dot->nitems; ++j)
      message_comment_dot_append (result, ref->comment_dot->item[j]);

  /* The flags are mixed in a special way. Some informations come
     from the reference message (such as format/no-format), others
     come from the definition file (fuzzy or not). */
  result->is_fuzzy = def->is_fuzzy | force_fuzzy;

  /* If ref and def have the same msgid but different msgid_plural, it's
     a reason to mark the result fuzzy. */
  if (!result->is_fuzzy
      && (ref->msgid_plural != NULL
          ? def->msgid_plural == NULL
            || strcmp (ref->msgid_plural, def->msgid_plural) != 0
          : def->msgid_plural != NULL))
    result->is_fuzzy = true;

  for (i = 0; i < NFORMATS; i++)
    {
      result->is_format[i] = ref->is_format[i];

      /* If the reference message is marked as being a format specifier,
         but the definition message is not, we check if the resulting
         message would pass "msgfmt -c". If yes, then all is fine. If
         not, we add a fuzzy marker, because
         1. the message needs the translator's attention,
         2. msgmerge must not transform a PO file which passes "msgfmt -c"
            into a PO file which doesn't. */
      if (!result->is_fuzzy
          && possible_format_p (ref->is_format[i])
          && !possible_format_p (def->is_format[i])
          && check_msgid_msgstr_format_i (ref->msgid, ref->msgid_plural,
                                          msgstr, msgstr_len, i, ref->range,
                                          distribution, silent_error_logger)
             > 0)
        result->is_fuzzy = true;
    }

  result->range = ref->range;
  /* If the definition message was assuming a certain range, but the reference
     message does not specify a range any more or specifies a range that is
     not the same or a subset, we add a fuzzy marker, because
     1. the message needs the translator's attention,
     2. msgmerge must not transform a PO file which passes "msgfmt -c"
        into a PO file which doesn't. */
  if (!result->is_fuzzy
      && has_range_p (def->range)
      && !(has_range_p (ref->range)
           && ref->range.min >= def->range.min
           && ref->range.max <= def->range.max))
    result->is_fuzzy = true;

  result->do_wrap = ref->do_wrap;

  for (i = 0; i < NSYNTAXCHECKS; i++)
    result->do_syntax_check[i] = ref->do_syntax_check[i];

  /* Insert previous msgid, commented out with "#|".
     Do so only when --previous is specified, for backward compatibility.
     Since the "previous msgid" represents the original msgid that led to
     the current msgstr,
       - we can omit it if the resulting message is not fuzzy or is
         untranslated (but do this in a later pass, since result->is_fuzzy
         is not finalized at this point),
       - otherwise, if the corresponding message from the definition file
         was translated (not fuzzy), we use that message's msgid,
       - otherwise, we use that message's prev_msgid. */
  if (keep_previous)
    {
      result->prev_msgctxt = prev_msgctxt;
      result->prev_msgid = prev_msgid;
      result->prev_msgid_plural = prev_msgid_plural;
    }

  /* If the reference message was obsolete, make the resulting message
     obsolete. This case doesn't occur for POT files, but users sometimes
     use PO files that are themselves the result of msgmerge instead of POT
     files. */
  result->obsolete = ref->obsolete;

  /* Take the file position comments from the reference file, as they
     are generated by xgettext. Any in the definition file are old ones
     collected by previous runs of xgettext and msgmerge. */
  for (j = 0; j < ref->filepos_count; ++j)
    {
      lex_pos_ty *pp = &ref->filepos[j];
      message_comment_filepos (result, pp->file_name, pp->line_number);
    }

  /* Special postprocessing is needed if the reference message is a
     plural form and the definition message isn't, or vice versa.
     Only mark the case here; the caller fixes the msgstr afterwards. */
  if (ref->msgid_plural != NULL)
    {
      if (def->msgid_plural == NULL)
        result->used = 1;
    }
  else
    {
      if (def->msgid_plural != NULL)
        result->used = 2;
    }

  /* All done, return the merged message to the caller. */
  return result;
}
/* After how many handled messages a progress dot is printed.  */
#define DOT_FREQUENCY 10

/* Merge one domain: match every message of REFMLP (from the reference
   file) against DEFINITIONS (from the definition file FN1 and the
   compendiums) and append the merged messages to RESULTMLP.
   STATS accumulates the merged/fuzzied/missing counters; *PROCESSED is
   incremented per reference message and drives the progress dots.
   (FN2 is currently unused in this function.)  */
static void
match_domain (const char *fn1, const char *fn2,
              definitions_ty *definitions, message_list_ty *refmlp,
              message_list_ty *resultmlp,
              struct statistics *stats, unsigned int *processed)
{
  message_ty *header_entry;
  unsigned long int nplurals;
  const struct expression *plural_expr;
  char *untranslated_plural_msgstr;
  struct plural_distribution distribution;
  struct search_result { message_ty *found; bool fuzzy; } *search_results;
  size_t j;

  header_entry =
    message_list_search (definitions_current_list (definitions), NULL, "");
  extract_plural_expression (header_entry ? header_entry->msgstr : NULL,
                             &plural_expr, &nplurals);
  /* nplurals NUL bytes: the untranslated msgstr for plural entries.  */
  untranslated_plural_msgstr = XNMALLOC (nplurals, char);
  memset (untranslated_plural_msgstr, '\0', nplurals);

  /* Determine the plural distribution of the plural_expr formula. */
  {
    /* Disable error output temporarily. */
    void (*old_po_xerror) (int, const struct message_ty *, const char *, size_t,
                           size_t, int, const char *)
      = po_xerror;
    po_xerror = silent_xerror;

    if (check_plural_eval (plural_expr, nplurals, header_entry,
                           &distribution) > 0)
      {
        /* The plural formula failed the check: proceed without
           distribution information.  */
        distribution.expr = NULL;
        distribution.often = NULL;
        distribution.often_length = 0;
        distribution.histogram = NULL;
      }

    po_xerror = old_po_xerror;
  }

  /* Most of the time is spent in definitions_search_fuzzy.
     Perform it in a separate loop that can be parallelized by an OpenMP
     capable compiler. */
  search_results = XNMALLOC (refmlp->nitems, struct search_result);
  {
    long int nn = refmlp->nitems;
    long int jj;

    /* Tell the OpenMP capable compiler to distribute this loop across
       several threads. The schedule is dynamic, because for some messages
       the loop body can be executed very quickly, whereas for others it takes
       a long time.
       Note: The Sun Workshop 6.2 C compiler does not allow a space between
       '#' and 'pragma'. */
#ifdef _OPENMP
#pragma omp parallel for schedule(dynamic)
#endif
    for (jj = 0; jj < nn; jj++)
      {
        message_ty *refmsg = refmlp->item[jj];
        message_ty *defmsg;

        /* Because merging can take a while we print something to signal
           we are not dead. */
        if (!quiet && verbosity_level <= 1 && *processed % DOT_FREQUENCY == 0)
          fputc ('.', stderr);
#ifdef _OPENMP
#pragma omp atomic
#endif
        (*processed)++;

        /* See if it is in the other file. */
        defmsg =
          definitions_search (definitions, refmsg->msgctxt, refmsg->msgid);
        if (defmsg != NULL)
          {
            search_results[jj].found = defmsg;
            search_results[jj].fuzzy = false;
          }
        else if (!is_header (refmsg)
                 /* If the message was not defined at all, try to find a very
                    similar message, it could be a typo, or the suggestion may
                    help. */
                 && use_fuzzy_matching
                 && ((defmsg =
                        definitions_search_fuzzy (definitions,
                                                  refmsg->msgctxt,
                                                  refmsg->msgid)) != NULL))
          {
            search_results[jj].found = defmsg;
            search_results[jj].fuzzy = true;
          }
        else
          search_results[jj].found = NULL;
      }
  }

  /* Second pass, serial: merge each reference message with the search
     result found above and append it to RESULTMLP.  */
  for (j = 0; j < refmlp->nitems; j++)
    {
      message_ty *refmsg = refmlp->item[j];

      /* See if it is in the other file.
         This used definitions_search. */
      if (search_results[j].found != NULL && !search_results[j].fuzzy)
        {
          message_ty *defmsg = search_results[j].found;
          /* Merge the reference with the definition: take the #. and
             #: comments from the reference, take the # comments from
             the definition, take the msgstr from the definition. Add
             this merged entry to the output message list. */
          message_ty *mp =
            message_merge (defmsg, refmsg, false, &distribution);

          message_list_append (resultmlp, mp);

          /* Remember that this message has been used, when we scan
             later to see if anything was omitted. */
          defmsg->used = 1;
          stats->merged++;
        }
      else if (!is_header (refmsg))
        {
          /* If the message was not defined at all, try to find a very
             similar message, it could be a typo, or the suggestion may
             help. This search assumed use_fuzzy_matching and used
             definitions_search_fuzzy. */
          if (search_results[j].found != NULL && search_results[j].fuzzy)
            {
              message_ty *defmsg = search_results[j].found;
              message_ty *mp;

              if (verbosity_level > 1)
                {
                  po_gram_error_at_line (&refmsg->pos, _("\
this message is used but not defined..."));
                  /* The two diagnostics form one logical message; do not
                     count them as two errors.  */
                  error_message_count--;
                  po_gram_error_at_line (&defmsg->pos, _("\
...but this definition is similar"));
                }

              /* Merge the reference with the definition: take the #. and
                 #: comments from the reference, take the # comments from
                 the definition, take the msgstr from the definition. Add
                 this merged entry to the output message list. */
              mp = message_merge (defmsg, refmsg, true, &distribution);

              message_list_append (resultmlp, mp);

              /* Remember that this message has been used, when we scan
                 later to see if anything was omitted. */
              defmsg->used = 1;
              stats->fuzzied++;
              if (!quiet && verbosity_level <= 1)
                /* Always print a dot if we handled a fuzzy match. */
                fputc ('.', stderr);
            }
          else
            {
              message_ty *mp;
              bool is_untranslated;
              const char *p;
              const char *pend;

              if (verbosity_level > 1)
                po_gram_error_at_line (&refmsg->pos, _("\
this message is used but not defined in %s"), fn1);

              mp = message_copy (refmsg);

              if (mp->msgid_plural != NULL)
                {
                  /* Test if mp is untranslated. (It most likely is.) */
                  is_untranslated = true;
                  for (p = mp->msgstr, pend = p + mp->msgstr_len; p < pend; p++)
                    if (*p != '\0')
                      {
                        is_untranslated = false;
                        break;
                      }
                  if (is_untranslated)
                    {
                      /* Change mp->msgstr_len consecutive empty strings into
                         nplurals consecutive empty strings. */
                      if (nplurals > mp->msgstr_len)
                        mp->msgstr = untranslated_plural_msgstr;
                      mp->msgstr_len = nplurals;
                    }
                }

              message_list_append (resultmlp, mp);
              stats->missing++;
            }
        }
    }

  free (search_results);

  /* Now postprocess the problematic merges. This is needed because we
     want the result to pass the "msgfmt -c -v" check. */
  {
    /* message_merge sets mp->used to 1 or 2, depending on the problem.
       Compute the bitwise OR of all these. */
    int problematic = 0;

    for (j = 0; j < resultmlp->nitems; j++)
      problematic |= resultmlp->item[j]->used;

    if (problematic)
      {
        unsigned long int nplurals = 0;

        if (problematic & 1)
          {
            /* Need to know nplurals of the result domain. */
            message_ty *header_entry =
              message_list_search (resultmlp, NULL, "");

            nplurals = get_plural_count (header_entry
                                         ? header_entry->msgstr
                                         : NULL);
          }

        for (j = 0; j < resultmlp->nitems; j++)
          {
            message_ty *mp = resultmlp->item[j];

            if ((mp->used & 1) && (nplurals > 0))
              {
                /* ref->msgid_plural != NULL but def->msgid_plural == NULL.
                   Use a copy of def->msgstr for each possible plural form. */
                size_t new_msgstr_len;
                char *new_msgstr;
                char *p;
                unsigned long i;

                if (verbosity_level > 1)
                  {
                    po_gram_error_at_line (&mp->pos, _("\
this message should define plural forms"));
                  }

                new_msgstr_len = nplurals * mp->msgstr_len;
                new_msgstr = XNMALLOC (new_msgstr_len, char);
                for (i = 0, p = new_msgstr; i < nplurals; i++)
                  {
                    memcpy (p, mp->msgstr, mp->msgstr_len);
                    p += mp->msgstr_len;
                  }
                mp->msgstr = new_msgstr;
                mp->msgstr_len = new_msgstr_len;
                mp->is_fuzzy = true;
              }

            if ((mp->used & 2) && (mp->msgstr_len > strlen (mp->msgstr) + 1))
              {
                /* ref->msgid_plural == NULL but def->msgid_plural != NULL.
                   Use only the first among the plural forms. */
                if (verbosity_level > 1)
                  {
                    po_gram_error_at_line (&mp->pos, _("\
this message should not define plural forms"));
                  }

                mp->msgstr_len = strlen (mp->msgstr) + 1;
                mp->is_fuzzy = true;
              }

            /* Postprocessing of this message is done. */
            mp->used = 0;
          }
      }
  }

  /* Now that mp->is_fuzzy is finalized for all messages, remove the
     "previous msgid" information from all messages that are not fuzzy or
     are untranslated. */
  for (j = 0; j < resultmlp->nitems; j++)
    {
      message_ty *mp = resultmlp->item[j];

      if (!mp->is_fuzzy || mp->msgstr[0] == '\0')
        {
          mp->prev_msgctxt = NULL;
          mp->prev_msgid = NULL;
          mp->prev_msgid_plural = NULL;
        }
    }
}
/* Merge the references file FN2 against the definitions file FN1.
   Both are read with INPUT_SYNTAX.  Returns the merged message list;
   the (possibly re-encoded) definitions list is passed back through
   *DEFP so the caller can keep it alive.  */
static msgdomain_list_ty *
merge (const char *fn1, const char *fn2, catalog_input_format_ty input_syntax,
       msgdomain_list_ty **defp)
{
  msgdomain_list_ty *def;
  msgdomain_list_ty *ref;
  size_t j, k;
  unsigned int processed;
  struct statistics stats;
  msgdomain_list_ty *result;
  const char *def_canon_charset;
  definitions_ty definitions;
  message_list_ty *empty_list;

  stats.merged = stats.fuzzied = stats.missing = stats.obsolete = 0;

  /* This is the definitions file, created by a human.  */
  def = read_catalog_file (fn1, input_syntax);

  /* This is the references file, created by groping the sources with
     the xgettext program.  */
  ref = read_catalog_file (fn2, input_syntax);

  /* Add a dummy header entry, if the references file contains none.  */
  for (k = 0; k < ref->nitems; k++)
    if (message_list_search (ref->item[k]->messages, NULL, "") == NULL)
      {
        static lex_pos_ty pos = { __FILE__, __LINE__ };
        message_ty *refheader = message_alloc (NULL, "", NULL, "", 1, &pos);

        message_list_prepend (ref->item[k]->messages, refheader);
      }

  /* The references file can be either in ASCII or in UTF-8.  If it is
     in UTF-8, we have to convert the definitions and the compendiums to
     UTF-8 as well.  */
  {
    bool was_utf8 = false;
    /* Scan every non-obsolete header for a "charset=UTF-8" declaration.  */
    for (k = 0; k < ref->nitems; k++)
      {
        message_list_ty *mlp = ref->item[k]->messages;

        for (j = 0; j < mlp->nitems; j++)
          if (is_header (mlp->item[j]) && !mlp->item[j]->obsolete)
            {
              const char *header = mlp->item[j]->msgstr;

              if (header != NULL)
                {
                  const char *charsetstr = c_strstr (header, "charset=");

                  if (charsetstr != NULL)
                    {
                      size_t len;

                      charsetstr += strlen ("charset=");
                      len = strcspn (charsetstr, " \t\n");
                      if (len == strlen ("UTF-8")
                          && c_strncasecmp (charsetstr, "UTF-8", len) == 0)
                        was_utf8 = true;
                    }
                }
            }
      }
    if (was_utf8)
      {
        /* References are UTF-8: bring the definitions and all
           compendiums into UTF-8 too.  */
        def = iconv_msgdomain_list (def, "UTF-8", true, fn1);
        if (compendiums != NULL)
          for (k = 0; k < compendiums->nitems; k++)
            iconv_message_list (compendiums->item[k], NULL, po_charset_utf8,
                                compendium_filenames->item[k]);
      }
    else if (compendiums != NULL && compendiums->nitems > 0)
      {
        /* Ensure that the definitions and the compendiums are in the same
           encoding.  Prefer the encoding of the definitions file, if
           possible; otherwise, if the definitions file is empty and the
           compendiums are all in the same encoding, use that encoding;
           otherwise, use UTF-8.  */
        bool conversion_done = false;
        {
          char *charset = NULL;

          /* Get the encoding of the definitions file.  */
          for (k = 0; k < def->nitems; k++)
            {
              message_list_ty *mlp = def->item[k]->messages;

              for (j = 0; j < mlp->nitems; j++)
                if (is_header (mlp->item[j]) && !mlp->item[j]->obsolete)
                  {
                    const char *header = mlp->item[j]->msgstr;

                    if (header != NULL)
                      {
                        const char *charsetstr = c_strstr (header, "charset=");

                        if (charsetstr != NULL)
                          {
                            size_t len;

                            charsetstr += strlen ("charset=");
                            len = strcspn (charsetstr, " \t\n");
                            charset = (char *) xmalloca (len + 1);
                            memcpy (charset, charsetstr, len);
                            charset[len] = '\0';
                            break;
                          }
                      }
                  }
              if (charset != NULL)
                break;
            }
          if (charset != NULL)
            {
              const char *canon_charset = po_charset_canonicalize (charset);

              if (canon_charset != NULL)
                {
                  /* Only convert the compendiums to def's charset when
                     every single one of them can be converted losslessly.  */
                  bool all_compendiums_iconvable = true;

                  if (compendiums != NULL)
                    for (k = 0; k < compendiums->nitems; k++)
                      if (!is_message_list_iconvable (compendiums->item[k],
                                                      NULL, canon_charset))
                        {
                          all_compendiums_iconvable = false;
                          break;
                        }

                  if (all_compendiums_iconvable)
                    {
                      /* Convert the compendiums to def's encoding.  */
                      if (compendiums != NULL)
                        for (k = 0; k < compendiums->nitems; k++)
                          iconv_message_list (compendiums->item[k],
                                              NULL, canon_charset,
                                              compendium_filenames->item[k]);
                      conversion_done = true;
                    }
                }
              freea (charset);
            }
        }
        if (!conversion_done)
          {
            if (def->nitems == 0
                || (def->nitems == 1 && def->item[0]->messages->nitems == 0))
              {
                /* The definitions file is empty.
                   Compare the encodings of the compendiums.  */
                const char *common_canon_charset = NULL;

                for (k = 0; k < compendiums->nitems; k++)
                  {
                    message_list_ty *mlp = compendiums->item[k];
                    char *charset = NULL;
                    const char *canon_charset = NULL;

                    for (j = 0; j < mlp->nitems; j++)
                      if (is_header (mlp->item[j]) && !mlp->item[j]->obsolete)
                        {
                          const char *header = mlp->item[j]->msgstr;

                          if (header != NULL)
                            {
                              const char *charsetstr =
                                c_strstr (header, "charset=");

                              if (charsetstr != NULL)
                                {
                                  size_t len;

                                  charsetstr += strlen ("charset=");
                                  len = strcspn (charsetstr, " \t\n");
                                  charset = (char *) xmalloca (len + 1);
                                  memcpy (charset, charsetstr, len);
                                  charset[len] = '\0';
                                  break;
                                }
                            }
                        }
                    if (charset != NULL)
                      {
                        canon_charset = po_charset_canonicalize (charset);
                        freea (charset);
                      }
                    /* If no charset declaration was found in this file,
                       or if it is not a valid encoding name, or if it
                       differs from the common charset found so far,
                       we have no common charset.  */
                    if (canon_charset == NULL
                        || (common_canon_charset != NULL
                            && canon_charset != common_canon_charset))
                      {
                        common_canon_charset = NULL;
                        break;
                      }
                    common_canon_charset = canon_charset;
                  }

                if (common_canon_charset != NULL)
                  /* No conversion needed in this case.  */
                  conversion_done = true;
              }
            if (!conversion_done)
              {
                /* It's too hairy to find out what would be the optimal target
                   encoding.  So, convert everything to UTF-8.  */
                def = iconv_msgdomain_list (def, "UTF-8", true, fn1);
                if (compendiums != NULL)
                  for (k = 0; k < compendiums->nitems; k++)
                    iconv_message_list (compendiums->item[k],
                                        NULL, po_charset_utf8,
                                        compendium_filenames->item[k]);
              }
          }
      }
  }

  /* Determine canonicalized encoding name of the definitions now, after
     conversion.  Only used for fuzzy matching.  */
  if (use_fuzzy_matching)
    {
      def_canon_charset = def->encoding;
      if (def_canon_charset == NULL)
        {
          char *charset = NULL;

          /* Get the encoding of the definitions file.  */
          for (k = 0; k < def->nitems; k++)
            {
              message_list_ty *mlp = def->item[k]->messages;

              for (j = 0; j < mlp->nitems; j++)
                if (is_header (mlp->item[j]) && !mlp->item[j]->obsolete)
                  {
                    const char *header = mlp->item[j]->msgstr;

                    if (header != NULL)
                      {
                        const char *charsetstr = c_strstr (header, "charset=");

                        if (charsetstr != NULL)
                          {
                            size_t len;

                            charsetstr += strlen ("charset=");
                            len = strcspn (charsetstr, " \t\n");
                            charset = (char *) xmalloca (len + 1);
                            memcpy (charset, charsetstr, len);
                            charset[len] = '\0';
                            break;
                          }
                      }
                  }
              if (charset != NULL)
                break;
            }
          if (charset != NULL)
            def_canon_charset = po_charset_canonicalize (charset);
          if (def_canon_charset == NULL)
            /* Unspecified encoding.  Assume unibyte encoding.  */
            def_canon_charset = po_charset_ascii;
        }
    }
  else
    def_canon_charset = NULL;

  /* Initialize and preprocess the total set of message definitions.  */
  definitions_init (&definitions, def_canon_charset);
  empty_list = message_list_alloc (false);

  result = msgdomain_list_alloc (false);
  processed = 0;

  /* Every reference must be matched with its definition.  */
  if (!multi_domain_mode)
    for (k = 0; k < ref->nitems; k++)
      {
        const char *domain = ref->item[k]->domain;
        message_list_ty *refmlp = ref->item[k]->messages;
        message_list_ty *resultmlp =
          msgdomain_list_sublist (result, domain, true);
        message_list_ty *defmlp;

        defmlp = msgdomain_list_sublist (def, domain, false);
        if (defmlp == NULL)
          defmlp = empty_list;
        definitions_set_current_list (&definitions, defmlp);

        match_domain (fn1, fn2, &definitions, refmlp, resultmlp,
                      &stats, &processed);
      }
  else
    {
      /* Apply the references messages in the default domain to each of
         the definition domains.  */
      message_list_ty *refmlp = ref->item[0]->messages;

      for (k = 0; k < def->nitems; k++)
        {
          const char *domain = def->item[k]->domain;
          message_list_ty *defmlp = def->item[k]->messages;

          /* Ignore the default message domain if it has no messages.  */
          if (k > 0 || defmlp->nitems > 0)
            {
              message_list_ty *resultmlp =
                msgdomain_list_sublist (result, domain, true);

              definitions_set_current_list (&definitions, defmlp);

              match_domain (fn1, fn2, &definitions, refmlp, resultmlp,
                            &stats, &processed);
            }
        }
    }

  definitions_destroy (&definitions);

  /* Look for messages in the definition file, which are not present
     in the reference file, indicating messages which defined but not
     used in the program.  Don't scan the compendium(s).  */
  for (k = 0; k < def->nitems; ++k)
    {
      const char *domain = def->item[k]->domain;
      message_list_ty *defmlp = def->item[k]->messages;

      for (j = 0; j < defmlp->nitems; j++)
        {
          message_ty *defmsg = defmlp->item[j];

          if (!defmsg->used)
            {
              /* Remember the old translation although it is not used anymore.
                 But we mark it as obsolete.  */
              message_ty *mp;

              mp = message_copy (defmsg);
              /* Clear the extracted comments.  */
              if (mp->comment_dot != NULL)
                {
                  string_list_free (mp->comment_dot);
                  mp->comment_dot = NULL;
                }
              /* Clear the file position comments.  */
              if (mp->filepos != NULL)
                {
                  size_t i;

                  for (i = 0; i < mp->filepos_count; i++)
                    free ((char *) mp->filepos[i].file_name);
                  mp->filepos_count = 0;
                  free (mp->filepos);
                  mp->filepos = NULL;
                }
              /* Mark as obsolete.   */
              mp->obsolete = true;

              message_list_append (msgdomain_list_sublist (result, domain, true),
                                   mp);
              stats.obsolete++;
            }
        }
    }

  /* Determine the known a-priori encoding, if any.  */
  if (def->encoding == ref->encoding)
    result->encoding = def->encoding;

  /* Report some statistics.  */
  if (verbosity_level > 0)
    fprintf (stderr, _("%s\
Read %ld old + %ld reference, \
merged %ld, fuzzied %ld, missing %ld, obsolete %ld.\n"),
             !quiet && verbosity_level <= 1 ? "\n" : "",
             (long) def->nitems, (long) ref->nitems,
             (long) stats.merged, (long) stats.fuzzied, (long) stats.missing,
             (long) stats.obsolete);
  else if (!quiet)
    fputs (_(" done.\n"), stderr);

  /* Return results.  */
  *defp = def;
  return result;
}
|
AttentionHelp.h | #ifndef ATTENTION_HELP
#define ATTENTION_HELP
/*
* AttentionHelp.h:
* attention softmax help nodes
*
* Created on: Apr 22, 2017
* Author: mszhang
*/
#include "MyLib.h"
#include "Node.h"
#include "Graph.h"
#include <memory>
// Scalar attention node: given value inputs ins[i] (each of dimension dim)
// and scalar score inputs unnormeds[i] (each 1-dimensional), computes
//   val = sum_i softmax(unnormeds)[i] * ins[i]->val.
class AttentionSoftMaxNode : public Node {
  public:
    vector<dtype> masks, mask_losses;   // softmax weights and their gradients
    vector<dtype> unnormed_masks;       // exp(score) values, before normalization
    dtype sum;                          // softmax normalizer: sum of unnormed_masks
    vector<PNode> unnormeds;            // 1-dim attention-score inputs
    vector<PNode> ins;                  // value inputs, each of dimension dim

    AttentionSoftMaxNode() : Node() {
        ins.clear();
        unnormeds.clear();
        node_type = "AttentionSoftmax";
    }

    ~AttentionSoftMaxNode() {
        masks.clear();
        mask_losses.clear();
        unnormed_masks.clear();
        ins.clear();
        unnormeds.clear();
    }

#if USE_GPU
    // Export pointers to input values/losses for the CUDA kernels;
    // ins come first, then the matching unnormeds.
    void toNodeInfo(NodeInfo &info) const override {
        Node::toNodeInfo(info);
        info.input_count = ins.size();
        info.input_vals.reserve(ins.size() * 2);
        info.input_losses.reserve(ins.size() * 2);
        for (PNode p : ins) {
            info.input_vals.push_back(p->val.value);
            info.input_losses.push_back(p->loss.value);
        }
        for (PNode p : unnormeds) {
            info.input_vals.push_back(p->val.value);
            info.input_losses.push_back(p->loss.value);
        }
    }
#endif

    // Reset per-example state so the node can be reused in the next graph.
    inline void clearValue() {
        Node::clearValue();
        ins.clear();
        unnormeds.clear();
        sum = 0;
    }

    // Pre-size the scratch buffers for at most maxsize attended inputs.
    inline void setParam(int maxsize) {
        masks.resize(maxsize);
        mask_losses.resize(maxsize);
        unnormed_masks.resize(maxsize);
    }

    inline void init(int ndim, dtype dropout) {
        Node::init(ndim, dropout);
    }

  public:
    // Wire the node into graph cg: x are the values, a the scalar scores.
    // Input validation failures only print a message and leave the node out
    // of the graph.
    void forward(Graph *cg, const vector<PNode>& x, const vector<PNode>& a) {
        if (x.size() == 0) {
            std::cout << "empty inputs for attention help node" << std::endl;
            return;
        }
        if (x.size() != a.size()) {
            std::cout << "the number of input nodes does not equal the number of attention factors." << std::endl;
            return;
        }

        int nSize = x.size();
        ins.clear();
        unnormeds.clear();
        for (int i = 0; i < nSize; i++) {
            // Each value must match this node's dimension; each score is scalar.
            if (x[i]->val.dim != dim || a[i]->val.dim != 1) {
                std::cout << "input matrixes are not matched" << std::endl;
                clearValue();
                return;
            }
            ins.push_back(x[i]);
            unnormeds.push_back(a[i]);
        }

        degree = 0;
        for (int i = 0; i < nSize; i++) {
            ins[i]->addParent(this);
            unnormeds[i]->addParent(this);
        }

        cg->addNode(this);
    }

  public:
    inline PExecute generate(bool bTrain, dtype cur_drop_factor);

    // better to rewrite for deep understanding
    inline bool typeEqual(PNode other) {
        return Node::typeEqual(other);
    }

  public:
    // Forward: softmax over the scalar scores, then weighted sum of ins.
    inline void compute() {
        int nSize = ins.size();
        sum = 0;
        for (int i = 0; i < nSize; ++i) {
            unnormed_masks[i] = fexp(unnormeds[i]->val[0]);
            sum += unnormed_masks[i];
        }

        for (int i = 0; i < nSize; ++i) {
            masks[i] = unnormed_masks[i] / sum;
        }

        val.zero();
        for (int i = 0; i < nSize; ++i) {
            val.vec() += masks[i] * ins[i]->val.vec();
        }
    }

    // Backward: propagate loss to the values (scaled by masks) and to the
    // scores through the softmax Jacobian
    //   d mask_i / d score_j = mask_i * (delta_ij - mask_j).
    void backward() {
        int nSize = ins.size();
        for (int i = 0; i < nSize; i++) {
            ins[i]->loss.vec() += loss.vec() * masks[i];
            // mask_losses[i] = <loss, ins[i]->val>: gradient w.r.t. mask i.
            mask_losses[i] = 0;
            for (int idx = 0; idx < dim; idx++) {
                mask_losses[i] += loss[idx] * ins[i]->val[idx];
            }
        }

        for (int i = 0; i < nSize; i++) {
            for (int j = 0; j < nSize; j++) {
                unnormeds[i]->loss[0] -= masks[i] * masks[j] * mask_losses[j];
                if (i == j) {
                    unnormeds[i]->loss[0] += masks[i] * mask_losses[i];
                }
            }
        }
    }
};
#if USE_GPU
class AttentionSoftMaxExecute : public Execute {
public:
int dim;
std::vector<int> in_counts;
int max_in_count;
std::vector<std::shared_ptr<Tensor2D>> masks;
std::vector<dtype*> raw_masks;
std::vector<dtype*> ins;
void forward() {
int count = batch.size();
in_counts.reserve(count);
masks.reserve(count);
for (Node *n : batch) {
AttentionSoftMaxNode *attention =
static_cast<AttentionSoftMaxNode*>(n);
in_counts.push_back(attention->ins.size());
}
max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
for (Node *n : batch) {
AttentionSoftMaxNode *attention =
static_cast<AttentionSoftMaxNode*>(n);
in_counts.push_back(attention->ins.size());
auto p = std::make_shared<Tensor2D>();
p->init(dim, max_in_count);
masks.push_back(p);
}
std::vector<dtype*> unnormeds, vals;
ins.reserve(count * max_in_count);
unnormeds.reserve(count * max_in_count);
vals.reserve(count);
for (Node *n : batch) {
AttentionSoftMaxNode *att = static_cast<AttentionSoftMaxNode*>(n);
vals.push_back(att->val.value);
for (int i = 0; i < att->ins.size(); ++i) {
ins.push_back(att->ins.at(i)->val.value);
unnormeds.push_back(att->unnormeds.at(i)->val.value);
}
for (int i = 0; i < max_in_count - att->ins.size(); ++i) {
ins.push_back(NULL);
unnormeds.push_back(NULL);
}
}
raw_masks.reserve(count);
for (auto &p : masks) {
raw_masks.push_back(p->value);
}
n3ldg_cuda::ScalarAttentionForward(ins, unnormeds, in_counts, count,
dim, raw_masks, vals);
#if TEST_CUDA
for (Node *n : batch) {
n->compute();
AttentionSoftMaxNode *att = static_cast<AttentionSoftMaxNode*>(n);
n3ldg_cuda::Assert(n->val.verify(
"AttentionSoftMaxExecute forward"));
}
#endif
}
void backward() {
int count = batch.size();
std::vector<dtype*> losses, in_losses, unnormed_losses;
losses.reserve(count);
in_losses.reserve(count * max_in_count);
unnormed_losses.reserve(count * max_in_count);
for (Node *n : batch) {
losses.push_back(n->loss.value);
AttentionSoftMaxNode *att = static_cast<AttentionSoftMaxNode*>(n);
for (int i = 0; i < att->ins.size(); ++i) {
in_losses.push_back(att->ins.at(i)->loss.value);
unnormed_losses.push_back(att->unnormeds.at(i)->loss.value);
}
for (int i = 0; i < max_in_count - att->ins.size(); ++i) {
in_losses.push_back(NULL);
unnormed_losses.push_back(NULL);
}
}
n3ldg_cuda::ScalarAttentionBackward(losses, ins, raw_masks, in_counts,
count, dim, in_losses, unnormed_losses);
#if TEST_CUDA
for (int idx = 0; idx < count; idx++) {
batch[idx]->backward_drop();
batch[idx]->backward();
}
for (Node *n : batch) {
AttentionSoftMaxNode *att = static_cast<AttentionSoftMaxNode*>(n);
for (Node *in : att->ins) {
n3ldg_cuda::Assert(in->loss.verify(
"AttentionSoftMaxExecute backward ins"));
}
for (Node *un : att->unnormeds) {
n3ldg_cuda::Assert(un->loss.verify(
"AttentionSoftMaxExecute backward unnormeds"));
}
}
#endif
}
};
#else
class AttentionSoftMaxExecute : public Execute {
public:
inline void forward() {
int count = batch.size();
//#pragma omp parallel for
for (int idx = 0; idx < count; idx++) {
batch[idx]->compute();
batch[idx]->forward_drop(bTrain, drop_factor);
}
}
inline void backward() {
int count = batch.size();
//#pragma omp parallel for
for (int idx = 0; idx < count; idx++) {
batch[idx]->backward_drop();
batch[idx]->backward();
}
}
};
#endif
// Create a single-node executor for this attention node.
inline PExecute AttentionSoftMaxNode::generate(bool bTrain, dtype cur_drop_factor) {
    AttentionSoftMaxExecute *execute = new AttentionSoftMaxExecute();
    execute->bTrain = bTrain;
    execute->drop_factor = cur_drop_factor;
#if USE_GPU
    execute->dim = dim;
#endif
    execute->batch.push_back(this);
    return execute;
}
// Vector attention node: like AttentionSoftMaxNode, but each score input
// unnormeds[i] is a full dim-dimensional vector, so the softmax is applied
// element-wise across inputs, independently per dimension:
//   val[d] = sum_i softmax_i(unnormeds[.][d]) * ins[i]->val[d].
class AttentionSoftMaxVNode : public Node {
  public:
    vector<Tensor1D> masks, mask_losses;  // per-dimension softmax weights / grads
    vector<Tensor1D> unnormed_masks;      // exp(scores), before normalization
    Tensor1D sum;                         // per-dimension softmax normalizer
    vector<PNode> unnormeds;              // dim-dimensional score inputs
    vector<PNode> ins;                    // value inputs, each of dimension dim

  public:
    AttentionSoftMaxVNode() : Node() {
        ins.clear();
        unnormeds.clear();
        node_type = "AttentionSoftmaxV";
    }

    ~AttentionSoftMaxVNode() {
        masks.clear();
        mask_losses.clear();
        unnormed_masks.clear();
        ins.clear();
        unnormeds.clear();
    }

    // Reset per-example state so the node can be reused in the next graph.
    inline void clearValue() {
        Node::clearValue();
        ins.clear();
        unnormeds.clear();
#if !USE_GPU
        sum.zero();
#endif
    }

    // Pre-size the scratch tensors for at most maxsize attended inputs.
    inline void setParam(int maxsize) {
        masks.resize(maxsize);
        mask_losses.resize(maxsize);
        unnormed_masks.resize(maxsize);
    }

    inline void init(int ndim, dtype dropout) {
        Node::init(ndim, dropout);
        int count = masks.size();
        for (int idx = 0; idx < count; idx++) {
            masks[idx].init(ndim);
            mask_losses[idx].init(ndim);
            unnormed_masks[idx].init(ndim);
        }
        sum.init(ndim);
#if !USE_GPU
        sum.zero();
#endif
    }

  public:
    // Wire the node into graph cg: x are the values, a the vector scores.
    // Input validation failures only print a message and leave the node out
    // of the graph.
    void forward(Graph *cg, const vector<PNode>& x, const vector<PNode>& a) {
        if (x.size() == 0) {
            std::cout << "empty inputs for attention help node" << std::endl;
            return;
        }

        if (x.size() != a.size()) {
            std::cout << "the number of input nodes does not equal the number of attention factors." << std::endl;
            return;
        }

        int nSize = x.size();
        ins.clear();
        unnormeds.clear();
        for (int i = 0; i < nSize; i++) {
            // Both the value and its score must match this node's dimension.
            if (x[i]->val.dim != dim || a[i]->val.dim != dim) {
                std::cout << "input matrixes are not matched" << std::endl;
                clearValue();
                return;
            }
            ins.push_back(x[i]);
            unnormeds.push_back(a[i]);
        }

        degree = 0;
        for (int i = 0; i < nSize; i++) {
            ins[i]->addParent(this);
            unnormeds[i]->addParent(this);
        }

        cg->addNode(this);
    }

  public:
    inline PExecute generate(bool bTrain, dtype cur_drop_factor);

    // better to rewrite for deep understanding
    inline bool typeEqual(PNode other) {
        return Node::typeEqual(other);
    }

  public:
    // Forward: element-wise softmax over inputs, then weighted sum.
    inline void compute() {
        int nSize = ins.size();
        sum.zero();
        for (int i = 0; i < nSize; ++i) {
            unnormed_masks[i].vec() = unnormeds[i]->val.vec().unaryExpr(ptr_fun(fexp));
            sum.vec() += unnormed_masks[i].vec();
        }

        for (int i = 0; i < nSize; ++i) {
            masks[i].vec() = unnormed_masks[i].vec() / sum.vec();
        }

        val.zero();
        for (int i = 0; i < nSize; ++i) {
            val.vec() += masks[i].vec() * ins[i]->val.vec();
        }
    }

    // Backward: per dimension idx, propagate through the softmax Jacobian
    //   d mask_i[idx] / d score_j[idx] = mask_i[idx] * (delta_ij - mask_j[idx]).
    void backward() {
        int nSize = ins.size();
        for (int i = 0; i < nSize; i++) {
            ins[i]->loss.vec() += loss.vec() * masks[i].vec();
            // Gradient w.r.t. mask i, element-wise.
            mask_losses[i].vec() = loss.vec() * ins[i]->val.vec();
        }

        for (int idx = 0; idx < dim; idx++) {
            for (int i = 0; i < nSize; i++) {
                for (int j = 0; j < nSize; j++) {
                    unnormeds[i]->loss[idx] -= masks[i][idx] * masks[j][idx] * mask_losses[j][idx];
                    if (i == j) {
                        unnormeds[i]->loss[idx] += masks[i][idx] * mask_losses[i][idx];
                    }
                }
            }
        }
    }
};
#if USE_GPU
class AttentionSoftMaxVExecute : public Execute {
public:
int dim;
std::vector<int> in_counts;
int max_in_count;
std::vector<std::shared_ptr<Tensor2D>> masks;
std::vector<dtype*> raw_masks;
std::vector<dtype*> ins;
void forward() {
int count = batch.size();
in_counts.reserve(count);
for (Node *n : batch) {
AttentionSoftMaxVNode *attention =
static_cast<AttentionSoftMaxVNode*>(n);
in_counts.push_back(attention->ins.size());
}
max_in_count = *std::max_element(in_counts.begin(), in_counts.end());
masks.reserve(count);
for (Node *n : batch) {
AttentionSoftMaxVNode *attention =
static_cast<AttentionSoftMaxVNode*>(n);
in_counts.push_back(attention->ins.size());
auto p = std::make_shared<Tensor2D>();
p->init(dim, max_in_count);
masks.push_back(p);
}
std::vector<dtype*> unnormeds, vals;
ins.reserve(count * max_in_count);
unnormeds.reserve(count * max_in_count);
vals.reserve(count);
for (Node *n : batch) {
AttentionSoftMaxVNode *att =
static_cast<AttentionSoftMaxVNode*>(n);
vals.push_back(att->val.value);
for (int i = 0; i < att->ins.size(); ++i) {
#if TEST_CUDA
n3ldg_cuda::Assert(att->ins.at(i)->val.verify("AttentionSoftMaxVExecute forward initial val"));
#endif
ins.push_back(att->ins.at(i)->val.value);
#if TEST_CUDA
n3ldg_cuda::Assert(att->unnormeds.at(i)->val.verify("AttentionSoftMaxVExecute forward initial unnormeds"));
#endif
unnormeds.push_back(att->unnormeds.at(i)->val.value);
}
for (int i = 0; i < max_in_count - att->ins.size(); ++i) {
ins.push_back(NULL);
unnormeds.push_back(NULL);
}
}
raw_masks.reserve(count);
for (auto &p : masks) {
raw_masks.push_back(p->value);
}
n3ldg_cuda::VectorAttentionForward(ins, unnormeds, in_counts, count,
dim, raw_masks, vals);
#if TEST_CUDA
for (Node *n : batch) {
n->compute();
AttentionSoftMaxVNode *att =
static_cast<AttentionSoftMaxVNode*>(n);
n3ldg_cuda::Assert(n->val.verify(
"AttentionSoftMaxVExecute forward"));
}
#endif
}
void backward() {
int count = batch.size();
std::vector<dtype*> losses, in_losses, unnormed_losses;
losses.reserve(count);
in_losses.reserve(count * max_in_count);
unnormed_losses.reserve(count * max_in_count);
for (Node *n : batch) {
losses.push_back(n->loss.value);
AttentionSoftMaxVNode *att = static_cast<AttentionSoftMaxVNode*>(n);
for (int i = 0; i < att->ins.size(); ++i) {
in_losses.push_back(att->ins.at(i)->loss.value);
unnormed_losses.push_back(att->unnormeds.at(i)->loss.value);
}
for (int i = 0; i < max_in_count - att->ins.size(); ++i) {
in_losses.push_back(NULL);
unnormed_losses.push_back(NULL);
}
}
n3ldg_cuda::VectorAttentionBackward(losses, ins, raw_masks, in_counts,
count, dim, in_losses, unnormed_losses);
#if TEST_CUDA
for (int idx = 0; idx < count; idx++) {
batch[idx]->backward_drop();
batch[idx]->backward();
}
for (Node *n : batch) {
AttentionSoftMaxVNode *att = static_cast<AttentionSoftMaxVNode*>(n);
for (Node *in : att->ins) {
n3ldg_cuda::Assert(in->loss.verify(
"AttentionSoftMaxExecute backward ins"));
}
for (Node *un : att->unnormeds) {
n3ldg_cuda::Assert(un->loss.verify(
"AttentionSoftMaxExecute backward unnormeds"));
}
}
#endif
}
};
#else
class AttentionSoftMaxVExecute : public Execute {
public:
inline void forward() {
int count = batch.size();
//#pragma omp parallel for
for (int idx = 0; idx < count; idx++) {
batch[idx]->compute();
batch[idx]->forward_drop(bTrain, drop_factor);
}
}
inline void backward() {
int count = batch.size();
//#pragma omp parallel for
for (int idx = 0; idx < count; idx++) {
batch[idx]->backward_drop();
batch[idx]->backward();
}
}
};
#endif
// Create a single-node executor for this vector-attention node.
inline PExecute AttentionSoftMaxVNode::generate(bool bTrain, dtype cur_drop_factor) {
    AttentionSoftMaxVExecute *execute = new AttentionSoftMaxVExecute();
    execute->bTrain = bTrain;
    execute->drop_factor = cur_drop_factor;
#if USE_GPU
    execute->dim = dim;
#endif
    execute->batch.push_back(this);
    return execute;
}
#endif
|
GB_binop__ne_int16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__ne_int16)
// A.*B function (eWiseMult): GB (_AemultB_08__ne_int16)
// A.*B function (eWiseMult): GB (_AemultB_02__ne_int16)
// A.*B function (eWiseMult): GB (_AemultB_04__ne_int16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_int16)
// A*D function (colscale): GB (_AxD__ne_int16)
// D*A function (rowscale): GB (_DxB__ne_int16)
// C+=B function (dense accum): GB (_Cdense_accumB__ne_int16)
// C+=b function (dense accum): GB (_Cdense_accumb__ne_int16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_int16)
// C=scalar+B GB (_bind1st__ne_int16)
// C=scalar+B' GB (_bind1st_tran__ne_int16)
// C=A+scalar GB (_bind2nd__ne_int16)
// C=A'+scalar GB (_bind2nd_tran__ne_int16)
// C type: bool
// A type: int16_t
// A pattern? 0
// B type: int16_t
// B pattern? 0
// BinaryOp: cij = (aij != bij)
#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int16_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
// BUG FIX: the original had a trailing backslash after the 0, which spliced
// the next comment line into the macro replacement list (line splicing
// happens before comment removal), silently swallowing that comment.
#define GB_A_IS_PATTERN \
    0

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int16_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
// BUG FIX: same stray trailing backslash removed here.
#define GB_B_IS_PATTERN \
    0

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x != y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_NE || GxB_NO_INT16 || GxB_NO_NE_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// Disabled for this operator: NE is a comparator, not one of the
// accumulable arithmetic ops above, so no accum kernel is generated.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense; the actual loop lives in the
// included template, specialized by the GB_* macros defined above.
void GB (_Cdense_ewise3_noaccum__ne_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix into a dense matrix.
// The template is compiled out (#if 0) for this operator, so this is a
// stub that reports success without doing work; GB_DISABLE still allows
// the whole kernel to be compiled out via GraphBLAS configuration.
GrB_Info GB (_Cdense_accumB__ne_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
// The template is compiled out (#if 0) for this operator, so this is a
// stub that reports success without doing work.
GrB_Info GB (_Cdense_accumb__ne_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if 0
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column j of A by the diagonal entry D(j,j).
// Returns GrB_NO_VALUE when this operator is disabled at compile time.
GrB_Info GB (_AxD__ne_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Output values are bool (result of the != comparator).
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row i of B by the diagonal entry D(i,i).
// Returns GrB_NO_VALUE when this operator is disabled at compile time.
GrB_Info GB (_DxB__ne_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Output values are bool (result of the != comparator).
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B, where "+" is the NE
// comparator.  For eWiseUnion, alpha/beta scalars substitute for entries
// present in only one of A or B.  The work is done by GB_add_template.c,
// specialized by the GB_* macros above.
GrB_Info GB (_AaddB__ne_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    int16_t alpha_scalar ;
    int16_t beta_scalar ;
    if (is_eWiseUnion)
    {
        // Unpack the typed alpha/beta scalars for eWiseUnion only.
        alpha_scalar = (*((int16_t *) alpha_scalar_in)) ;
        beta_scalar  = (*((int16_t *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C = A.*B, C<M> = A.*B, or C<!M> = A.*B,
// where C is sparse or hypersparse.
GrB_Info GB (_AemultB_08__ne_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full.  flipxy requests fmult(y,x) instead of fmult(x,y); since NE
// is commutative (GB_BINOP_FLIP is 0), the unflipped template is always used.
GrB_Info GB (_AemultB_02__ne_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP

        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }

    #else

        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"

    #endif

    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Compute C<M> = A.*B where M is sparse/hyper and both A and B are
// bitmap/full, with the NE_INT16 operator.  Work is sliced over the
// entries of the mask M (M_ek_slicing / M_ntasks / M_nthreads).
GrB_Info GB (_AemultB_04__ne_int16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template provides the complete algorithm for this method variant
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Compute C=A.*B, C<M>=A.*B, or C<!M>=A.*B where the result C is held in
// bitmap format, with the NE_INT16 operator.  ewise_method selects the
// particular bitmap strategy inside the template.
GrB_Info GB (_AemultB_bitmap__ne_int16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the template provides the complete algorithm for this method variant
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = (x != Bx [p]) for all entries of B, binding the scalar x as the
// first operand.  Entries absent from the bitmap Bb are skipped.
GrB_Info GB (_bind1st__ne_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    const int16_t x = (*((const int16_t *) x_input)) ;
    const int16_t *Bx = (const int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip positions not present in the bitmap
        if (!GBB (Bb, p)) continue ;
        const int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x != bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = (Ax [p] != y) for all entries of A, binding the scalar y as the
// second operand.  Entries absent from the bitmap Ab are skipped.
GrB_Info GB (_bind2nd__ne_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    const int16_t *Ax = (const int16_t *) Ax_input ;
    const int16_t y = (*((const int16_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip positions not present in the bitmap
        if (!GBB (Ab, p)) continue ;
        const int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij != y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c for each transposed entry.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x != aij) ; \
}
// C = op (x, A'): transpose A and apply z = (x != aij) entrywise.
GrB_Info GB (_bind1st_tran__ne_int16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this generated function
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c for each transposed entry.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij != y) ; \
}
// C = op (A', y): transpose A and apply z = (aij != y) entrywise.
GrB_Info GB (_bind2nd_tran__ne_int16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
pi_omp_tasks.c | /*
* Compute pi by approximating the area under the curve f(x) = 4 / (1 + x*x)
* between 0 and 1.
*
* Parallel version using OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h> /* OpenMP */
// Return the current wall-clock time in microseconds since the epoch.
double getusec_() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    // seconds scaled to microseconds plus the sub-second remainder;
    // both terms are exactly representable in a double
    return (double)tv.tv_sec * 1000000.0 + (double)tv.tv_usec;
}
#define NUMITERS 10000
#define MINTASKS 2
#define MAXTASKS 64
#define STEPTASKS 2
// Measure the average overhead of creating and synchronizing OpenMP tasks.
// The same numerical kernel (a partial pi integration of num_steps terms)
// is run num_tasks times per repetition, first serially and then with each
// kernel instance wrapped in an OpenMP task; the returned value is the mean
// extra time per repetition attributable to tasking.
// Expected to be called from inside "omp parallel"/"omp single" (see main),
// so the generated tasks are executed by the surrounding thread team.
double difference(int num_tasks, long int num_steps) {
double x, sum;
double step = 1.0/(double) num_steps;
// --- baseline: serial execution of the num_tasks kernel instances ---
double stamp1 = getusec_();
for (int rep=0; rep<NUMITERS ; rep++)
for (int iter=0; iter<num_tasks ; iter++) {
sum = 0.0;
for (long int i=0; i<num_steps; ++i) {
x = (i+0.5)*step;
sum += 4.0/(1.0+x*x);
}
}
stamp1 = getusec_()-stamp1;
// NOTE(review): sum is never read after the loops; an optimizing compiler
// could elide the serial kernel and skew the baseline -- confirm at -O2.
// --- tasked version: one task per kernel instance, joined per repetition ---
double stamp2 = getusec_();
for (int rep=0; rep<NUMITERS ; rep++) {
for (int iter=0; iter<num_tasks ; iter++) {
sum = 0.0;
#pragma omp task private(x) firstprivate(sum)
for (long int i=0; i<num_steps; ++i) {
x = (i+0.5)*step;
sum += 4.0/(1.0+x*x);
}
}
#pragma omp taskwait
}
stamp2 = getusec_()-stamp2;
// mean extra cost per repetition introduced by task creation + taskwait
return((stamp2 - stamp1)/NUMITERS);
}
// Driver: sweep the task count from MINTASKS to MAXTASKS and print the
// measured per-task overhead for each count.
int main(int argc, char *argv[]) {
    const char Usage[] = "Usage: pi <num_steps> <num_threads> \n";
    if (argc < 3) {
        // pass the message as data, never as the format string itself
        fprintf(stderr, "%s", Usage);
        exit(1);
    }
    long int num_steps = atol(argv[1]);  // atol: atoi would truncate a long
    int num_threads = atoi(argv[2]);
    if (num_steps <= 0 || num_threads <= 0) {
        fprintf(stderr, "%s", Usage);
        exit(1);
    }
    printf("All overheads expressed in microseconds\n");
    printf("Ntasks\tOverhead per task\n");
    // one thread team; a single thread generates tasks inside difference()
    // while the remaining threads execute them
    #pragma omp parallel num_threads(num_threads)
    #pragma omp single
    {
        // warm-up call: absorb one-time runtime startup costs
        difference(MINTASKS, num_steps);
        for (int n_tasks=MINTASKS; n_tasks<=MAXTASKS; n_tasks+=STEPTASKS) {
            double tmp = difference(n_tasks, num_steps);
            printf("%d\t%.4f\n", n_tasks, tmp/n_tasks);
        }
    }
    return EXIT_SUCCESS;
}
|
base.c | #define _DEFAULT_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <omp.h>
int NITER=100;
int N=100;
// Microbenchmark: NITER rounds of a parallel loop whose body simulates a
// small unit of work (increment + 1 microsecond sleep).
int main(){
    for(int i = 0; i < NITER; ++i){
        float counter = 0.0f;
        // reduction(+:counter) removes the data race the unsynchronized
        // shared update had; each thread accumulates privately
        #pragma omp parallel for reduction(+:counter)
        for(int j = 0; j < N; ++j){
            // Useful work here
            counter = counter + 1;
            usleep(1);
        }
        (void)counter;  // result intentionally unused; keep it observable
    }
    return 0;
}
|
mixed_tentusscher_myo_epi_2004_S1_11.c | // Scenario 1 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S1_11.h"
// Report this model's resting potential and ODE system size to the caller.
// The GET_CELL_MODEL_DATA macro supplies the signature; the flags
// get_initial_v / get_neq select which fields of cell_model to fill in.
GET_CELL_MODEL_DATA(init_cell_model_data)
{
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
// Initialize the state vector sv[0..NEQ-1] for one cell.  extra_data must
// carry a per-cell mapping array: mapping[sv_id] == 0 selects myocardium
// steady-state values, anything else selects epicardium values.
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
// emit the banner only once, even though this runs once per cell
static bool first_call = true;
if(first_call)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
first_call = false;
}
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
// hard exit: a mixed model is meaningless without a cell-type mask
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
// Initial conditions for TenTusscher myocardium
if (mapping[sv_id] == 0)
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
// Initial conditions for TenTusscher epicardium
else
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5974707074450,0.00128204739651212,0.780428330052312,0.780324688807244,0.000173918859429422,0.485394289096723,0.00293418407709962,0.999998358088272,1.92408383972285e-08,1.88371365492290e-05,0.999776458422915,1.00642295564641,0.999972802863595,6.06924119244823e-05,0.795740043602779,8.27776251832260,141.140921542934}; for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
}
// Advance every requested cell by num_steps ODE sub-steps of size dt.
// Cells are processed in parallel; each cell is dispatched to the
// myocardium or epicardium solver according to the mapping array.
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
uint32_t sv_id;
int i;
// sv_id must be private: each iteration computes its own cell index
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++)
{
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = (uint32_t )i;
for (int j = 0; j < num_steps; ++j)
{
// NOTE(review): the cell type is looked up with mapping[i], while the
// state is addressed with sv_id; when cells_to_solve is non-NULL these
// differ -- confirm whether mapping[sv_id] was intended here, as the
// initial-conditions function indexes mapping by sv_id.
if (mapping[i] == 0)
solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
else
solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
// Advance one myocardium cell by a single time step: snapshot the current
// state, evaluate the model update, and write the new state back into sv.
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
    real state[NEQ];
    real next[NEQ];
    for(int k = 0; k < NEQ; k++)
        state[k] = sv[k];
    // RHS_cpu_myo fills next[] with the already-integrated state values
    RHS_cpu_myo(state, next, stim_current, dt);
    for(int k = 0; k < NEQ; k++)
        sv[k] = next[k];
}
// TenTusscher 2004 myocardium cell model: compute one integration step.
// Despite the name, rDY_ receives the UPDATED state values, not time
// derivatives: the membrane voltage rDY_[0] is advanced with forward Euler,
// the gating variables rDY_[1..10] with Rush-Larsen exponential updates,
// and the concentrations (Cai, CaSR, Nai, Ki) are updated in place below
// and copied into rDY_[13..16].  sv holds the current state (layout matches
// the initial-conditions function: V, m, h, j, xr1, xr2, xs, s, r, d, f,
// fCa, g, Cai, CaSR, Nai, Ki).
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Currents, fluxes and intermediate quantities computed below
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
// precomputed exponential decay factors for fCa and g gate updates
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
// buffered SR calcium solved analytically via the quadratic formula
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
// buffered cytosolic calcium, same quadratic treatment
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates (Rush-Larsen: exact exponential relaxation toward *_INF)
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// fCa and g gates may only decrease while depolarized (> -37 mV)
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
// Advance one epicardium cell by a single time step: snapshot the current
// state, evaluate the model update, and write the new state back into sv.
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
    real state[NEQ];
    real next[NEQ];
    for(int k = 0; k < NEQ; k++)
        state[k] = sv[k];
    // RHS_cpu_epi fills next[] with the already-integrated state values
    RHS_cpu_epi(state, next, stim_current, dt);
    for(int k = 0; k < NEQ; k++)
        sv[k] = next[k];
}
// TenTusscher 2004 epicardium cell model: compute one integration step.
// Identical structure to RHS_cpu_myo, but several conductances and the SR
// release/leak parameters are overridden from a fitted parameter vector
// (the parameters[] array below).  rDY_ receives UPDATED state values:
// forward-Euler voltage, Rush-Larsen gates, and in-place-updated
// concentrations copied into rDY_[13..16].
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Fitted parameter vector: overrides the default conductances above and
// supplies the SR release (arel, crel) and leak (Vleak) coefficients.
// Presumably produced by an external optimization (scenario S1_11) --
// confirm against the study that generated this file.
real parameters []={13.1349202225894,0.000240126904910241,0.000150567504696109,0.000914438640048699,0.279364139406214,0.155245941656985,0.146985402923449,3.81282800438800,0.0154593435039349,3.57525801266453,1092.26054732627,0.000547743717896084,0.469367713522088,0.0107548262925547,0.00411107128565493,4.94125717103876e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
// Currents, fluxes and intermediate quantities computed below
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
// precomputed exponential decay factors for fCa and g gate updates
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
// SR release/leak use the fitted arel/crel/Vleak instead of fixed values
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
// buffered SR calcium solved analytically via the quadratic formula
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
// buffered cytosolic calcium, same quadratic treatment
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates (Rush-Larsen: exact exponential relaxation toward *_INF)
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
// fCa and g gates may only decrease while depolarized (> -37 mV)
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
mat_mul_p4a_8000.c | /*
* file for mat_mul.c
*/
#include "./mat_mul.h"
#include "./size.h"
void mat_mul(int *a, int *b, int *c);
// Benchmark kernel over 8000x8000 row-major int matrices (generated by
// Par4All/P4A for OpenMP).  b is accessed as b[j*8000+k], i.e. addressed by
// the output column index j -- so the accumulated product is a[i][k]*b[j][k]
// (effectively A * B-transposed), not the textbook a[i][k]*b[k][j].
// NOTE(review): the innermost t loop adds the SAME product 100 times, so
// c = 100 * (A * B^T).  Presumably intentional workload inflation from the
// code generator -- confirm before reusing this as a real matrix multiply.
void mat_mul(int *a, int *b, int *c)
{
int i, j, k, t;
// each thread owns a distinct range of output rows i; j, k, t are private
#pragma omp parallel for private(j, t, k)
for(i = 0; i <= 7999; i += 1)
for(j = 0; j <= 7999; j += 1) {
c[i*8000+j] = 0;
for(k = 0; k <= 7999; k += 1)
for(t = 0; t <= 99; t += 1)
c[i*8000+j] += a[i*8000+k]*b[j*8000+k];
}
return;
}
|
veccopy-ompt-target-tracing.c | #include <stdio.h>
#include <assert.h>
#include <omp.h>
#include "callbacks.h"
static int start_trace();
static int flush_trace();
static int stop_trace();
// Copy b into a twice on the target device -- once with "target parallel
// for", once with "target teams distribute parallel for" -- tracing both
// regions via OMPT, then verify the copy on the host.
int main()
{
  int N = 100000;
  int a[N];
  int b[N];
  int i;
  for (i=0; i<N; i++)
    a[i]=0;
  for (i=0; i<N; i++)
    b[i]=i;
  start_trace();
  // A combined parallel-for construct must be followed directly by the
  // for loop: wrapping the loop in a compound statement `{ ... }` is a
  // syntax error, so the braces the original had are removed.
  #pragma omp target parallel for
  for (int j = 0; j < N; j++)
    a[j] = b[j];
  flush_trace();
  stop_trace();
  start_trace();
  #pragma omp target teams distribute parallel for
  for (int j = 0; j < N; j++)
    a[j] = b[j];
  stop_trace();
  // host-side verification: every element must have been copied
  int rc = 0;
  for (i=0; i<N; i++)
    if (a[i] != b[i] ) {
      rc++;
      printf ("Wrong value: a[%d]=%d\n", i, a[i]);
    }
  if (!rc)
    printf("Success\n");
  return rc;
}
|
Cylinder.h | #ifndef CYLINDER_HEADER
#define CYLINDER_HEADER
#include "basic.h"
#include <stdexcept>
#include <utility>
#include <MiscLib/Vector.h>
#include "PointCloud.h"
#include <ostream>
#include <istream>
#include <GfxTL/HyperplaneCoordinateSystem.h>
#include <stdio.h>
#include <MiscLib/NoShrinkVector.h>
#include "LevMarLSWeight.h"
#include "LevMarFitting.h"
#ifndef DLL_LINKAGE
#define DLL_LINKAGE
#endif
class DLL_LINKAGE Cylinder
{
public:
struct ParallelNormalsError
: public std::runtime_error
{
ParallelNormalsError();
};
enum { RequiredSamples = 2 };
Cylinder();
Cylinder(const Vec3f &axisDir, const Vec3f &axisPos, float radius);
Cylinder(const Vec3f &pointA, const Vec3f &pointB,
const Vec3f &normalA, const Vec3f &normalB);
bool Init(const MiscLib::Vector< Vec3f > &samples);
bool InitAverage(const MiscLib::Vector< Vec3f > &samples);
bool Init(const Vec3f &axisDir, const Vec3f &axisPos, float radius);
bool Init(const Vec3f &pointA, const Vec3f &pointB,
const Vec3f &normalA, const Vec3f &normalB);
bool Init(bool binary, std::istream *i);
void Init(FILE *i);
void Init(float* array);
inline float Distance(const Vec3f &p) const;
inline void Normal(const Vec3f &p, Vec3f *normal) const;
inline float DistanceAndNormal(const Vec3f &p, Vec3f *normal) const;
inline float SignedDistance(const Vec3f &p) const;
void Project(const Vec3f &p, Vec3f *pp) const;
// parameters are (height, angle)
void Parameters(const Vec3f &p,
std::pair< float, float > *param) const;
float Radius() const;
float &Radius();
const Vec3f &AxisDirection() const;
Vec3f &AxisDirection();
const Vec3f &AxisPosition() const;
Vec3f &AxisPosition();
const Vec3f AngularDirection() const;
void RotateAngularDirection(float radians);
bool LeastSquaresFit(const PointCloud &pc,
MiscLib::Vector< size_t >::const_iterator begin,
MiscLib::Vector< size_t >::const_iterator end);
template< class IteratorT >
bool LeastSquaresFit(IteratorT begin, IteratorT end);
bool Fit(const PointCloud &pc,
MiscLib::Vector< size_t >::const_iterator begin,
MiscLib::Vector< size_t >::const_iterator end)
{ return LeastSquaresFit(pc, begin, end); }
static bool Interpolate(const MiscLib::Vector< Cylinder > &cylinders,
const MiscLib::Vector< float > &weights, Cylinder *ic);
void Serialize(bool binary, std::ostream *o) const;
static size_t SerializedSize();
void Serialize(FILE *o) const;
void Serialize(float* array) const;
static size_t SerializedFloatSize();
void Transform(float scale, const Vec3f &translate);
void Transform(const GfxTL::MatrixXX< 3, 3, float > &rot,
const GfxTL::Vector3Df &trans);
inline unsigned int Intersect(const Vec3f &p, const Vec3f &r,
float *first, float *second) const;
private:
template< class WeightT >
class LevMarCylinder
: public WeightT
{
public:
enum { NumParams = 7 };
typedef float ScalarType;
template< class IteratorT >
ScalarType Chi(const ScalarType *params, IteratorT begin, IteratorT end,
ScalarType *values, ScalarType *temp) const
{
ScalarType chi = 0;
int size = end - begin;
#ifdef DOPARALLEL
#pragma omp parallel for schedule(static) reduction(+:chi)
#endif
for(int idx = 0; idx < size; ++idx)
{
Vec3f s;
for(unsigned int j = 0; j < 3; ++j)
s[j] = begin[idx][j] - params[j];
ScalarType u = params[5] * s[1] - params[4] * s[2];
u *= u;
ScalarType v = params[3] * s[2] - params[5] * s[0];
u += v * v;
v = params[4] * s[0] - params[3] * s[1];
u += v * v;
temp[idx] = std::sqrt(u);
chi += (values[idx] = WeightT::Weigh(temp[idx] - params[6]))
* values[idx];
}
return chi;
}
template< class IteratorT >
void Derivatives(const ScalarType *params, IteratorT begin, IteratorT end,
const ScalarType *values, const ScalarType *temp, ScalarType *matrix) const
{
int size = end - begin;
#ifdef DOPARALLEL
#pragma omp parallel for schedule(static)
#endif
for(int idx = 0; idx < size; ++idx)
{
Vec3f s;
for(unsigned int j = 0; j < 3; ++j)
s[j] = begin[idx][j] - params[j];
ScalarType g = s[0] * begin[idx][0] + s[1] * begin[idx][1]
+ s[2] * begin[idx][2];
if(temp[idx] < 1.0e-6)
{
matrix[idx * NumParams + 0] = std::sqrt(1 - params[3] * params[3]);
matrix[idx * NumParams + 1] = std::sqrt(1 - params[4] * params[4]);
matrix[idx * NumParams + 2] = std::sqrt(1 - params[5] * params[5]);
}
else
{
matrix[idx * NumParams + 0] = (params[3] * g - s[0]) / temp[idx];
matrix[idx * NumParams + 1] = (params[4] * g - s[1]) / temp[idx];
matrix[idx * NumParams + 2] = (params[5] * g - s[2]) / temp[idx];
}
matrix[idx * NumParams + 3] = g * matrix[idx * NumParams + 0];
matrix[idx * NumParams + 4] = g * matrix[idx * NumParams + 1];
matrix[idx * NumParams + 5] = g * matrix[idx * NumParams + 2];
matrix[idx * NumParams + 6] = -1;
WeightT::template DerivWeigh< NumParams >(temp[idx] - params[6],
matrix + idx * NumParams);
}
}
// Restore the parameter-vector invariants after an LM step: the axis
// direction must be unit length and the axis point is canonicalized to
// the point on the axis closest to the origin.
void Normalize(ScalarType *params) const
{
// Rescale the direction (params[3..5]) to unit length.
ScalarType axisLen = std::sqrt(params[3] * params[3]
+ params[4] * params[4] + params[5] * params[5]);
for(unsigned int i = 3; i < 6; ++i)
params[i] /= axisLen;
// Project the axis point (params[0..2]) onto the foot point of the
// perpendicular from the origin.
ScalarType t = -(params[0] * params[3] + params[1] * params[4]
+ params[2] * params[5]);
for(unsigned int i = 0; i < 3; ++i)
params[i] += t * params[i + 3];
}
};
private:
Vec3f m_axisDir;
Vec3f m_axisPos;
float m_radius;
GfxTL::HyperplaneCoordinateSystem< float, 3 > m_hcs;
float m_angularRotatedRadians;
};
inline float Cylinder::Distance(const Vec3f &p) const
{
Vec3f diff = p - m_axisPos;
float lambda = m_axisDir.dot(diff);
float axisDist = (diff - lambda * m_axisDir).length();
return fabs(axisDist - m_radius);
}
inline void Cylinder::Normal(const Vec3f &p, Vec3f *normal) const
{
Vec3f diff = p - m_axisPos;
float lambda = m_axisDir.dot(diff);
*normal = diff - lambda * m_axisDir;
normal->normalize();
}
inline float Cylinder::DistanceAndNormal(const Vec3f &p, Vec3f *normal) const
{
Vec3f diff = p - m_axisPos;
float lambda = m_axisDir.dot(diff);
*normal = diff - lambda * m_axisDir;
float axisDist = normal->length();
if(axisDist > 0)
*normal /= axisDist;
return fabs(axisDist - m_radius);
}
inline float Cylinder::SignedDistance(const Vec3f &p) const
{
Vec3f diff = p - m_axisPos;
float lambda = m_axisDir.dot(diff);
float axisDist = (diff - lambda * m_axisDir).length();
return axisDist - m_radius;
}
// Refine the cylinder by Levenberg-Marquardt least squares over the
// points in [begin, end). Returns false if the optimization fails, in
// which case the cylinder state is left unchanged.
template< class IteratorT >
bool Cylinder::LeastSquaresFit(IteratorT begin, IteratorT end)
{
// Pack the current state into the parameter vector expected by
// LevMarCylinder: [0..2] axis point, [3..5] axis direction, [6] radius.
float param[7];
for(size_t i = 0; i < 3; ++i)
{
param[i] = m_axisPos[i];
param[i + 3] = m_axisDir[i];
}
param[6] = m_radius;
LevMarCylinder< LevMarLSWeight > levMarCylinder;
if(!LevMar(begin, end, levMarCylinder, param))
return false;
// Unpack the optimized parameters and refresh the derived state.
for(size_t i = 0; i < 3; ++i)
{
m_axisPos[i] = param[i];
m_axisDir[i] = param[i + 3];
}
m_radius = param[6];
m_hcs.FromNormal(m_axisDir);
m_angularRotatedRadians = 0;
return true;
}
// Intersects the ray/line p + t*r (r assumed unit length) with the
// infinite cylinder. Returns the number of intersections (0, 1 or 2)
// and writes the line parameters t into *first (and *second).
inline unsigned int Cylinder::Intersect(const Vec3f &p, const Vec3f &r,
float *first, float *second) const
{
using namespace std;
// Create a coordinate system for the cylinder. In this system, the
// cylinder segment center C is the origin and the cylinder axis direction
// W is the z-axis. U and V are the other coordinate axis directions.
// If P = x*U+y*V+z*W, the cylinder is x^2 + y^2 = r^2, where r is the
// cylinder radius. The end caps are |z| = h/2, where h is the cylinder
// height.
float fRSqr = m_radius * m_radius;
// convert incoming line origin to cylinder coordinates
Vec3f kDiff = p - m_axisPos;
Vec3f kP(kDiff.dot(m_hcs[0]), kDiff.dot(m_hcs[1]), m_axisDir.dot(kDiff));
// Get the z-value, in cylinder coordinates, of the incoming line's
// unit-length direction.
float fDz = m_axisDir.dot(r);
if(fabs(fDz) >= 1.f - 1e-7f)
// The line is parallel to the cylinder axis.
return 0;
// convert incoming line unit-length direction to cylinder coordinates
// (only the x,y components enter the quadratic below).
Vec3f kD(r.dot(m_hcs[0]), r.dot(m_hcs[1]), r.dot(m_axisDir));
float fA0, fA1, fA2, fDiscr, fRoot, fInv;
// Test intersection of line P+t*D with infinite cylinder
// x^2+y^2 = r^2. This reduces to computing the roots of a
// quadratic equation. If P = (px,py,pz) and D = (dx,dy,dz),
// then the quadratic equation is
// (dx^2+dy^2)*t^2 + 2*(px*dx+py*dy)*t + (px^2+py^2-r^2) = 0
fA0 = kP[0]*kP[0] + kP[1]*kP[1] - fRSqr;
fA1 = kP[0]*kD[0] + kP[1]*kD[1];
fA2 = kD[0]*kD[0] + kD[1]*kD[1];
// Quarter discriminant of the quadratic (the factor 2 on fA1 cancels).
fDiscr = fA1*fA1 - fA0*fA2;
if (fDiscr < 0)
// line does not intersect cylinder
return 0;
else if (fDiscr > 1e-7f)
{
// line intersects cylinder in two places
fRoot = sqrt(fDiscr);
fInv = (1.f)/fA2;
*first = (-fA1 - fRoot)*fInv;
*second = (-fA1 + fRoot)*fInv;
return 2;
}
// line is tangent to the cylinder
*first = -fA1/fA2;
return 1;
}
#endif
|
kernel2_wrapper.c | #include <stdio.h>
#include <string.h>
#include <omp.h>
#include "../common.h" // (in directory provided here)
#include "../util/timer/timer.h" // (in directory provided here)
#include "./kernel2_wrapper.h" // (in directory provided here)
//========================================================================================================================================================================================================200
// KERNEL_GPU_CUDA_WRAPPER FUNCTION
//========================================================================================================================================================================================================200
// Offload the findRangeK B+ tree range-search kernel to the device:
// one team per query (bid), one thread per key slot within a node
// (thid). For each query, walk the tree root-to-leaf for maxheight
// levels, tracking the node on the search path of the lower bound
// (currKnode/offset) and of the upper bound (lastKnode/offset_2), then
// emit the start index (recstart) and length (reclength) of the record
// range between keys start[bid] and end[bid].
void
kernel2_wrapper(
knode *knodes,
long knodes_elem,
long knodes_mem, // not length in byte
int order,
long maxheight,
int count,
long *currKnode,
long *offset,
long *lastKnode,
long *offset_2,
int *start,
int *end,
int *recstart,
int *reclength)
{
//======================================================================================================================================================150
// CPU VARIABLES
//======================================================================================================================================================150
// timer
long long offload_start = get_time();
// findRangeK kernel
// one device thread per key slot in a node, capped at 256
size_t threads;
threads = order < 256 ? order : 256;
#pragma omp target data map(to: knodes[0: knodes_mem],\
start[0: count],\
end[0: count],\
currKnode[0: count],\
offset[0: count],\
lastKnode[0: count],\
offset_2[0: count])\
map(tofrom: recstart[0: count])\
map(from: reclength[0: count])
{
#pragma omp target teams num_teams(count) thread_limit(threads)
{
#pragma omp parallel
{
// private thread IDs
int thid = omp_get_thread_num();
int bid = omp_get_team_num();
int i;
// descend one tree level per iteration
for(i = 0; i < maxheight; i++){
// thread thid tests whether start[bid] falls between keys thid and thid+1
if((knodes[currKnode[bid]].keys[thid] <= start[bid]) && (knodes[currKnode[bid]].keys[thid+1] > start[bid])){
// this conditional statement is inserted to avoid a crash due to a bug in the original code
// "offset[bid]" calculated below that later addresses part of knodes goes outside of its bounds cause segmentation fault
// more specifically, values saved into knodes->indices in the main function are out of bounds of knodes that they address
if(knodes[currKnode[bid]].indices[thid] < knodes_elem) {
offset[bid] = knodes[currKnode[bid]].indices[thid];
}
}
if((knodes[lastKnode[bid]].keys[thid] <= end[bid]) && (knodes[lastKnode[bid]].keys[thid+1] > end[bid])){
// this conditional statement is inserted to avoid a crash due to a bug in the original code
// "offset_2[bid]" calculated below that later addresses part of knodes goes outside of its bounds cause segmentation fault
// more specifically, values saved into knodes->indices in the main function are out of bounds of knodes that they address
if(knodes[lastKnode[bid]].indices[thid] < knodes_elem) {
offset_2[bid] = knodes[lastKnode[bid]].indices[thid];
}
}
// all threads must have written offset/offset_2 before thread 0 reads them
#pragma omp barrier
// set for next tree level
if(thid==0){
currKnode[bid] = offset[bid];
lastKnode[bid] = offset_2[bid];
}
#pragma omp barrier
}
// Find the index of the starting record
if(knodes[currKnode[bid]].keys[thid] == start[bid]){
recstart[bid] = knodes[currKnode[bid]].indices[thid];
}
#pragma omp barrier
// Find the index of the ending record
if(knodes[lastKnode[bid]].keys[thid] == end[bid]){
reclength[bid] = knodes[lastKnode[bid]].indices[thid] - recstart[bid]+1;
}
}
}
}
long long offload_end = get_time();
#ifdef DEBUG
for (int i = 0; i < count; i++)
printf("recstart[%d] = %d\n", i, recstart[i]);
for (int i = 0; i < count; i++)
printf("reclength[%d] = %d\n", i, reclength[i]);
#endif
// DISPLAY TIMING
printf("Device offloading time:\n");
printf("%.12f s\n", (float) (offload_end-offload_start) / 1000000);
}
|
matMult_SSE.c | /*
* File: matMult.c
* Author: Malcolm Davis
* Course: Computer Architecture II
* Created on Apr 20, 2018
* 4x4 matrix multiplication
*
* Usage:
* ./matMult for default parameters and random matrixes or;
* ./matMult v1.1.1 v1.1.2 ... v1.1.4 ... v1.2.1 v1.2.2 ... v1.2.4 ... v2.4.1 v2.4.2 ... v2.4.4
*/
#include "xmmintrin.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
/*
 * Print command-line usage information on stdout.
 * Called when the argument count is neither 1 (random matrices) nor 33
 * (two explicit 4x4 matrices).
 */
void usage(){
printf("Usage:\n ./matMult for default parameters and random matrixes or;\n\
./matMult v1.1.1 v1.1.2 ... v1.1.4 ... v1.2.1 v1.2.2 ... v1.2.4 ... v2.4.1 v2.4.2 ... v2.4.4 \n");
}
/*
* Prints a __m128i vector on console
* @param v the vector to print
*/
/*
 * Prints a __m128i vector on console, one tab-separated line.
 * @param v the vector to print
 *
 * NOTE(review): the parameter is typed as an integer vector (__m128i)
 * but its bytes are reinterpreted and printed as four floats -
 * presumably all callers actually store float data; confirm, and
 * consider changing the parameter to __m128* to match.
 */
void printVector(__m128i* v){
float * pointer = (float*)v;
for (int i = 0; i < 4; ++i)
{
printf("%f\t", *pointer);
pointer++;
}
printf("\n");
}
/*
* Prints a 4x4 float matrix on console
* @param matrix pointer to the matrix to print
*/
/*
 * Prints a 4x4 float matrix on console: four tab-separated values per
 * row, one row per line, followed by a blank line.
 * @param matrix pointer to the 16 contiguous floats (row-major)
 */
void prinMatrix(float* matrix){
for (int row = 0; row < 4; ++row)
{
for (int col = 0; col < 4; ++col)
printf("%f\t", matrix[row * 4 + col]);
printf("\n");
}
printf("\n");
}
/*
* Transpose a 4x4 float matrix
* @param src the one to transpose
* @param out the one with the transpose result
*/
/*
 * Transpose a 4x4 float matrix (row-major, 16 contiguous floats).
 * @param src the matrix to transpose (unchanged)
 * @param dst receives the transposed matrix; must not alias src
 *
 * Fixes: removed a stray printf("\n") left over from debugging (a pure
 * transpose helper should not write to stdout), and dropped the
 * pointless floor() - i/4 is already integer division.
 */
void transposeMatrix(float* src, float* dst){
for (int i = 0; i < 16; i++)
{
/* destination row i/4, column i%4 comes from source (i%4, i/4) */
dst[i] = src[(i / 4) + 4 * (i % 4)];
}
}
/*
* Main method, retrive command line options, and multiplies the matrixes
*/
/*
 * Main method, retrieve command line options, and multiplies the matrixes.
 * With no arguments the two 4x4 matrices are filled with rand(); with 32
 * arguments they are read from the command line (first 16 -> m1,
 * next 16 -> m2). Result = m1 * m2, computed with SSE: each output
 * element is a 4-wide multiply of a row of m1 with a row of the
 * transposed m2, followed by a horizontal sum.
 */
int main(int argc, char ** argv){
//If the count of the input is not a 32(16x2(4x4)) greater than 2, then exit
if (argc != 33 && argc != 1){
usage();
return -1;
}
// NOTE(review): matrix1/matrix2 are loaded below but never used
// afterwards (they hold only the first row of each matrix) - they look
// like leftovers; confirm and consider removing.
__m128 matrix1, matrix2, tmpRes, tmpV1, tmpV2;
static float m1[16], m2[16], m2t[16], result[16], *tmpPointer;
if(argc == 1){
srand (time(NULL));
//If no arguments then generate random matrices
// (raw rand() values, so entries are large integers up to RAND_MAX)
#pragma omp parallel for
for (int i = 0; i < 16; ++i)
{
m1[i] = rand();
m2[i] = rand();
}
} else{
//If arguments then set the values into a vector
#pragma omp parallel for
for (int i = 0; i < 16; ++i)
{
m1[i]=atof(argv[i+1]);
m2[i]=atof(argv[i+17]);
}
}
matrix1 = *(__m128*)m1;
matrix2 = *(__m128*)m2;
printf("Matrix 1: \n");
prinMatrix(m1);
printf("Matrix 2: \n");
prinMatrix(m2);
// transpose m2 so that each output element is a row-by-row dot product
transposeMatrix(m2, m2t);
#pragma omp parallel for private(tmpV1, tmpV2, tmpPointer, tmpRes)
for (int i = 0; i < 16; ++i)
{
// row i/4 of m1 times row i%4 of m2t (= column i%4 of m2)
tmpV1 = *(((__m128*)m1)+(int)floor(i/4));
tmpV2 = *(((__m128*)m2t)+i%4);
tmpRes = _mm_mul_ps(tmpV1, tmpV2);
// horizontal sum of the 4 products
tmpPointer = (float*)&tmpRes;
result[i] =0;
for (int j = 0; j < 4; ++j)
{
result[i] += *(tmpPointer++);
}
}
printf("Result *********************** \n");
printf("Result: \n");
prinMatrix(result);
}
private.c | #include "private.h"
#define CG_BEST_TOL 1e-9
#define CG_MIN_TOL 1e-1
#define PRINT_INTERVAL 100
static idxint totCgIts;
static timer linsysTimer;
static pfloat totalSolveTime;
/* Human-readable one-line description of the linear-system solver
 * configuration. Returns a heap-allocated string the caller must free
 * with scs_free, or NULL if allocation fails.
 * Fixes: check the scs_malloc result before writing through it, and use
 * snprintf so the 128-byte buffer can never be overrun. */
char * getLinSysMethod(Data * d, Priv * p) {
	char * str = scs_malloc(sizeof(char) * 128);
	if (!str)
		return NULL;
	snprintf(str, 128, "sparse-indirect, nnz in A = %li, CG tol ~ 1/iter^(%2.2f)",
		(long ) d->A->p[d->n], d->CG_RATE);
	return str;
}
/* Summary line of CG statistics for the completed solve; resets the
 * running counters as a side effect (they accumulate per solve).
 * Returns a heap-allocated string the caller must free with scs_free,
 * or NULL if allocation fails.
 * Fixes: check the scs_malloc result before writing through it, and use
 * snprintf so the 128-byte buffer can never be overrun. */
char * getLinSysSummary(Priv * p, Info * info) {
	char * str = scs_malloc(sizeof(char) * 128);
	if (!str)
		return NULL;
	snprintf(str, 128, "\tLin-sys: avg # CG iterations: %2.2f, avg solve time: %1.2es\n",
		(pfloat ) totCgIts / (info->iter + 1), totalSolveTime / (info->iter + 1) / 1e3);
	totCgIts = 0;
	totalSolveTime = 0;
	return str;
}
/* M = inv ( diag ( RHO_X * I + A'A ) ) */
void getPreconditioner(Data *d, Priv *p) {
idxint i;
pfloat * M = p->M;
AMatrix * A = d->A;
#ifdef EXTRAVERBOSE
scs_printf("getting pre-conditioner\n");
#endif
for (i = 0; i < d->n; ++i) {
M[i] = 1 / (d->RHO_X + calcNormSq(&(A->x[A->p[i]]), A->p[i + 1] - A->p[i]));
/* M[i] = 1; */
}
#ifdef EXTRAVERBOSE
scs_printf("finished getting pre-conditioner\n");
#endif
}
/* Build an explicit CSC copy of A' in p->Ati/Atp/Atx (classic CSparse
 * cs_transpose): count entries per row of A, prefix-sum the counts into
 * the row pointers of A', then scatter each entry A(i,j) to C(j,i). */
static void transpose(Data * d, Priv * p) {
	idxint * Ci = p->Ati;
	idxint * Cp = p->Atp;
	pfloat * Cx = p->Atx;
	idxint m = d->m;
	idxint n = d->n;
	idxint * Ap = d->A->p;
	idxint * Ai = d->A->i;
	pfloat * Ax = d->A->x;
	idxint i, j, q, *z, c1, c2;
#ifdef EXTRAVERBOSE
	timer transposeTimer;
	scs_printf("transposing A\n");
	tic(&transposeTimer);
#endif
	/* NOTE(review): z is not checked for allocation failure before use -
	 * a NULL return from scs_calloc would crash below; confirm whether
	 * the project's OOM policy makes this acceptable. */
	z = scs_calloc(m, sizeof(idxint));
	for (i = 0; i < Ap[n]; i++)
		z[Ai[i]]++; /* row counts */
	cs_cumsum(Cp, z, m); /* row pointers; z now holds working copies */
	for (j = 0; j < n; j++) {
		c1 = Ap[j];
		c2 = Ap[j + 1];
		for (i = c1; i < c2; i++) {
			q = z[Ai[i]];
			Ci[q] = j; /* place A(i,j) as entry C(j,i) */
			Cx[q] = Ax[i];
			z[Ai[i]]++;
		}
	}
	scs_free(z);
#ifdef EXTRAVERBOSE
	scs_printf("finished transposing A, time: %1.2es\n", tocq(&transposeTimer) / 1e3);
#endif
}
/* Release the solver workspace allocated by initPriv. Safe to call on a
 * partially-initialized Priv (unallocated members are NULL from
 * scs_calloc) and on NULL itself.
 * NOTE(review): the per-member NULL guards are kept because scs_free's
 * behavior on NULL is project-defined; if it is a plain free wrapper the
 * guards are redundant. */
void freePriv(Priv * p) {
	if (p) {
		if (p->p)
			scs_free(p->p);
		if (p->r)
			scs_free(p->r);
		if (p->Gp)
			scs_free(p->Gp);
		if (p->tmp)
			scs_free(p->tmp);
		if (p->Ati)
			scs_free(p->Ati);
		if (p->Atx)
			scs_free(p->Atx);
		if (p->Atp)
			scs_free(p->Atp);
		if (p->z)
			scs_free(p->z);
		if (p->M)
			scs_free(p->M);
		scs_free(p);
	}
}
/* solves (I+A'A)x = b, s warm start, solution stored in b */
/*y = (RHO_X * I + A'A)x */
/* y = (RHO_X * I + A'A) x, the operator applied by the CG iteration.
 * Uses p->tmp (length m) as scratch for the intermediate product A*x;
 * both accumulators must be zeroed first because accumBy* only add. */
static void matVec(Data * d, Priv * p, const pfloat * x, pfloat * y) {
	pfloat * tmp = p->tmp;
	memset(tmp, 0, d->m * sizeof(pfloat));
	accumByA(d, p, x, tmp);      /* tmp = A x */
	memset(y, 0, d->n * sizeof(pfloat));
	accumByAtrans(d, p, tmp, y); /* y = A' tmp = A'A x */
	addScaledArray(y, x, d->n, d->RHO_X); /* y += RHO_X * x */
}
/* y += A'*x for A in column-compressed (CSC) format.
 * Each column j of A is a row of A', so every output entry y[j] can be
 * accumulated independently; the loop over columns parallelizes with no
 * write conflicts. Note: y is accumulated into, not overwritten. */
void _accumByAtrans(idxint n, pfloat * Ax, idxint * Ai, idxint * Ap, const pfloat *x, pfloat *y) {
	idxint k, col;
	idxint colStart, colEnd;
	pfloat acc;
#ifdef OPENMP
#pragma omp parallel for private(k,colStart,colEnd,acc)
#endif
	for (col = 0; col < n; col++) {
		acc = y[col];
		colStart = Ap[col];
		colEnd = Ap[col + 1];
		for (k = colStart; k < colEnd; k++)
			acc += Ax[k] * x[Ai[k]];
		y[col] = acc;
	}
}
/* y += A'*x, using the CSC representation of A stored in d. */
void accumByAtrans(Data * d, Priv * p, const pfloat *x, pfloat *y) {
	AMatrix * A = d->A;
	_accumByAtrans(d->n, A->x, A->i, A->p, x, y);
}
/* y += A*x, computed as (A')'*x using the explicit transpose of A built
 * by transpose() and cached in p->At{x,i,p}. */
void accumByA(Data * d, Priv * p, const pfloat *x, pfloat *y) {
	_accumByAtrans(d->m, p->Atx, p->Ati, p->Atp, x, y);
}
/* z = M .* r (elementwise, M is the inverse-diagonal preconditioner)
 * and *ipzr = <z, r>, the preconditioned inner product used by PCG. */
static void applyPreConditioner(pfloat * M, pfloat * z, pfloat * r, idxint n, pfloat *ipzr) {
	idxint k;
	pfloat dot = 0;
	for (k = 0; k < n; ++k) {
		z[k] = r[k] * M[k];
		dot += z[k] * r[k];
	}
	*ipzr = dot;
}
/* Allocate and initialize the indirect-solver workspace: CG vectors,
 * the diagonal preconditioner, and an explicit transpose of A.
 * Returns NULL (after releasing any partial allocation) on failure.
 * Fixes: the original called transpose()/getPreconditioner() BEFORE
 * checking the allocations they write into, never checked p itself, and
 * omitted p->z and p->M from the failure check - any of which caused a
 * NULL dereference on out-of-memory. All checks now happen first. */
Priv * initPriv(Data * d) {
	AMatrix * A = d->A;
	Priv * p = scs_calloc(1, sizeof(Priv));
	if (!p)
		return NULL;
	p->p = scs_malloc((d->n) * sizeof(pfloat));
	p->r = scs_malloc((d->n) * sizeof(pfloat));
	p->Gp = scs_malloc((d->n) * sizeof(pfloat));
	p->tmp = scs_malloc((d->m) * sizeof(pfloat));
	/* preconditioner memory */
	p->z = scs_malloc((d->n) * sizeof(pfloat));
	p->M = scs_malloc((d->n) * sizeof(pfloat));
	/* memory for the explicit transpose of A */
	p->Ati = scs_malloc((A->p[d->n]) * sizeof(idxint));
	p->Atp = scs_malloc((d->m + 1) * sizeof(idxint));
	p->Atx = scs_malloc((A->p[d->n]) * sizeof(pfloat));
	if (!p->p || !p->r || !p->Gp || !p->tmp || !p->z || !p->M
			|| !p->Ati || !p->Atp || !p->Atx) {
		freePriv(p);
		return NULL;
	}
	transpose(d, p);
	getPreconditioner(d, p);
	totalSolveTime = 0;
	totCgIts = 0;
	return p;
}
/* Preconditioned conjugate gradient for (RHO_X*I + A'A) x = b.
 * b holds the right-hand side on entry and the solution on exit;
 * s is an optional warm-start (NULL for a cold start from x = 0).
 * Returns the number of iterations performed (i+1 on convergence,
 * max_its if the tolerance was never reached). */
static idxint pcg(Data *d, Priv * pr, const pfloat * s, pfloat * b, idxint max_its, pfloat tol) {
	idxint i, n = d->n;
	pfloat ipzr, ipzrOld, alpha;
	pfloat *p = pr->p; /* cg direction */
	pfloat *Gp = pr->Gp; /* updated CG direction */
	pfloat *r = pr->r; /* cg residual */
	pfloat *z = pr->z; /* for preconditioning */
	pfloat *M = pr->M; /* inverse diagonal preconditioner */
	if (s == NULL) {
		/* cold start: x = 0, so the residual is b itself */
		memcpy(r, b, n * sizeof(pfloat));
		memset(b, 0, n * sizeof(pfloat));
	} else {
		/* warm start: r = b - G*s, and iterate from x = s */
		matVec(d, pr, s, r);
		addScaledArray(r, b, n, -1);
		scaleArray(r, -1, n);
		memcpy(b, s, n * sizeof(pfloat));
	}
	applyPreConditioner(M, z, r, n, &ipzr);
	memcpy(p, z, n * sizeof(pfloat));
	for (i = 0; i < max_its; ++i) {
		matVec(d, pr, p, Gp);
		/* step length along the search direction */
		alpha = ipzr / innerProd(p, Gp, n);
		addScaledArray(b, p, n, alpha);   /* x += alpha * p */
		addScaledArray(r, Gp, n, -alpha); /* r -= alpha * G p */
		if (calcNorm(r, n) < tol) {
#ifdef EXTRAVERBOSE
			scs_printf("tol: %.4e, resid: %.4e, iters: %li\n", tol, calcNorm(r, n), (long) i+1);
#endif
			return i + 1;
		}
		ipzrOld = ipzr;
		applyPreConditioner(M, z, r, n, &ipzr);
		/* p = z + (ipzr/ipzrOld) * p  (beta update) */
		scaleArray(p, ipzr / ipzrOld, n);
		addScaledArray(p, z, n, 1);
	}
	return i;
}
/* Solve one KKT linear system via the normal equations:
 * reduce to (RHO_X*I + A'A) on the first n variables, solve with PCG,
 * then recover the last m entries of b. s is an optional warm start.
 * The CG tolerance tightens with the outer ADMM iteration count
 * (1/iter^CG_RATE), down to CG_BEST_TOL; iter < 0 requests the best
 * tolerance (used for the final/polish solve, excluded from stats). */
idxint solveLinSys(Data *d, Priv * p, pfloat * b, const pfloat * s, idxint iter) {
	idxint cgIts;
	pfloat cgTol = calcNorm(b, d->n) * (iter < 0 ? CG_BEST_TOL : CG_MIN_TOL / POWF((pfloat) iter + 1, d->CG_RATE));
	tic(&linsysTimer);
	/* solves Mx = b, for x but stores result in b */
	/* s contains warm-start (if available) */
	accumByAtrans(d, p, &(b[d->n]), b);
	/* solves (I+A'A)x = b, s warm start, solution stored in b */
	cgIts = pcg(d, p, s, b, d->n, MAX(cgTol, CG_BEST_TOL));
	scaleArray(&(b[d->n]), -1, d->m);
	accumByA(d, p, b, &(b[d->n]));
	if (iter >= 0) {
		totCgIts += cgIts;
	}
	totalSolveTime += tocq(&linsysTimer);
#ifdef EXTRAVERBOSE
	scs_printf("linsys solve time: %1.2es\n", tocq(&linsysTimer) / 1e3);
#endif
	return 0;
}
|
paint.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP AAA IIIII N N TTTTT %
% P P A A I NN N T %
% PPPP AAAAA I N N N T %
% P A A I N NN T %
% P A A IIIII N N T %
% %
% %
% Methods to Paint on an Image %
% %
% Software Design %
% Cristy %
% July 1998 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/draw.h"
#include "MagickCore/draw-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/resource_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o o d f i l l P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FloodfillPaintImage() changes the color value of any pixel that matches
% target and is an immediate neighbor. If the method FillToBorderMethod is
% specified, the color value is changed for any neighbor pixel that does not
% match the bordercolor member of image.
%
% By default target must match a particular pixel color exactly. However,
% in many cases two colors may differ by a small amount. The fuzz member of
% image defines how much tolerance is acceptable to consider two colors as
% the same. For example, set fuzz to 10 and the color red at intensities of
% 100 and 102 respectively are now interpreted as the same color for the
% purposes of the floodfill.
%
% The format of the FloodfillPaintImage method is:
%
% MagickBooleanType FloodfillPaintImage(Image *image,
% const DrawInfo *draw_info,const PixelInfo target,
% const ssize_t x_offset,const ssize_t y_offset,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o draw_info: the draw info.
%
% o target: the RGB value of the target color.
%
% o x_offset,y_offset: the starting location of the operation.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType FloodfillPaintImage(Image *image,
  const DrawInfo *draw_info,const PixelInfo *target,const ssize_t x_offset,
  const ssize_t y_offset,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
/*
  Scanline flood fill with an explicit stack of horizontal segments
  (no recursion). Matching pixels are first marked in a separate
  one-channel "floodplane" image; the fill color is applied to the
  original image in a second pass.
*/
#define MaxStacksize  524288UL
#define PushSegmentStack(up,left,right,delta) \
{ \
  if (s >= (segment_stack+MaxStacksize)) \
    ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
  else \
    { \
      if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
        { \
          s->x1=(double) (left); \
          s->y1=(double) (up); \
          s->x2=(double) (right); \
          s->y2=(double) (delta); \
          s++; \
        } \
    } \
}

  CacheView
    *floodplane_view,
    *image_view;

  Image
    *floodplane_image;

  MagickBooleanType
    skip,
    status;

  MemoryInfo
    *segment_info;

  PixelInfo
    fill_color,
    pixel;

  register SegmentInfo
    *s;

  SegmentInfo
    *segment_stack;

  ssize_t
    offset,
    start,
    x1,
    x2,
    y;

  /*
    Check boundary conditions.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  if ((image->alpha_trait == UndefinedPixelTrait) &&
      (draw_info->fill.alpha_trait != UndefinedPixelTrait))
    (void) SetImageAlpha(image,OpaqueAlpha,exception);
  /*
    Set floodfill state: a black floodplane marks "not yet visited".
  */
  floodplane_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (floodplane_image == (Image *) NULL)
    return(MagickFalse);
  floodplane_image->alpha_trait=UndefinedPixelTrait;
  floodplane_image->colorspace=GRAYColorspace;
  (void) QueryColorCompliance("#000",AllCompliance,
    &floodplane_image->background_color,exception);
  (void) SetImageBackgroundColor(floodplane_image,exception);
  segment_info=AcquireVirtualMemory(MaxStacksize,sizeof(*segment_stack));
  if (segment_info == (MemoryInfo *) NULL)
    {
      floodplane_image=DestroyImage(floodplane_image);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  segment_stack=(SegmentInfo *) GetVirtualMemoryBlob(segment_info);
  /*
    Push initial segment on stack.
  */
  status=MagickTrue;
  start=0;
  s=segment_stack;
  PushSegmentStack(y_offset,x_offset,x_offset,1);
  PushSegmentStack(y_offset+1,x_offset,x_offset,-1);
  GetPixelInfo(image,&pixel);
  image_view=AcquireVirtualCacheView(image,exception);
  floodplane_view=AcquireAuthenticCacheView(floodplane_image,exception);
  while (s > segment_stack)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Pop segment off stack.
    */
    s--;
    x1=(ssize_t) s->x1;
    x2=(ssize_t) s->x2;
    offset=(ssize_t) s->y2;
    y=(ssize_t) s->y1+offset;
    /*
      Recolor neighboring pixels: scan left from x1 marking matches.
    */
    p=GetCacheViewVirtualPixels(image_view,0,y,(size_t) (x1+1),1,exception);
    q=GetCacheViewAuthenticPixels(floodplane_view,0,y,(size_t) (x1+1),1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    p+=x1*GetPixelChannels(image);
    q+=x1*GetPixelChannels(floodplane_image);
    for (x=x1; x >= 0; x--)
    {
      if (GetPixelGray(floodplane_image,q) != 0)
        break;
      GetPixelInfoPixel(image,p,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert)
        break;
      SetPixelGray(floodplane_image,QuantumRange,q);
      p-=GetPixelChannels(image);
      q-=GetPixelChannels(floodplane_image);
    }
    if (SyncCacheViewAuthenticPixels(floodplane_view,exception) == MagickFalse)
      break;
    skip=x >= x1 ? MagickTrue : MagickFalse;
    if (skip == MagickFalse)
      {
        start=x+1;
        if (start < x1)
          PushSegmentStack(y,start,x1-1,-offset);
        x=x1+1;
      }
    do
    {
      /*
        Scan right, extending the current span and pushing child
        segments for the rows above/below.
      */
      if (skip == MagickFalse)
        {
          if (x < (ssize_t) image->columns)
            {
              p=GetCacheViewVirtualPixels(image_view,x,y,image->columns-x,1,
                exception);
              q=GetCacheViewAuthenticPixels(floodplane_view,x,y,image->columns-
                x,1,exception);
              if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
                break;
              for ( ; x < (ssize_t) image->columns; x++)
              {
                if (GetPixelGray(floodplane_image,q) != 0)
                  break;
                GetPixelInfoPixel(image,p,&pixel);
                if (IsFuzzyEquivalencePixelInfo(&pixel,target) == invert)
                  break;
                SetPixelGray(floodplane_image,QuantumRange,q);
                p+=GetPixelChannels(image);
                q+=GetPixelChannels(floodplane_image);
              }
              status=SyncCacheViewAuthenticPixels(floodplane_view,exception);
              if (status == MagickFalse)
                break;
            }
          PushSegmentStack(y,start,x-1,offset);
          if (x > (x2+1))
            PushSegmentStack(y,x2+1,x-1,-offset);
        }
      skip=MagickFalse;
      x++;
      if (x <= x2)
        {
          /*
            Skip over non-matching pixels up to x2 to find the next
            sub-span on this row.
          */
          p=GetCacheViewVirtualPixels(image_view,x,y,(size_t) (x2-x+1),1,
            exception);
          q=GetCacheViewAuthenticPixels(floodplane_view,x,y,(size_t) (x2-x+1),1,
            exception);
          if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
            break;
          for ( ; x <= x2; x++)
          {
            if (GetPixelGray(floodplane_image,q) != 0)
              break;
            GetPixelInfoPixel(image,p,&pixel);
            if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert)
              break;
            p+=GetPixelChannels(image);
            q+=GetPixelChannels(floodplane_image);
          }
        }
      start=x;
    } while (x <= x2);
  }
  status=MagickTrue;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Tile fill color onto floodplane.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(floodplane_view,0,y,image->columns,1,exception);
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelGray(floodplane_image,p) != 0)
        {
          GetFillColor(draw_info,x,y,&fill_color,exception);
          SetPixelViaPixelInfo(image,&fill_color,q);
        }
      p+=GetPixelChannels(floodplane_image);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  floodplane_view=DestroyCacheView(floodplane_view);
  image_view=DestroyCacheView(image_view);
  segment_info=RelinquishVirtualMemory(segment_info);
  floodplane_image=DestroyImage(floodplane_image);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G r a d i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GradientImage() applies a continuously smooth color transitions along a
% vector from one color to another.
%
% Note, the interface of this method will change in the future to support
% more than one transistion.
%
% The format of the GradientImage method is:
%
% MagickBooleanType GradientImage(Image *image,const GradientType type,
% const SpreadMethod method,const PixelInfo *start_color,
% const PixelInfo *stop_color,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the gradient type: linear or radial.
%
% o spread: the gradient spread meathod: pad, reflect, or repeat.
%
% o start_color: the start color.
%
% o stop_color: the stop color.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Configure a gradient from the image artifacts (direction, angle,
  vector, center, extent, radii) and render it via DrawGradientImage().
  Fix: the SouthGravity case set gradient_vector.y2 from image->columns
  instead of image->rows (copy-paste bug) - a south-pointing gradient
  must run down the rows, matching the North/SouthEast cases.
*/
MagickExport MagickBooleanType GradientImage(Image *image,
  const GradientType type,const SpreadMethod method,const StopInfo *stops,
  const size_t number_stops,ExceptionInfo *exception)
{
  const char
    *artifact;

  DrawInfo
    *draw_info;

  GradientInfo
    *gradient;

  MagickBooleanType
    status;

  /*
    Set gradient start-stop end points.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(stops != (const StopInfo *) NULL);
  assert(number_stops > 0);
  draw_info=AcquireDrawInfo();
  gradient=(&draw_info->gradient);
  gradient->type=type;
  gradient->bounding_box.width=image->columns;
  gradient->bounding_box.height=image->rows;
  artifact=GetImageArtifact(image,"gradient:bounding-box");
  if (artifact != (const char *) NULL)
    (void) ParseAbsoluteGeometry(artifact,&gradient->bounding_box);
  gradient->gradient_vector.x2=(double) image->columns-1;
  gradient->gradient_vector.y2=(double) image->rows-1;
  artifact=GetImageArtifact(image,"gradient:direction");
  if (artifact != (const char *) NULL)
    {
      GravityType
        direction;

      direction=(GravityType) ParseCommandOption(MagickGravityOptions,
        MagickFalse,artifact);
      switch (direction)
      {
        case NorthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case NorthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=(double) image->rows-1;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case WestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case EastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=0.0;
          break;
        }
        case SouthWestGravity:
        {
          gradient->gradient_vector.x1=(double) image->columns-1;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        case SouthGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=0.0;
          /* was image->columns-1: a south gradient spans the rows */
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        case SouthEastGravity:
        {
          gradient->gradient_vector.x1=0.0;
          gradient->gradient_vector.y1=0.0;
          gradient->gradient_vector.x2=(double) image->columns-1;
          gradient->gradient_vector.y2=(double) image->rows-1;
          break;
        }
        default:
          break;
      }
    }
  artifact=GetImageArtifact(image,"gradient:angle");
  if (artifact != (const char *) NULL)
    gradient->angle=StringToDouble(artifact,(char **) NULL);
  artifact=GetImageArtifact(image,"gradient:vector");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf%*[ ,]%lf%*[ ,]%lf",
      &gradient->gradient_vector.x1,&gradient->gradient_vector.y1,
      &gradient->gradient_vector.x2,&gradient->gradient_vector.y2);
  /*
    Default orientation: with no direction hints, a linear gradient is
    vertical (top to bottom).
  */
  if ((GetImageArtifact(image,"gradient:angle") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:direction") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:extent") == (const char *) NULL) &&
      (GetImageArtifact(image,"gradient:vector") == (const char *) NULL))
    if ((type == LinearGradient) && (gradient->gradient_vector.y2 != 0.0))
      gradient->gradient_vector.x2=0.0;
  gradient->center.x=(double) gradient->gradient_vector.x2/2.0;
  gradient->center.y=(double) gradient->gradient_vector.y2/2.0;
  artifact=GetImageArtifact(image,"gradient:center");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->center.x,
      &gradient->center.y);
  artifact=GetImageArtifact(image,"gradient:angle");
  if ((type == LinearGradient) && (artifact != (const char *) NULL))
    {
      double
        sine,
        cosine,
        distance;

      /*
        Reference https://drafts.csswg.org/css-images-3/#linear-gradients.
      */
      sine=sin((double) DegreesToRadians(gradient->angle-90.0));
      cosine=cos((double) DegreesToRadians(gradient->angle-90.0));
      distance=fabs((double) image->columns*cosine)+
        fabs((double) image->rows*sine);
      gradient->gradient_vector.x1=0.5*(image->columns-distance*cosine);
      gradient->gradient_vector.y1=0.5*(image->rows-distance*sine);
      gradient->gradient_vector.x2=0.5*(image->columns+distance*cosine);
      gradient->gradient_vector.y2=0.5*(image->rows+distance*sine);
    }
  gradient->radii.x=(double) MagickMax(image->columns,image->rows)/2.0;
  gradient->radii.y=gradient->radii.x;
  artifact=GetImageArtifact(image,"gradient:extent");
  if (artifact != (const char *) NULL)
    {
      if (LocaleCompare(artifact,"Circle") == 0)
        {
          gradient->radii.x=(double) MagickMax(image->columns,image->rows)/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Diagonal") == 0)
        {
          gradient->radii.x=(double) (sqrt(image->columns*image->columns+
            image->rows*image->rows))/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Ellipse") == 0)
        {
          gradient->radii.x=(double) image->columns/2.0;
          gradient->radii.y=(double) image->rows/2.0;
        }
      if (LocaleCompare(artifact,"Maximum") == 0)
        {
          gradient->radii.x=(double) MagickMax(image->columns,image->rows)/2.0;
          gradient->radii.y=gradient->radii.x;
        }
      if (LocaleCompare(artifact,"Minimum") == 0)
        {
          gradient->radii.x=(double) (MagickMin(image->columns,image->rows))/
            2.0;
          gradient->radii.y=gradient->radii.x;
        }
    }
  artifact=GetImageArtifact(image,"gradient:radii");
  if (artifact != (const char *) NULL)
    (void) sscanf(artifact,"%lf%*[ ,]%lf",&gradient->radii.x,
      &gradient->radii.y);
  gradient->radius=MagickMax(gradient->radii.x,gradient->radii.y);
  gradient->spread=method;
  /*
    Define the gradient to fill between the stops.
  */
  gradient->number_stops=number_stops;
  gradient->stops=(StopInfo *) AcquireQuantumMemory(gradient->number_stops,
    sizeof(*gradient->stops));
  if (gradient->stops == (StopInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) CopyMagickMemory(gradient->stops,stops,(size_t) number_stops*
    sizeof(*stops));
  /*
    Draw a gradient on the image.
  */
  status=DrawGradientImage(image,draw_info,exception);
  draw_info=DestroyDrawInfo(draw_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O i l P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OilPaintImage() applies a special effect filter that simulates an oil
% painting. Each pixel is replaced by the most frequent color occurring
% in a circular region defined by radius.
%
% The format of the OilPaintImage method is:
%
% Image *OilPaintImage(const Image *image,const double radius,
% const double sigma,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o radius: the radius of the circular neighborhood.
%
% o sigma: the standard deviation of the Gaussian, in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t **DestroyHistogramThreadSet(size_t **histogram)
{
  /*
    Release the per-thread histogram buffers and the pointer table itself.
    Always returns NULL so callers can overwrite their handle in one step.
  */
  register ssize_t
    slot;

  ssize_t
    limit;

  assert(histogram != (size_t **) NULL);
  limit=(ssize_t) GetMagickResourceLimit(ThreadResource);
  for (slot=0; slot < limit; slot++)
    if (histogram[slot] != (size_t *) NULL)
      histogram[slot]=(size_t *) RelinquishMagickMemory(histogram[slot]);
  return((size_t **) RelinquishMagickMemory(histogram));
}
static size_t **AcquireHistogramThreadSet(const size_t count)
{
  /*
    Allocate one zero-filled histogram buffer (count bins) per worker
    thread.  Returns NULL on allocation failure; partially-built sets are
    torn down via DestroyHistogramThreadSet (safe because the table is
    zeroed before the per-slot allocations begin).
  */
  register ssize_t
    slot;

  size_t
    **table,
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  table=(size_t **) AcquireQuantumMemory(number_threads,sizeof(*table));
  if (table == (size_t **) NULL)
    return((size_t **) NULL);
  (void) ResetMagickMemory(table,0,number_threads*sizeof(*table));
  for (slot=0; slot < (ssize_t) number_threads; slot++)
  {
    table[slot]=(size_t *) AcquireQuantumMemory(count,sizeof(**table));
    if (table[slot] == (size_t *) NULL)
      return(DestroyHistogramThreadSet(table));
  }
  return(table);
}
MagickExport Image *OilPaintImage(const Image *image,const double radius,
  const double sigma,ExceptionInfo *exception)
{
#define NumberPaintBins 256
#define OilPaintImageTag "OilPaint/Image"

  CacheView
    *image_view,
    *paint_view;

  Image
    *linear_image,
    *paint_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  size_t
    **histograms,
    width;

  ssize_t
    center,
    y;

  /*
    Initialize painted image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  width=GetOptimalKernelWidth2D(radius,sigma);
  linear_image=CloneImage(image,0,0,MagickTrue,exception);
  paint_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if ((linear_image == (Image *) NULL) || (paint_image == (Image *) NULL))
    {
      if (linear_image != (Image *) NULL)
        linear_image=DestroyImage(linear_image);
      if (paint_image != (Image *) NULL)
        paint_image=DestroyImage(paint_image);  /* bug fix: was
          linear_image=DestroyImage(paint_image), which clobbered
          linear_image and double-destroyed it above */
      return((Image *) NULL);
    }
  if (SetImageStorageClass(paint_image,DirectClass,exception) == MagickFalse)
    {
      linear_image=DestroyImage(linear_image);
      paint_image=DestroyImage(paint_image);
      return((Image *) NULL);
    }
  histograms=AcquireHistogramThreadSet(NumberPaintBins);
  if (histograms == (size_t **) NULL)
    {
      linear_image=DestroyImage(linear_image);
      paint_image=DestroyImage(paint_image);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  /*
    Oil paint image.
  */
  status=MagickTrue;
  progress=0;
  /*
    Offset of the center pixel within the (columns+width) x width
    neighborhood window returned by GetCacheViewVirtualPixels below.
  */
  center=(ssize_t) GetPixelChannels(linear_image)*(linear_image->columns+width)*
    (width/2L)+GetPixelChannels(linear_image)*(width/2L);
  image_view=AcquireVirtualCacheView(linear_image,exception);
  paint_view=AcquireAuthenticCacheView(paint_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(linear_image,paint_image,linear_image->rows,1)
#endif
  for (y=0; y < (ssize_t) linear_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register size_t
      *histogram;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      (width/2L),linear_image->columns+width,width,exception);
    q=QueueCacheViewAuthenticPixels(paint_view,0,y,paint_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    histogram=histograms[GetOpenMPThreadId()];
    for (x=0; x < (ssize_t) linear_image->columns; x++)
    {
      register ssize_t
        i,
        u;

      size_t
        count;

      ssize_t
        j,
        k,
        n,
        v;

      /*
        Assign most frequent color: histogram intensities over the
        width x width window; j tracks the pixel offset of the current
        mode, k the row offset within the window.
      */
      k=0;
      j=0;
      count=0;
      (void) ResetMagickMemory(histogram,0,NumberPaintBins*sizeof(*histogram));
      for (v=0; v < (ssize_t) width; v++)
      {
        for (u=0; u < (ssize_t) width; u++)
        {
          n=(ssize_t) ScaleQuantumToChar(ClampToQuantum(GetPixelIntensity(
            linear_image,p+GetPixelChannels(linear_image)*(u+k))));
          histogram[n]++;
          if (histogram[n] > count)
            {
              j=k+u;
              count=histogram[n];
            }
        }
        k+=(ssize_t) (linear_image->columns+width);
      }
      for (i=0; i < (ssize_t) GetPixelChannels(linear_image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(linear_image,i);
        PixelTrait traits=GetPixelChannelTraits(linear_image,channel);
        PixelTrait paint_traits=GetPixelChannelTraits(paint_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (paint_traits == UndefinedPixelTrait))
          continue;
        if (((paint_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(linear_image,p) == 0))
          {
            /* Copy-through channels and masked pixels keep the center
               pixel's value. */
            SetPixelChannel(paint_image,channel,p[center+i],q);
            continue;
          }
        SetPixelChannel(paint_image,channel,p[j*GetPixelChannels(linear_image)+
          i],q);
      }
      p+=GetPixelChannels(linear_image);
      q+=GetPixelChannels(paint_image);
    }
    if (SyncCacheViewAuthenticPixels(paint_view,exception) == MagickFalse)
      status=MagickFalse;
    if (linear_image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_OilPaintImage)
#endif
        proceed=SetImageProgress(linear_image,OilPaintImageTag,progress++,
          linear_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  paint_view=DestroyCacheView(paint_view);
  image_view=DestroyCacheView(image_view);
  histograms=DestroyHistogramThreadSet(histograms);
  linear_image=DestroyImage(linear_image);
  if (status == MagickFalse)
    paint_image=DestroyImage(paint_image);  /* DestroyImage returns NULL */
  return(paint_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O p a q u e P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpaquePaintImage() changes any pixel that matches color with the color
% defined by fill argument.
%
% By default color must match a particular pixel color exactly. However, in
% many cases two colors may differ by a small amount. Fuzz defines how much
% tolerance is acceptable to consider two colors as the same. For example,
% set fuzz to 10 and the color red at intensities of 100 and 102 respectively
% are now interpreted as the same color.
%
% The format of the OpaquePaintImage method is:
%
% MagickBooleanType OpaquePaintImage(Image *image,const PixelInfo *target,
% const PixelInfo *fill,const MagickBooleanType invert,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the RGB value of the target color.
%
% o fill: the replacement color.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OpaquePaintImage(Image *image,
  const PixelInfo *target,const PixelInfo *fill,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
#define OpaquePaintImageTag "Opaque/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    conform_fill,
    conform_target,
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (PixelInfo *) NULL);
  assert(fill != (PixelInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Painting writes pixels in place, so direct pixel access is required.
  */
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Adapt the fill and target colors to this image's colorspace/channels.
  */
  ConformPixelInfo(image,fill,&conform_fill,exception);
  ConformPixelInfo(image,target,&conform_target,exception);
  /*
    Make image color opaque: replace every pixel fuzzily matching the
    target (or, when invert != MagickFalse, every non-matching pixel)
    with the fill color, one row per loop iteration.
  */
  status=MagickTrue;
  progress=0;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Masked pixels are left untouched. */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      GetPixelInfoPixel(image,q,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,&conform_target) != invert)
        {
          /* Only channels flagged exactly UpdatePixelTrait are written.
             NOTE(review): this compares traits == UpdatePixelTrait rather
             than testing the UpdatePixelTrait bit — confirm intended. */
          if (image->channel_map[RedPixelChannel].traits == UpdatePixelTrait)
            SetPixelRed(image,conform_fill.red,q);
          if (image->channel_map[GreenPixelChannel].traits == UpdatePixelTrait)
            SetPixelGreen(image,conform_fill.green,q);
          if (image->channel_map[BluePixelChannel].traits == UpdatePixelTrait)
            SetPixelBlue(image,conform_fill.blue,q);
          if (image->channel_map[BlackPixelChannel].traits == UpdatePixelTrait)
            SetPixelBlack(image,conform_fill.black,q);
          if (image->channel_map[AlphaPixelChannel].traits == UpdatePixelTrait)
            SetPixelAlpha(image,conform_fill.alpha,q);
        }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_OpaquePaintImage)
#endif
        proceed=SetImageProgress(image,OpaquePaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t P a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentPaintImage() changes the opacity value associated with any pixel
% that matches color to the value defined by opacity.
%
% By default color must match a particular pixel color exactly. However, in
% many cases two colors may differ by a small amount. Fuzz defines how much
% tolerance is acceptable to consider two colors as the same. For example,
% set fuzz to 10 and the color red at intensities of 100 and 102 respectively
% are now interpreted as the same color.
%
% The format of the TransparentPaintImage method is:
%
% MagickBooleanType TransparentPaintImage(Image *image,
% const PixelInfo *target,const Quantum opacity,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o target: the target color.
%
% o opacity: the replacement opacity value.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransparentPaintImage(Image *image,
  const PixelInfo *target,const Quantum opacity,const MagickBooleanType invert,
  ExceptionInfo *exception)
{
#define TransparentPaintImageTag "Transparent/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(target != (PixelInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Ensure the image has an alpha channel to write to.
  */
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Make image color transparent: set the alpha of every pixel fuzzily
    matching the target (or every non-match when invert != MagickFalse)
    to the requested opacity, one row per loop iteration.
  */
  status=MagickTrue;
  progress=0;
  GetPixelInfo(image,&zero);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Masked pixels are left untouched. */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      GetPixelInfoPixel(image,q,&pixel);
      if (IsFuzzyEquivalencePixelInfo(&pixel,target) != invert)
        SetPixelAlpha(image,opacity,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImage)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p a r e n t P a i n t I m a g e C h r o m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransparentPaintImageChroma() changes the opacity value associated with any
% pixel that matches color to the value defined by opacity.
%
% As there is one fuzz value for the all the channels, TransparentPaintImage()
% is not suitable for the operations like chroma, where the tolerance for
% similarity of two color component (RGB) can be different. Thus we define
% this method to take two target pixels (one low and one high) and all the
% pixels of an image which are lying between these two pixels are made
% transparent.
%
% The format of the TransparentPaintImageChroma method is:
%
% MagickBooleanType TransparentPaintImageChroma(Image *image,
% const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
% const MagickBooleanType invert,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o low: the low target color.
%
% o high: the high target color.
%
% o opacity: the replacement opacity value.
%
% o invert: paint any pixel that does not match the target color.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType TransparentPaintImageChroma(Image *image,
  const PixelInfo *low,const PixelInfo *high,const Quantum opacity,
  const MagickBooleanType invert,ExceptionInfo *exception)
{
#define TransparentPaintImageTag "Transparent/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(high != (PixelInfo *) NULL);
  assert(low != (PixelInfo *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Ensure the image has an alpha channel to write to.
  */
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Make image color transparent: unlike TransparentPaintImage() there is
    no single fuzz factor; a pixel matches when each RGB component lies
    within the inclusive [low, high] range.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      match;

    PixelInfo
      pixel;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* Masked pixels are left untouched. */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      GetPixelInfoPixel(image,q,&pixel);
      /* Per-component range test; alpha/black are not consulted. */
      match=((pixel.red >= low->red) && (pixel.red <= high->red) &&
        (pixel.green >= low->green) && (pixel.green <= high->green) &&
        (pixel.blue >= low->blue) && (pixel.blue <= high->blue)) ? MagickTrue :
        MagickFalse;
      if (match != invert)
        SetPixelAlpha(image,opacity,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransparentPaintImageChroma)
#endif
        proceed=SetImageProgress(image,TransparentPaintImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
mean.c | /******************************************************************
* Melissa *
*-----------------------------------------------------------------*
* COPYRIGHT (C) 2017 by INRIA and EDF. ALL RIGHTS RESERVED. *
* *
* This source is covered by the BSD 3-Clause License. *
* Refer to the LICENCE file for further information. *
* *
*-----------------------------------------------------------------*
* Original Contributors: *
* Theophile Terraz, *
* Bruno Raffin, *
* Alejandro Ribes, *
* Bertrand Iooss, *
******************************************************************/
/**
*
* @file mean.c
* @brief Mean related functions.
* @author Terraz Théophile
* @date 2016-15-02
*
**/
#if BUILD_WITH_MPI == 0
#undef BUILD_WITH_MPI
#endif // BUILD_WITH_MPI
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#ifdef BUILD_WITH_OPENMP
#include <omp.h>
#endif // BUILD_WITH_OPENMP
#include "mean.h"
#include "melissa_utils.h"
/**
*******************************************************************************
*
* @ingroup stats_base
*
* This function initializes a mean structure.
*
*******************************************************************************
*
* @param[in,out] *mean
* the mean structure to initialize
*
* @param[in] vect_size
* size of the mean vector
*
*******************************************************************************/
void init_mean (mean_t *mean,
                const int vect_size)
{
    /* Reset the sample counter and allocate a zero-filled mean vector. */
    mean->increment = 0;
    mean->mean = melissa_calloc (vect_size, sizeof(double));
}
/**
*******************************************************************************
*
* @ingroup stats_base
*
* This function updates the incremental mean.
*
*******************************************************************************
*
* @param[in,out] *mean
* input: previously computed iterative mean,
* output: updated mean
*
* @param[in] in_vect[]
* input vector of double values
*
* @param[in] vect_size
* size of the input vectors
*
*******************************************************************************/
void increment_mean (mean_t *mean,
                     double in_vect[],
                     const int vect_size)
{
    int idx;

    /* Welford-style incremental update: mean += (x - mean) / n. */
    mean->increment += 1;
#pragma omp parallel for schedule(static)
    for (idx=0; idx<vect_size; idx++)
    {
        mean->mean[idx] += (in_vect[idx] - mean->mean[idx]) / mean->increment;
    }
}
/**
*******************************************************************************
*
* @ingroup stats_base
*
* This function aggregates two partial means.
*
*******************************************************************************
*
* @param[in] *mean1
* first input vector of partial means
*
* @param[in] *mean2
* second input vector of partial means
*
* @param[out] *updated_mean
* the updated mean
*
* @param[in] vect_size
* size of the input and output vectors
*
*******************************************************************************/
void update_mean (mean_t *mean1,
                  mean_t *mean2,
                  mean_t *updated_mean,
                  const int vect_size)
{
    int idx;
    double diff;

    /* Pairwise combination of two partial means (Chan et al. scheme):
       combined = m1 + n2 * (m2 - m1) / (n1 + n2). */
    updated_mean->increment = mean2->increment + mean1->increment;
#pragma omp parallel for schedule(static) private(diff)
    for (idx=0; idx<vect_size; idx++)
    {
        diff = (mean2->mean[idx] - mean1->mean[idx]);
        updated_mean->mean[idx] = mean1->mean[idx] + mean2->increment * diff / updated_mean->increment;
    }
}
#ifdef BUILD_WITH_MPI
/**
*******************************************************************************
*
* @ingroup stats_base
*
* This function aggregates the partial means from all processes on process 0.
*
*******************************************************************************
*
* @param[in,out] *mean[]
* input: partial mean,
* output: global mean on process 0
*
* @param[in] vect_size
* size of the input vector
*
* @param[in] rank
* process rank in "comm"
*
* @param[in] comm_size
* number of processes in "comm"
*
* @param[in] comm
* MPI communicator
*
*******************************************************************************/
void update_global_mean (mean_t *mean,
                         const int vect_size,
                         const int rank,
                         const int comm_size,
                         MPI_Comm comm)
{
    double *global_mean = NULL;
    double *global_mean_ptr = NULL;
    double *mean_ptr = NULL;
    double delta;
    int temp_inc;
    int i, j;
    MPI_Status status;

    if (rank == 0)
    {
        /* Seed the accumulator with rank 0's own partial mean. */
        global_mean = melissa_malloc (vect_size * sizeof(double));
        memcpy (global_mean, mean->mean, vect_size * sizeof(double));
        for (i=1; i<comm_size; i++)
        {
            /* Receive rank i's sample count and partial mean.  mean->mean
               is reused as the receive buffer; the local partial mean was
               already copied into global_mean above, so nothing is lost. */
            MPI_Recv (&temp_inc, 1, MPI_INT, i, i, comm, &status);
            MPI_Recv (mean->mean, vect_size, MPI_DOUBLE, i, comm_size+i, comm, &status);
            mean_ptr = mean->mean;
            global_mean_ptr = global_mean;
            /* Pairwise combination: g = m_i + n_acc*(g - m_i)/(n_acc + n_i). */
            for (j=0; j<vect_size; j++, mean_ptr++, global_mean_ptr++)
            {
                delta = (*global_mean_ptr - *mean_ptr);
                *global_mean_ptr = *mean_ptr + mean->increment * delta / (mean->increment + temp_inc);
            }
            mean->increment += temp_inc;
        }
        /* Publish the aggregated result in mean->mean on rank 0. */
        memcpy (mean->mean, global_mean, vect_size * sizeof(double));
        melissa_free (global_mean);
    }
    else /* rank != 0: send our sample count and partial mean to rank 0 */
    {
        MPI_Send (&(mean->increment), 1, MPI_INT, 0, rank, comm);
        MPI_Send (mean->mean, vect_size, MPI_DOUBLE, 0, comm_size+rank, comm);
    }
}
#endif // BUILD_WITH_MPI
/**
*******************************************************************************
*
* @ingroup save_stats
*
* This function writes an array of mean structures on disc
*
*******************************************************************************
*
* @param[in] *means
* mean structures to save, size nb_time_steps
*
* @param[in] vect_size
* size of double vectors
*
* @param[in] nb_time_steps
* number of time_steps of the study
*
* @param[in] f
* file descriptor
*
*******************************************************************************/
void save_mean(mean_t *means,
               int vect_size,
               int nb_time_steps,
               FILE* f)
{
    int i;

    for (i=0; i<nb_time_steps; i++)
    {
        /* Fix: fwrite return values were previously ignored; a short
           write now aborts the save with a diagnostic instead of silently
           producing a truncated checkpoint. */
        if (fwrite(means[i].mean, sizeof(double), vect_size, f) != (size_t) vect_size ||
            fwrite(&means[i].increment, sizeof(int), 1, f) != 1)
        {
            fprintf(stderr, "ERROR: short write while saving mean (time step %d)\n", i);
            return;
        }
    }
}
/**
*******************************************************************************
*
* @ingroup save_stats
*
* This function reads an array of mean structures on disc
*
*******************************************************************************
*
* @param[in] *means
* mean structures to read, size nb_time_steps
*
* @param[in] vect_size
* size of double vectors
*
* @param[in] nb_time_steps
* number of time_steps of the study
*
* @param[in] f
* file descriptor
*
*******************************************************************************/
void read_mean(mean_t *means,
               int vect_size,
               int nb_time_steps,
               FILE* f)
{
    int i;

    for (i=0; i<nb_time_steps; i++)
    {
        /* Fix: fread return values were previously ignored; a short read
           (truncated or corrupt checkpoint) now stops the restore with a
           diagnostic instead of leaving later entries uninitialized. */
        if (fread(means[i].mean, sizeof(double), vect_size, f) != (size_t) vect_size ||
            fread(&means[i].increment, sizeof(int), 1, f) != 1)
        {
            fprintf(stderr, "ERROR: short read while reading mean (time step %d)\n", i);
            return;
        }
    }
}
/**
*******************************************************************************
*
* @ingroup stats_base
*
* This function frees a mean structure.
*
*******************************************************************************
*
* @param[in] *mean
* the mean structure to free
*
*******************************************************************************/
void free_mean (mean_t *mean)
{
    /* Release the mean vector; the mean_t struct itself is owned by the
       caller and is not freed here. */
    melissa_free (mean->mean);
}
|
mergeOpenMP.c | #include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>
#include "timer.h"
#define TASK_SIZE 50
/* Merge the two sorted halves X[0..n/2) and X[n/2..n) into tmp,
   then copy the merged result back into X.  tmp must hold n ints. */
void merge(int *X, int n, int *tmp) {
    int lo = 0;      /* cursor in the lower half  [0, n/2)  */
    int hi = n/2;    /* cursor in the upper half  [n/2, n)  */
    int out = 0;     /* cursor in tmp */

    /* Take the smaller head element until one half is exhausted. */
    while (lo < n/2 && hi < n)
        tmp[out++] = (X[lo] < X[hi]) ? X[lo++] : X[hi++];
    /* Drain whichever half still has elements. */
    while (lo < n/2)
        tmp[out++] = X[lo++];
    while (hi < n)
        tmp[out++] = X[hi++];
    memcpy(X, tmp, n*sizeof(int));
}
/* Parallel top-down merge sort: sort each half in an OpenMP task, then
   merge.  tmp must be an n-element scratch buffer.  Must be first entered
   from within a parallel region (see the single construct in main). */
void mergeSort(int *X, int n, int *tmp)
{
   if (n < 2) return;

   /* Tasks are only deferred when the partition is larger than TASK_SIZE,
      limiting scheduling overhead on small subproblems. */
#pragma omp task shared(X) if (n > TASK_SIZE)
   mergeSort(X, n/2, tmp);

#pragma omp task shared(X) if (n > TASK_SIZE)
   mergeSort(X+(n/2), n-(n/2), tmp + n/2);

   /* Both halves must be fully sorted before merging them. */
#pragma omp taskwait
   merge(X, n, tmp);
}
//Auxiliary function used to help print an array for debugging
void printArray(int *a, int size){
    /* Emit the elements space-separated on one line, then a newline. */
    int idx = 0;
    while (idx < size) {
        printf("%d ", a[idx]);
        idx++;
    }
    printf("\n");
}
//Auxiliary function: prints a message if the list is not properly sorted
void isSorted(int *a, int size){
    /* Fix: the original message had no trailing newline and was printed
       once for every out-of-order pair; report once and stop scanning. */
    for(int i = 0; i < size - 1; i++){
        if(a[i] > a[i + 1]){
            printf("List not sorted\n");
            return;
        }
    }
}
//Auxiliary function used to generate the array to sort
//Fix: the original ignored the caller-supplied buffer and always allocated
//a fresh one, leaking the caller's allocation.  Fill `arr` in place when it
//is provided; allocate (and return) a new zeroed buffer only when the
//caller passes NULL.  Returns NULL if allocation fails.
int * generateArray( int *arr, int size){
    int *unsorted = (arr != NULL) ? arr : calloc(size, sizeof(int));
    if (unsorted == NULL)
        return NULL;
    for (int i = 0; i < size; i++){
        unsorted[i] = rand() % 101;   /* values in [0, 100] */
    }
    return unsorted;
}
int main(int argc, char *argv[]) {
    srand(123456);

    /* Problem size and thread count from argv, with defaults.
       Fix: the original read argv[2] whenever argc > 1, which is an
       out-of-bounds access when only the size argument is supplied. */
    int size = 10;
    int numThreads = 2;
    if (argc <= 1){
        printf("Default 10\n");
    }
    else{
        size = atoi(argv[1]);
        if (argc > 2)
            numThreads = atoi(argv[2]);
        printf("Custom Size\n");
    }
    if (size <= 0 || numThreads <= 0){
        fprintf(stderr, "Invalid size or thread count\n");
        return 1;
    }

    int *unsorted = calloc(size, sizeof(int));
    int *tmp = calloc(size, sizeof(int));
    if (unsorted == NULL || tmp == NULL){
        fprintf(stderr, "Allocation failed\n");
        free(unsorted);
        free(tmp);
        return 1;
    }
    omp_set_dynamic(0);              /** Explicitly disable dynamic teams **/
    omp_set_num_threads(numThreads); /** Use N threads for all parallel regions **/
    unsorted = generateArray(unsorted, size);

    StartTimer();
#pragma omp parallel
    {
        /* One thread seeds the recursion; nested calls spawn tasks. */
#pragma omp single
        mergeSort(unsorted, size, tmp);
    }
    double runtime = GetTimer();

    printf("Time: %fs\n", runtime / 1000);
    isSorted(unsorted, size);
    free(unsorted);
    free(tmp);
    return (0);
}
|
omp_report_mask.c | /* Routine reports OpenMP process affinity information.
Get thread number and cpus (cpu_ids)
Create static space (proc_mask) to hold all masks (done in a single region)
Determine the mask for each thread (insert it in proc_mask)
print mask header (one thread in single region)
print mask (one thread in single region)
free spaces
return
*/
#include <stdio.h>
#include <omp.h>
#include <unistd.h>
#include <stdlib.h>
#include <ctype.h>
#include "opts.h"
void print_mask(int hd_prnt, char* name, int multi_node, int rank, int thrd, int ncpus, int nranks, int nthrds, int *proc_mask, int tpc, char l);
int boundto(int* nelements_set, int* int_mask);
int get_threads_per_node();
/* Report each OpenMP thread's CPU affinity mask.  Must be called from
   inside a parallel region; one thread allocates the shared mask table,
   every thread fills in its own row, and one thread prints the table. */
void omp_report_mask(){
   int nthrds, thrd;      //Thread info
   int ncpus, nel_set;
   static int ** proc_mask;   // shared table: one affinity-mask row per thread
   int i,j, ierr;
   char * dummy = NULL;   // fix: was passed to print_mask uninitialized (UB);
                          // presumably unused when multi_node==0 -- TODO confirm
   char l,p;
   int tpc;               // hwthreads/core
   Maskopts opts;

   // get print_speed fast or slow (f|s); listing cores or SMT (c|s)
   p = opts.get_p();
   l = opts.get_l();

   tpc=get_threads_per_node();

   thrd   = omp_get_thread_num();
   nthrds = omp_get_num_threads();
   ncpus  = (int) sysconf(_SC_NPROCESSORS_ONLN);
   if(omp_get_num_procs() != ncpus){
      printf("ERROR: ncpus_by_omp=%d, ncpus_sched=%d\n",omp_get_num_procs(),ncpus);
      exit(1);
   }

   // One thread allocates and zeroes the shared mask table.
   #pragma omp single
   {
      proc_mask = (int **) malloc(sizeof(int*)*nthrds);
      for(i=0;i<nthrds;i++) proc_mask[i] = (int * ) malloc(sizeof(int)*ncpus );
      for(i=0;i<nthrds;i++) for(j=0;j<ncpus;j++) proc_mask[i][j] =0;
   }

   // Every thread records its own affinity mask, then all synchronize.
   ierr = boundto(&nel_set,proc_mask[thrd]);
   #pragma omp barrier

   #pragma omp single
   {
      print_mask(1, dummy, 0, 0,0, ncpus, 1,nthrds, proc_mask[thrd],tpc,l); //print header
      for(thrd=0;thrd<nthrds;thrd++){
         print_mask(0, dummy, 0, 0,thrd, ncpus, 1,nthrds, proc_mask[thrd],tpc,l);
         if(p == 's') ierr=usleep(300000);
      }
      if(nthrds>50)
         // fix: after the loop thrd == nthrds, so proc_mask[thrd] was an
         // out-of-bounds read; the trailer only needs a valid row.
         print_mask(2, dummy, 0, 0,0, ncpus, 1,nthrds, proc_mask[0],tpc,l); //print header
      for(i=0;i<nthrds;i++) free( proc_mask[i]);
      free( proc_mask);
   }
}
/* Fortran-callable alias (trailing-underscore naming convention). */
void omp_report_mask_(){ omp_report_mask(); }
|
omp-low.c | /* Lowering pass for OMP directives. Converts OMP directives into explicit
calls to the runtime library (libgomp), data marshalling to implement data
sharing and copying clauses, offloading to accelerators, and more.
Contributed by Diego Novillo <dnovillo@redhat.com>
Copyright (C) 2005-2020 Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "splay-tree.h"
#include "omp-general.h"
#include "omp-low.h"
#include "omp-grid.h"
#include "gimple-low.h"
#include "alloc-pool.h"
#include "symbol-summary.h"
#include "tree-nested.h"
#include "context.h"
#include "gomp-constants.h"
#include "gimple-pretty-print.h"
#include "hsa-common.h"
#include "stringpool.h"
#include "attribs.h"
/* Lowering of OMP parallel and workshare constructs proceeds in two
phases. The first phase scans the function looking for OMP statements
and then for variables that must be replaced to satisfy data sharing
clauses. The second phase expands code for the constructs, as well as
re-gimplifying things when variables have been replaced with complex
expressions.
Final code generation is done by pass_expand_omp. The flowgraph is
scanned for regions which are then moved to a new
function, to be invoked by the thread library, or offloaded. */
/* Context structure. Used to store information about each parallel
directive in the code. */
struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer. */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs. */
  struct omp_context *outer;

  /* The OMP directive statement this context was created for. */
  gimple *stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads. */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed. srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn,
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn. */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct. In the case of a parallel, this is in the child function. */
  tree block_vars;

  /* Label to which GOMP_cancel{,llation_point} and explicit and implicit
     barriers should jump to during omplower pass. */
  tree cancel_label;

  /* The sibling GIMPLE_OMP_FOR simd with _simt_ clause or NULL
     otherwise. */
  gimple *simt_stmt;

  /* For task reductions registered in this context, a vector containing
     the length of the private copies block (if constant, otherwise NULL)
     and then offsets (if constant, otherwise NULL) for each entry. */
  vec<tree> task_reductions;

  /* A hash map from the reduction clauses to the registered array
     elts. */
  hash_map<tree, unsigned> *task_reduction_map;

  /* And a hash map from the lastprivate(conditional:) variables to their
     corresponding tracking loop iteration variables. */
  hash_map<tree, tree> *lastprivate_conditional_map;

  /* A tree_list of the reduction clauses in this context. This is
     only used for checking the consistency of OpenACC reduction
     clauses in scan_omp_for and is not guaranteed to contain a valid
     value outside of this function. */
  tree local_reduction_clauses;

  /* A tree_list of the reduction clauses in outer contexts. This is
     only used for checking the consistency of OpenACC reduction
     clauses in scan_omp_for and is not guaranteed to contain a valid
     value outside of this function. */
  tree outer_reduction_clauses;

  /* Nesting depth of this context. Used to beautify error messages re
     invalid gotos. The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function. */
  int depth;

  /* True if this parallel directive is nested within another. */
  bool is_nested;

  /* True if this construct can be cancelled. */
  bool cancellable;

  /* True if lower_omp_1 should look up lastprivate conditional in parent
     context. */
  bool combined_into_simd_safelen1;

  /* True if there is nested scan context with inclusive clause. */
  bool scan_inclusive;

  /* True if there is nested scan context with exclusive clause. */
  bool scan_exclusive;

  /* True in the second simd loop of for simd with inscan reductions. */
  bool for_simd_scan_phase;

  /* True if there is order(concurrent) clause on the construct. */
  bool order_concurrent;

  /* True if there is bind clause on the construct (i.e. a loop construct). */
  bool loop_p;
};
/* Splay tree of every omp_context in the current function, keyed by the
   GIMPLE statement that introduced it; owns the contexts via the tree's
   value-delete callback (delete_omp_context).  */
static splay_tree all_contexts;
/* Nesting level of parallel/task regions currently being scanned.  */
static int taskreg_nesting_level;
/* Nesting level of target regions currently being scanned.  */
static int target_nesting_level;
/* UIDs of variables made addressable only because a task needs to take
   their address; see use_pointer_for_field and omp_copy_decl_2.  */
static bitmap task_shared_vars;
/* UIDs of global variables first seen as non-addressable; the answer is
   kept stable for the whole pass even if they later become addressable
   (PR91216) — see use_pointer_for_field.  */
static bitmap global_nonaddressable_vars;
/* Contexts of taskreg regions collected during scanning — presumably for
   deferred per-region processing after the scan; see the pass driver.  */
static vec<omp_context *> taskreg_contexts;
static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);
/* Shared 'case' labels for gimple-walk callbacks: statement kinds whose
   own operands are uninteresting but whose sub-statements must still be
   walked.  Expects *HANDLED_OPS_P to be in scope at the use site.  */
#define WALK_SUBSTMTS \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;
/* Return true if CTX corresponds to an OpenACC 'parallel' or 'serial'
   region.  */
static bool
is_oacc_parallel_or_serial (omp_context *ctx)
{
  if (gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET)
    return false;
  int kind = gimple_omp_target_kind (ctx->stmt);
  return (kind == GF_OMP_TARGET_KIND_OACC_PARALLEL
          || kind == GF_OMP_TARGET_KIND_OACC_SERIAL);
}
/* Return true if CTX corresponds to an OpenACC 'kernels' region.  */
static bool
is_oacc_kernels (omp_context *ctx)
{
  if (gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET)
    return false;
  return gimple_omp_target_kind (ctx->stmt) == GF_OMP_TARGET_KIND_OACC_KERNELS;
}
/* If DECL is the artificial dummy VAR_DECL created for non-static
   data member privatization, return the underlying "this" parameter,
   otherwise return NULL.  */
tree
omp_member_access_dummy_var (tree decl)
{
  /* The dummy is an artificial, debug-ignored VAR_DECL with a value
     expression the language hooks say to disregard; anything else is
     not a member-access dummy.  */
  if (!VAR_P (decl)
      || !DECL_ARTIFICIAL (decl)
      || !DECL_IGNORED_P (decl)
      || !DECL_HAS_VALUE_EXPR_P (decl)
      || !lang_hooks.decls.omp_disregard_value_expr (decl, false))
    return NULL_TREE;
  tree v = DECL_VALUE_EXPR (decl);
  if (TREE_CODE (v) != COMPONENT_REF)
    return NULL_TREE;
  /* Strip the access path — component refs, dereferences, conversions
     and pointer arithmetic — down to the base object.  */
  while (1)
    switch (TREE_CODE (v))
      {
      case COMPONENT_REF:
      case MEM_REF:
      case INDIRECT_REF:
      CASE_CONVERT:
      case POINTER_PLUS_EXPR:
	v = TREE_OPERAND (v, 0);
	continue;
      case PARM_DECL:
	/* Only an artificial pointer parameter of the current function
	   (i.e. the "this" pointer) qualifies.  */
	if (DECL_CONTEXT (v) == current_function_decl
	    && DECL_ARTIFICIAL (v)
	    && TREE_CODE (TREE_TYPE (v)) == POINTER_TYPE)
	  return v;
	return NULL_TREE;
      default:
	return NULL_TREE;
      }
}
/* Helper for unshare_and_remap, called through walk_tree.  DATA points
   to a two-element tree array { FROM, TO }.  */
static tree
unshare_and_remap_1 (tree *tp, int *walk_subtrees, void *data)
{
  tree *from_to = (tree *) data;
  if (*tp != from_to[0])
    {
      /* Don't descend into types or decls; nothing to replace there.  */
      if (IS_TYPE_OR_DECL_P (*tp))
	*walk_subtrees = 0;
      return NULL_TREE;
    }
  /* Found an occurrence of FROM: substitute a fresh copy of TO.  */
  *tp = unshare_expr (from_to[1]);
  *walk_subtrees = 0;
  return NULL_TREE;
}
/* Return unshare_expr (X) with all occurrences of FROM
   replaced with TO.  */
static tree
unshare_and_remap (tree x, tree from, tree to)
{
  tree from_to[2] = { from, to };
  tree copy = unshare_expr (x);
  walk_tree (&copy, unshare_and_remap_1, from_to, NULL);
  return copy;
}
/* Convenience wrapper: run scan_omp_1_op over the tree operand *TP with
   CTX as the walk's user data, requesting location tracking.  */
static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;
  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  wi.info = ctx;
  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}
/* Forward declarations for routines defined later in this file.  */
static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);
/* Return true if CTX is for an omp parallel.  */
static inline bool
is_parallel_ctx (omp_context *ctx)
{
  enum gimple_code code = gimple_code (ctx->stmt);
  return code == GIMPLE_OMP_PARALLEL;
}
/* Return true if CTX is for an omp task.  */
static inline bool
is_task_ctx (omp_context *ctx)
{
  enum gimple_code code = gimple_code (ctx->stmt);
  return code == GIMPLE_OMP_TASK;
}
/* Return true if CTX is for an omp taskloop.  */
static inline bool
is_taskloop_ctx (omp_context *ctx)
{
  if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR)
    return false;
  return gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_TASKLOOP;
}
/* Return true if CTX is for a host omp teams.  */
static inline bool
is_host_teams_ctx (omp_context *ctx)
{
  if (gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
    return false;
  return gimple_omp_teams_host (as_a <gomp_teams *> (ctx->stmt));
}
/* Return true if CTX is for an omp parallel or omp task or host omp teams
   (the last one is strictly not a task region in OpenMP speak, but we
   need to treat it similarly).  */
static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return (is_parallel_ctx (ctx)
	  || is_task_ctx (ctx)
	  || is_host_teams_ctx (ctx));
}
/* Return true if EXPR is variable sized (i.e. its type's byte size is
   not a compile-time constant).  */
static inline bool
is_variable_sized (const_tree expr)
{
  tree size = TYPE_SIZE_UNIT (TREE_TYPE (expr));
  return !TREE_CONSTANT (size);
}
/* Lookup variables.  The "maybe" form
   allows for the variable form to not have been entered, otherwise we
   assert that the variable must have been entered.  */
static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *slot = ctx->cb.decl_map->get (var);
  return *slot;
}
/* As lookup_decl, but return NULL_TREE when VAR has no mapping.  */
static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  if (tree *slot = ctx->cb.decl_map->get (const_cast<tree> (var)))
    return *slot;
  return NULL_TREE;
}
/* Return the record field installed for VAR; VAR must have one.  */
static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n
    = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}
/* Return the sender-side field for KEY, falling back to the receiver
   field map when no separate sender map exists.  */
static inline tree
lookup_sfield (splay_tree_key key, omp_context *ctx)
{
  splay_tree map = ctx->sfield_map ? ctx->sfield_map : ctx->field_map;
  splay_tree_node n = splay_tree_lookup (map, key);
  return (tree) n->value;
}
/* Overload of lookup_sfield taking the variable itself as the key.  */
static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  return lookup_sfield ((splay_tree_key) var, ctx);
}
/* As lookup_field, but return NULL_TREE when KEY has no field.  */
static inline tree
maybe_lookup_field (splay_tree_key key, omp_context *ctx)
{
  splay_tree_node n = splay_tree_lookup (ctx->field_map, key);
  return n == NULL ? NULL_TREE : (tree) n->value;
}
/* Overload of maybe_lookup_field taking the variable itself as the key.  */
static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  return maybe_lookup_field ((splay_tree_key) var, ctx);
}
/* Return true if DECL should be copied by pointer (its address passed
   into the outlined region) rather than by copy-in/copy-out value
   semantics.  SHARED_CTX is the parallel context if DECL is to be
   shared.  May mark DECL (or its outer-context counterpart) addressable
   as a side effect — see the task case below.  */
static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  /* Aggregates and atomics are never copied by value.  */
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl))
      || TYPE_ATOMIC (TREE_TYPE (decl)))
    return true;
  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      gcc_assert (!is_gimple_omp_oacc (shared_ctx->stmt));
      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, shared_ctx)))
	return true;
      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;
      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (is_global_var (decl))
	{
	  /* For file scope vars, track whether we've seen them as
	     non-addressable initially and in that case, keep the same
	     answer for the duration of the pass, even when they are made
	     addressable later on e.g. through reduction expansion.  Global
	     variables which weren't addressable before the pass will not
	     have their privatized copies address taken.  See PR91216.  */
	  if (!TREE_ADDRESSABLE (decl))
	    {
	      if (!global_nonaddressable_vars)
		global_nonaddressable_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (global_nonaddressable_vars, DECL_UID (decl));
	    }
	  else if (!global_nonaddressable_vars
		   || !bitmap_bit_p (global_nonaddressable_vars,
				     DECL_UID (decl)))
	    return true;
	}
      else if (TREE_ADDRESSABLE (decl))
	return true;
      /* lower_send_shared_vars only uses copy-in, but not copy-out
	 for these.  */
      if (TREE_READONLY (decl)
	  || ((TREE_CODE (decl) == RESULT_DECL
	       || TREE_CODE (decl) == PARM_DECL)
	      && DECL_BY_REFERENCE (decl)))
	return false;
      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (shared_ctx->is_nested)
	{
	  omp_context *up;
	  /* Find the innermost enclosing taskreg or offloaded target
	     context that already knows DECL.  */
	  for (up = shared_ctx->outer; up; up = up->outer)
	    if ((is_taskreg_ctx (up)
		 || (gimple_code (up->stmt) == GIMPLE_OMP_TARGET
		     && is_gimple_omp_offloaded (up->stmt)))
		&& maybe_lookup_decl (decl, up))
	      break;
	  if (up)
	    {
	      tree c;
	      /* Check whether DECL is in fact mapped (target) or shared
		 (taskreg) on that enclosing construct.  */
	      if (gimple_code (up->stmt) == GIMPLE_OMP_TARGET)
		{
		  for (c = gimple_omp_target_clauses (up->stmt);
		       c; c = OMP_CLAUSE_CHAIN (c))
		    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
			&& OMP_CLAUSE_DECL (c) == decl)
		      break;
		}
	      else
		for (c = gimple_omp_taskreg_clauses (up->stmt);
		     c; c = OMP_CLAUSE_CHAIN (c))
		  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		      && OMP_CLAUSE_DECL (c) == decl)
		    break;
	      /* Jump into the task handling below to mark the variable
		 addressable and return true.  */
	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}
      /* For tasks avoid using copy-in/out.  As tasks can be
	 deferred or executed in different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer) && !omp_member_access_dummy_var (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }
  return false;
}
/* Construct a new automatic decl similar to VAR, named NAME with type
   TYPE, and chain it onto CTX's block vars.  */
static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);
  DECL_CONTEXT (copy) = current_function_decl;
  /* If VAR only became addressable because a task needed its address
     (task_shared_vars) or through reduction expansion on a global that
     started out non-addressable (global_nonaddressable_vars), there is
     no need for the privatized copy to be addressable too.  */
  if (TREE_ADDRESSABLE (var))
    {
      bool task_forced = (task_shared_vars
			  && bitmap_bit_p (task_shared_vars,
					   DECL_UID (var)));
      bool global_forced = (global_nonaddressable_vars
			    && bitmap_bit_p (global_nonaddressable_vars,
					     DECL_UID (var)));
      if (task_forced || global_forced)
	TREE_ADDRESSABLE (copy) = 0;
    }
  DECL_CHAIN (copy) = ctx->block_vars;
  ctx->block_vars = copy;
  return copy;
}
/* Construct a new automatic decl with the same name and type as VAR.  */
static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}
/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */
static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ref = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
  /* Propagate qualifiers from the field onto the reference.  */
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ref) = 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ref) = 1;
  return ref;
}
/* Build tree nodes to access the field for VAR on the receiver side.  */
static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree field = lookup_field (var, ctx);
  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  tree remapped = maybe_lookup_field (field, ctx);
  if (remapped != NULL)
    field = remapped;
  tree ref = build_simple_mem_ref (ctx->receiver_decl);
  TREE_THIS_NOTRAP (ref) = 1;
  ref = omp_build_component_ref (ref, field);
  if (by_ref)
    {
      /* The field holds a pointer; dereference once more.  */
      ref = build_simple_mem_ref (ref);
      TREE_THIS_NOTRAP (ref) = 1;
    }
  return ref;
}
/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  CODE, when given, is the clause code for which
   the outer reference is being built.  */
static tree
build_outer_var_ref (tree var, omp_context *ctx,
		     enum omp_clause_code code = OMP_CLAUSE_ERROR)
{
  tree x;
  omp_context *outer = ctx->outer;
  /* Taskgroup contexts never hold data-sharing state; skip them.  */
  while (outer && gimple_code (outer->stmt) == GIMPLE_OMP_TASKGROUP)
    outer = outer->outer;
  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    /* Globals are referenced directly.  */
    x = var;
  else if (is_variable_sized (var))
    {
      /* VLAs live behind a pointer in their DECL_VALUE_EXPR; build the
	 outer ref for that pointer and dereference it.  */
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx, code);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      /* Parallel/task/host-teams: read through the receiver record.  */
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if ((gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	    && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
	   || ctx->loop_p
	   || (code == OMP_CLAUSE_PRIVATE
	       && (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
		   || gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS
		   || gimple_code (ctx->stmt) == GIMPLE_OMP_SINGLE)))
    {
      /* #pragma omp simd isn't a worksharing construct, and can reference
	 even private vars in its linear etc. clauses.
	 Similarly for OMP_CLAUSE_PRIVATE with outer ref, that can refer
	 to private vars in all worksharing constructs.  */
      x = NULL_TREE;
      if (outer && is_taskreg_ctx (outer))
	x = lookup_decl (var, outer);
      else if (outer)
	x = maybe_lookup_decl_in_outer_ctx (var, ctx);
      if (x == NULL_TREE)
	x = var;
    }
  else if (code == OMP_CLAUSE_LASTPRIVATE && is_taskloop_ctx (ctx))
    {
      gcc_assert (outer);
      /* Taskloop lastprivate: look the variable up by &DECL_UID, the key
	 under which install_var_field (mask & 8) registered it.  */
      splay_tree_node n
	= splay_tree_lookup (outer->field_map,
			     (splay_tree_key) &DECL_UID (var));
      if (n == NULL)
	{
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, outer)))
	    x = var;
	  else
	    x = lookup_decl (var, outer);
	}
      else
	{
	  tree field = (tree) n->value;
	  /* If the receiver record type was remapped in the child function,
	     remap the field into the new record type.  */
	  x = maybe_lookup_field (field, outer);
	  if (x != NULL)
	    field = x;
	  x = build_simple_mem_ref (outer->receiver_decl);
	  x = omp_build_component_ref (x, field);
	  if (use_pointer_for_field (var, outer))
	    x = build_simple_mem_ref (x);
	}
    }
  else if (outer)
    {
      /* Grid-body contexts carry no decl mappings; step past them.  */
      if (gimple_code (outer->stmt) == GIMPLE_OMP_GRID_BODY)
	{
	  outer = outer->outer;
	  gcc_assert (outer
		      && gimple_code (outer->stmt) != GIMPLE_OMP_GRID_BODY);
	}
      x = lookup_decl (var, outer);
    }
  else if (omp_is_reference (var))
    /* This can happen with orphaned constructs.  If var is reference, it is
       possible it is shared and as such valid.  */
    x = var;
  else if (omp_member_access_dummy_var (var))
    x = var;
  else
    gcc_unreachable ();
  if (x == var)
    {
      /* For a member-access dummy, dereference its value expression,
	 remapping the underlying "this" into the outer context.  */
      tree t = omp_member_access_dummy_var (var);
      if (t)
	{
	  x = DECL_VALUE_EXPR (var);
	  tree o = maybe_lookup_decl_in_outer_ctx (t, ctx);
	  if (o != t)
	    x = unshare_and_remap (x, t, o);
	  else
	    x = unshare_expr (x);
	}
    }
  if (omp_is_reference (var))
    x = build_simple_mem_ref (x);
  return x;
}
/* Build tree nodes to access the field for VAR on the sender side.  */
static tree
build_sender_ref (splay_tree_key key, omp_context *ctx)
{
  return omp_build_component_ref (ctx->sender_decl,
				  lookup_sfield (key, ctx));
}
/* Overload of build_sender_ref taking the variable itself as the key.  */
static tree
build_sender_ref (tree var, omp_context *ctx)
{
  return build_sender_ref ((splay_tree_key) var, ctx);
}
/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  MASK
   is a bit set controlling where and how the field is installed:
     1  - the field is entered into CTX->FIELD_MAP (receiver side);
     2  - the field is entered into CTX->SFIELD_MAP (sender side);
	  with both bits set the field goes into RECORD_TYPE (and
	  SRECORD_TYPE if it exists), otherwise only into the one record
	  selected by the bit;
     4  - VAR has array type; use a pointer-to-pointer field type;
     8  - key the maps by &DECL_UID (VAR) rather than VAR itself;
     16 - Fortran array descriptor: key by &DECL_NAME (VAR) and take the
	  field type from the omp_array_data langhook.
   BY_REF requests a pointer-to-VAR field instead of a by-value copy.  */
static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx)
{
  tree field, type, sfield = NULL_TREE;
  splay_tree_key key = (splay_tree_key) var;
  if ((mask & 16) != 0)
    {
      key = (splay_tree_key) &DECL_NAME (var);
      gcc_checking_assert (key != (splay_tree_key) var);
    }
  if ((mask & 8) != 0)
    {
      key = (splay_tree_key) &DECL_UID (var);
      gcc_checking_assert (key != (splay_tree_key) var);
    }
  /* The field must not already exist in the map(s) it is destined for.  */
  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, key));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, key));
  gcc_assert ((mask & 3) == 3
	      || !is_gimple_omp_oacc (ctx->stmt));
  type = TREE_TYPE (var);
  if ((mask & 16) != 0)
    type = lang_hooks.decls.omp_array_data (var, true);
  /* Prevent redeclaring the var in the split-off function with a restrict
     pointer type.  Note that we only clear type itself, restrict qualifiers in
     the pointed-to type will be ignored by points-to analysis.  */
  if (POINTER_TYPE_P (type)
      && TYPE_RESTRICT (type))
    type = build_qualified_type (type, TYPE_QUALS (type) & ~TYPE_QUAL_RESTRICT);
  if (mask & 4)
    {
      gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
      type = build_pointer_type (build_pointer_type (type));
    }
  else if (by_ref)
    type = build_pointer_type (type);
  else if ((mask & 3) == 1 && omp_is_reference (var))
    type = TREE_TYPE (type);
  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);
  /* Remember what variable this field was created for.  This does have a
     side effect of making dwarf2out ignore this member, so for helpful
     debugging we clear it later in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if ((mask & 16) == 0 && type == TREE_TYPE (var))
    {
      /* By-value field of the variable's own type: inherit alignment
	 and volatility from the variable.  */
      SET_DECL_ALIGN (field, DECL_ALIGN (var));
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    SET_DECL_ALIGN (field, TYPE_ALIGN (type));
  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  /* Mirror the field into the sender record.  */
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  SET_DECL_ALIGN (sfield, DECL_ALIGN (field));
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;
	  /* First asymmetric field: create SRECORD_TYPE lazily, seeding
	     it with copies of all fields already in RECORD_TYPE.  */
	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (t),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }
  if (mask & 1)
    splay_tree_insert (ctx->field_map, key, (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, key, (splay_tree_value) sfield);
}
/* Create a context-local copy of VAR and record the VAR -> copy mapping
   in CTX's decl map; return the copy.  */
static tree
install_var_local (tree var, omp_context *ctx)
{
  tree local = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, local);
  return local;
}
/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  PRIVATE_DEBUG
   forces the value expression to be copied even for fixed-size decls.  */
static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;
  new_decl = lookup_decl (decl, ctx);
  /* The type itself may reference remapped sizes or fields.  */
  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);
  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      /* Remap decl references inside the value expression into the new
	 context before attaching it.  */
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }
  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      /* Variable-sized decl: remap both the bit and byte sizes, falling
	 back to the (already remapped) type's size on error.  */
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;
      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}
/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */
static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  /* The copy_body_data is embedded as the first member of omp_context,
     so the cast recovers the enclosing context.  */
  omp_context *ctx = (omp_context *) cb;
  tree new_var;
  if (TREE_CODE (var) == LABEL_DECL)
    {
      /* Forced/non-local labels keep their identity; ordinary labels
	 get a fresh artificial label in the destination function.  */
      if (FORCED_LABEL (var) || DECL_NONLOCAL (var))
	return var;
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }
  /* Walk outward through non-taskreg contexts looking for an existing
     mapping; stop at the innermost taskreg boundary.  */
  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }
  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;
  /* A function-local variable crossed a taskreg boundary without a
     mapping — signal the error to the caller.  */
  return error_mark_node;
}
/* Create a new context, with OUTER_CTX being the surrounding context.
   The context is registered in ALL_CONTEXTS (which owns it) under the
   statement STMT.  */
static omp_context *
new_omp_context (gimple *stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);
  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;
  if (outer_ctx)
    {
      /* Nested context: inherit the copy-body setup from the parent,
	 but with its own block and one level deeper.  */
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      /* Outermost context: set up decl remapping within the current
	 function (source and destination are the same function).  */
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node::get (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->cb.adjust_array_error_bounds = true;
      ctx->cb.dont_remap_vla_if_no_change = true;
      ctx->depth = 1;
    }
  ctx->cb.decl_map = new hash_map<tree, tree>;
  return ctx;
}
/* Defined later in this file; used by finalize_task_copyfn below.  */
static gimple_seq maybe_catch_exception (gimple_seq);
/* Finalize task copyfn: gimplify the task copy function attached to
   TASK_STMT (if any), wrap it for EH if needed, and register it with
   the callgraph.  Temporarily switches cfun to the child function.  */
static void
finalize_task_copyfn (gomp_task *task_stmt)
{
  struct function *child_cfun;
  tree child_fn;
  gimple_seq seq = NULL, new_seq;
  gbind *bind;
  child_fn = gimple_omp_task_copy_fn (task_stmt);
  /* Nothing to do when the task has no copy function.  */
  if (child_fn == NULL_TREE)
    return;
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;
  push_cfun (child_cfun);
  bind = gimplify_body (child_fn, false);
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      /* EH wrapping produced a new sequence; re-wrap it in a bind.  */
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = NULL;
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();
  /* Inform the callgraph about the new function.  */
  cgraph_node *node = cgraph_node::get_create (child_fn);
  node->parallelized_function = 1;
  cgraph_node::add_new_function (child_fn, false);
}
/* Destroy a omp_context data structures.  Called through the splay tree
   value delete callback.  */
static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;
  delete ctx->cb.decl_map;
  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);
  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  /* Task contexts own a copy function that still needs finalizing.  */
  if (is_task_ctx (ctx))
    finalize_task_copyfn (as_a <gomp_task *> (ctx->stmt));
  if (ctx->task_reduction_map)
    {
      ctx->task_reductions.release ();
      delete ctx->task_reduction_map;
    }
  /* deleting a NULL map is harmless.  */
  delete ctx->lastprivate_conditional_map;
  XDELETE (ctx);
}
/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */
static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;
  if (!ctx->receiver_decl)
    return;
  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      /* At least one field needs remapping: rebuild the whole record
	 with remapped field types, sizes, and offsets.  */
      tree name, new_fields = NULL;
      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;
      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;
	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      /* Fields were chained in reverse; restore source order.  */
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }
  /* In a target region we never modify any of the pointers in *.omp_data_i,
     so attempt to help the optimizers.  */
  if (is_gimple_omp_offloaded (ctx->stmt))
    type = build_qualified_type (type, TYPE_QUAL_CONST);
  TREE_TYPE (ctx->receiver_decl)
    = build_qualified_type (build_reference_type (type), TYPE_QUAL_RESTRICT);
}
/* Instantiate decls as necessary in CTX to satisfy the data sharing
specified by CLAUSES. */
static void
scan_sharing_clauses (tree clauses, omp_context *ctx)
{
tree c, decl;
bool scan_array_reductions = false;
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
{
bool by_ref;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_PRIVATE:
decl = OMP_CLAUSE_DECL (c);
if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
goto do_private;
else if (!is_variable_sized (decl))
install_var_local (decl, ctx);
break;
case OMP_CLAUSE_SHARED:
decl = OMP_CLAUSE_DECL (c);
/* Ignore shared directives in teams construct inside of
target construct. */
if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS
&& !is_host_teams_ctx (ctx))
{
/* Global variables don't need to be copied,
the receiver side will use them directly. */
tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
if (is_global_var (odecl))
break;
insert_decl_map (&ctx->cb, decl, odecl);
break;
}
gcc_assert (is_taskreg_ctx (ctx));
gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
|| !is_variable_sized (decl));
/* Global variables don't need to be copied,
the receiver side will use them directly. */
if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
break;
if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
{
use_pointer_for_field (decl, ctx);
break;
}
by_ref = use_pointer_for_field (decl, NULL);
if ((! TREE_READONLY (decl) && !OMP_CLAUSE_SHARED_READONLY (c))
|| TREE_ADDRESSABLE (decl)
|| by_ref
|| omp_is_reference (decl))
{
by_ref = use_pointer_for_field (decl, ctx);
install_var_field (decl, by_ref, 3, ctx);
install_var_local (decl, ctx);
break;
}
/* We don't need to copy const scalar vars back. */
OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
goto do_private;
case OMP_CLAUSE_REDUCTION:
/* Collect 'reduction' clauses on OpenACC compute construct. */
if (is_gimple_omp_oacc (ctx->stmt)
&& is_gimple_omp_offloaded (ctx->stmt))
{
/* No 'reduction' clauses on OpenACC 'kernels'. */
gcc_checking_assert (!is_oacc_kernels (ctx));
ctx->local_reduction_clauses
= tree_cons (NULL, c, ctx->local_reduction_clauses);
}
/* FALLTHRU */
case OMP_CLAUSE_IN_REDUCTION:
decl = OMP_CLAUSE_DECL (c);
if (TREE_CODE (decl) == MEM_REF)
{
tree t = TREE_OPERAND (decl, 0);
if (TREE_CODE (t) == POINTER_PLUS_EXPR)
t = TREE_OPERAND (t, 0);
if (TREE_CODE (t) == INDIRECT_REF
|| TREE_CODE (t) == ADDR_EXPR)
t = TREE_OPERAND (t, 0);
install_var_local (t, ctx);
if (is_taskreg_ctx (ctx)
&& (!is_global_var (maybe_lookup_decl_in_outer_ctx (t, ctx))
|| (is_task_ctx (ctx)
&& (TREE_CODE (TREE_TYPE (t)) == POINTER_TYPE
|| (TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE
&& (TREE_CODE (TREE_TYPE (TREE_TYPE (t)))
== POINTER_TYPE)))))
&& !is_variable_sized (t)
&& (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
|| (!OMP_CLAUSE_REDUCTION_TASK (c)
&& !is_task_ctx (ctx))))
{
by_ref = use_pointer_for_field (t, NULL);
if (is_task_ctx (ctx)
&& TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE
&& TREE_CODE (TREE_TYPE (TREE_TYPE (t))) == POINTER_TYPE)
{
install_var_field (t, false, 1, ctx);
install_var_field (t, by_ref, 2, ctx);
}
else
install_var_field (t, by_ref, 3, ctx);
}
break;
}
if (is_task_ctx (ctx)
|| (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_TASK (c)
&& is_parallel_ctx (ctx)))
{
/* Global variables don't need to be copied,
the receiver side will use them directly. */
if (!is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
{
by_ref = use_pointer_for_field (decl, ctx);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION)
install_var_field (decl, by_ref, 3, ctx);
}
install_var_local (decl, ctx);
break;
}
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_TASK (c))
{
install_var_local (decl, ctx);
break;
}
goto do_private;
case OMP_CLAUSE_LASTPRIVATE:
/* Let the corresponding firstprivate clause create
the variable. */
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
break;
/* FALLTHRU */
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_LINEAR:
decl = OMP_CLAUSE_DECL (c);
do_private:
if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IS_DEVICE_PTR)
&& is_gimple_omp_offloaded (ctx->stmt))
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
install_var_field (decl, !omp_is_reference (decl), 3, ctx);
else if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
install_var_field (decl, true, 3, ctx);
else
install_var_field (decl, false, 3, ctx);
}
if (is_variable_sized (decl))
{
if (is_task_ctx (ctx))
install_var_field (decl, false, 1, ctx);
break;
}
else if (is_taskreg_ctx (ctx))
{
bool global
= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
by_ref = use_pointer_for_field (decl, NULL);
if (is_task_ctx (ctx)
&& (global || by_ref || omp_is_reference (decl)))
{
install_var_field (decl, false, 1, ctx);
if (!global)
install_var_field (decl, by_ref, 2, ctx);
}
else if (!global)
install_var_field (decl, by_ref, 3, ctx);
}
install_var_local (decl, ctx);
break;
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_USE_DEVICE_ADDR:
decl = OMP_CLAUSE_DECL (c);
/* Fortran array descriptors. */
if (lang_hooks.decls.omp_array_data (decl, true))
install_var_field (decl, false, 19, ctx);
else if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_ADDR
&& !omp_is_reference (decl)
&& !omp_is_allocatable_or_ptr (decl))
|| TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
install_var_field (decl, true, 11, ctx);
else
install_var_field (decl, false, 11, ctx);
if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
install_var_local (decl2, ctx);
}
install_var_local (decl, ctx);
break;
case OMP_CLAUSE_IS_DEVICE_PTR:
decl = OMP_CLAUSE_DECL (c);
goto do_private;
case OMP_CLAUSE__LOOPTEMP_:
case OMP_CLAUSE__REDUCTEMP_:
gcc_assert (is_taskreg_ctx (ctx));
decl = OMP_CLAUSE_DECL (c);
install_var_field (decl, false, 3, ctx);
install_var_local (decl, ctx);
break;
case OMP_CLAUSE_COPYPRIVATE:
case OMP_CLAUSE_COPYIN:
decl = OMP_CLAUSE_DECL (c);
by_ref = use_pointer_for_field (decl, NULL);
install_var_field (decl, by_ref, 3, ctx);
break;
case OMP_CLAUSE_FINAL:
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_NUM_TEAMS:
case OMP_CLAUSE_THREAD_LIMIT:
case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_DIST_SCHEDULE:
case OMP_CLAUSE_DEPEND:
case OMP_CLAUSE_PRIORITY:
case OMP_CLAUSE_GRAINSIZE:
case OMP_CLAUSE_NUM_TASKS:
case OMP_CLAUSE_NUM_GANGS:
case OMP_CLAUSE_NUM_WORKERS:
case OMP_CLAUSE_VECTOR_LENGTH:
if (ctx->outer)
scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
break;
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
case OMP_CLAUSE_MAP:
if (ctx->outer)
scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
decl = OMP_CLAUSE_DECL (c);
/* Global variables with "omp declare target" attribute
don't need to be copied, the receiver side will use them
directly. However, global variables with "omp declare target link"
attribute need to be copied. Or when ALWAYS modifier is used. */
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& DECL_P (decl)
&& ((OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER
&& (OMP_CLAUSE_MAP_KIND (c)
!= GOMP_MAP_FIRSTPRIVATE_REFERENCE))
|| TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ALWAYS_TO
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ALWAYS_FROM
&& OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ALWAYS_TOFROM
&& is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
&& varpool_node::get_create (decl)->offloadable
&& !lookup_attribute ("omp declare target link",
DECL_ATTRIBUTES (decl)))
break;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER)
{
/* Ignore GOMP_MAP_POINTER kind for arrays in regions that are
not offloaded; there is nothing to map for those. */
if (!is_gimple_omp_offloaded (ctx->stmt)
&& !POINTER_TYPE_P (TREE_TYPE (decl))
&& !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
break;
}
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
|| (OMP_CLAUSE_MAP_KIND (c)
== GOMP_MAP_FIRSTPRIVATE_REFERENCE)))
{
if (TREE_CODE (decl) == COMPONENT_REF
|| (TREE_CODE (decl) == INDIRECT_REF
&& TREE_CODE (TREE_OPERAND (decl, 0)) == COMPONENT_REF
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
== REFERENCE_TYPE)))
break;
if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
install_var_local (decl2, ctx);
}
install_var_local (decl, ctx);
break;
}
if (DECL_P (decl))
{
if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
install_var_field (decl2, true, 3, ctx);
install_var_local (decl2, ctx);
install_var_local (decl, ctx);
}
else
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
&& !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
&& TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
install_var_field (decl, true, 7, ctx);
else
install_var_field (decl, true, 3, ctx);
if (is_gimple_omp_offloaded (ctx->stmt)
&& !OMP_CLAUSE_MAP_IN_REDUCTION (c))
install_var_local (decl, ctx);
}
}
else
{
tree base = get_base_address (decl);
tree nc = OMP_CLAUSE_CHAIN (c);
if (DECL_P (base)
&& nc != NULL_TREE
&& OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_DECL (nc) == base
&& OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_POINTER
&& integer_zerop (OMP_CLAUSE_SIZE (nc)))
{
OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
}
else
{
if (ctx->outer)
{
scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
decl = OMP_CLAUSE_DECL (c);
}
gcc_assert (!splay_tree_lookup (ctx->field_map,
(splay_tree_key) decl));
tree field
= build_decl (OMP_CLAUSE_LOCATION (c),
FIELD_DECL, NULL_TREE, ptr_type_node);
SET_DECL_ALIGN (field, TYPE_ALIGN (ptr_type_node));
insert_field_into_struct (ctx->record_type, field);
splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
(splay_tree_value) field);
}
}
break;
case OMP_CLAUSE__GRIDDIM_:
if (ctx->outer)
{
scan_omp_op (&OMP_CLAUSE__GRIDDIM__SIZE (c), ctx->outer);
scan_omp_op (&OMP_CLAUSE__GRIDDIM__GROUP (c), ctx->outer);
}
break;
case OMP_CLAUSE_ORDER:
ctx->order_concurrent = true;
break;
case OMP_CLAUSE_BIND:
ctx->loop_p = true;
break;
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_MERGEABLE:
case OMP_CLAUSE_PROC_BIND:
case OMP_CLAUSE_SAFELEN:
case OMP_CLAUSE_SIMDLEN:
case OMP_CLAUSE_THREADS:
case OMP_CLAUSE_SIMD:
case OMP_CLAUSE_NOGROUP:
case OMP_CLAUSE_DEFAULTMAP:
case OMP_CLAUSE_ASYNC:
case OMP_CLAUSE_WAIT:
case OMP_CLAUSE_GANG:
case OMP_CLAUSE_WORKER:
case OMP_CLAUSE_VECTOR:
case OMP_CLAUSE_INDEPENDENT:
case OMP_CLAUSE_AUTO:
case OMP_CLAUSE_SEQ:
case OMP_CLAUSE_TILE:
case OMP_CLAUSE__SIMT_:
case OMP_CLAUSE_DEFAULT:
case OMP_CLAUSE_NONTEMPORAL:
case OMP_CLAUSE_IF_PRESENT:
case OMP_CLAUSE_FINALIZE:
case OMP_CLAUSE_TASK_REDUCTION:
break;
case OMP_CLAUSE_ALIGNED:
decl = OMP_CLAUSE_DECL (c);
if (is_global_var (decl)
&& TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
install_var_local (decl, ctx);
break;
case OMP_CLAUSE__CONDTEMP_:
decl = OMP_CLAUSE_DECL (c);
if (is_parallel_ctx (ctx))
{
install_var_field (decl, false, 3, ctx);
install_var_local (decl, ctx);
}
else if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD
&& !OMP_CLAUSE__CONDTEMP__ITER (c))
install_var_local (decl, ctx);
break;
case OMP_CLAUSE__CACHE_:
default:
gcc_unreachable ();
}
}
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
{
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_LASTPRIVATE:
/* Let the corresponding firstprivate clause create
the variable. */
if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
scan_array_reductions = true;
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
break;
/* FALLTHRU */
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_LINEAR:
case OMP_CLAUSE_IS_DEVICE_PTR:
decl = OMP_CLAUSE_DECL (c);
if (is_variable_sized (decl))
{
if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IS_DEVICE_PTR)
&& is_gimple_omp_offloaded (ctx->stmt))
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
install_var_local (decl2, ctx);
fixup_remapped_decl (decl2, ctx, false);
}
install_var_local (decl, ctx);
}
fixup_remapped_decl (decl, ctx,
OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
&& OMP_CLAUSE_PRIVATE_DEBUG (c));
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
scan_array_reductions = true;
break;
case OMP_CLAUSE_REDUCTION:
case OMP_CLAUSE_IN_REDUCTION:
decl = OMP_CLAUSE_DECL (c);
if (TREE_CODE (decl) != MEM_REF)
{
if (is_variable_sized (decl))
install_var_local (decl, ctx);
fixup_remapped_decl (decl, ctx, false);
}
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
scan_array_reductions = true;
break;
case OMP_CLAUSE_TASK_REDUCTION:
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
scan_array_reductions = true;
break;
case OMP_CLAUSE_SHARED:
/* Ignore shared directives in teams construct inside of
target construct. */
if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS
&& !is_host_teams_ctx (ctx))
break;
decl = OMP_CLAUSE_DECL (c);
if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
break;
if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
{
if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl,
ctx->outer)))
break;
bool by_ref = use_pointer_for_field (decl, ctx);
install_var_field (decl, by_ref, 11, ctx);
break;
}
fixup_remapped_decl (decl, ctx, false);
break;
case OMP_CLAUSE_MAP:
if (!is_gimple_omp_offloaded (ctx->stmt))
break;
decl = OMP_CLAUSE_DECL (c);
if (DECL_P (decl)
&& ((OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER
&& (OMP_CLAUSE_MAP_KIND (c)
!= GOMP_MAP_FIRSTPRIVATE_REFERENCE))
|| TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
&& is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
&& varpool_node::get_create (decl)->offloadable)
break;
if (DECL_P (decl))
{
if ((OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER)
&& TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
&& !COMPLETE_TYPE_P (TREE_TYPE (decl)))
{
tree new_decl = lookup_decl (decl, ctx);
TREE_TYPE (new_decl)
= remap_type (TREE_TYPE (decl), &ctx->cb);
}
else if (DECL_SIZE (decl)
&& TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
{
tree decl2 = DECL_VALUE_EXPR (decl);
gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
fixup_remapped_decl (decl2, ctx, false);
fixup_remapped_decl (decl, ctx, true);
}
else
fixup_remapped_decl (decl, ctx, false);
}
break;
case OMP_CLAUSE_COPYPRIVATE:
case OMP_CLAUSE_COPYIN:
case OMP_CLAUSE_DEFAULT:
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_NUM_TEAMS:
case OMP_CLAUSE_THREAD_LIMIT:
case OMP_CLAUSE_DEVICE:
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_DIST_SCHEDULE:
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_FINAL:
case OMP_CLAUSE_MERGEABLE:
case OMP_CLAUSE_PROC_BIND:
case OMP_CLAUSE_SAFELEN:
case OMP_CLAUSE_SIMDLEN:
case OMP_CLAUSE_ALIGNED:
case OMP_CLAUSE_DEPEND:
case OMP_CLAUSE__LOOPTEMP_:
case OMP_CLAUSE__REDUCTEMP_:
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
case OMP_CLAUSE_PRIORITY:
case OMP_CLAUSE_GRAINSIZE:
case OMP_CLAUSE_NUM_TASKS:
case OMP_CLAUSE_THREADS:
case OMP_CLAUSE_SIMD:
case OMP_CLAUSE_NOGROUP:
case OMP_CLAUSE_DEFAULTMAP:
case OMP_CLAUSE_ORDER:
case OMP_CLAUSE_BIND:
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_USE_DEVICE_ADDR:
case OMP_CLAUSE_NONTEMPORAL:
case OMP_CLAUSE_ASYNC:
case OMP_CLAUSE_WAIT:
case OMP_CLAUSE_NUM_GANGS:
case OMP_CLAUSE_NUM_WORKERS:
case OMP_CLAUSE_VECTOR_LENGTH:
case OMP_CLAUSE_GANG:
case OMP_CLAUSE_WORKER:
case OMP_CLAUSE_VECTOR:
case OMP_CLAUSE_INDEPENDENT:
case OMP_CLAUSE_AUTO:
case OMP_CLAUSE_SEQ:
case OMP_CLAUSE_TILE:
case OMP_CLAUSE__GRIDDIM_:
case OMP_CLAUSE__SIMT_:
case OMP_CLAUSE_IF_PRESENT:
case OMP_CLAUSE_FINALIZE:
case OMP_CLAUSE__CONDTEMP_:
break;
case OMP_CLAUSE__CACHE_:
default:
gcc_unreachable ();
}
}
gcc_checking_assert (!scan_array_reductions
|| !is_gimple_omp_oacc (ctx->stmt));
if (scan_array_reductions)
{
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
&& OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
}
}
/* Create a new name for omp child function.  Returns an identifier.  */

static tree
create_omp_child_function_name (bool task_copy)
{
  /* Task-copy helpers get a distinct suffix so they are easy to tell
     apart from the outlined body functions in dumps and backtraces.  */
  const char *suffix = task_copy ? "_omp_cpyfn" : "_omp_fn";
  return clone_function_name_numbered (current_function_decl, suffix);
}
/* Return true if CTX may belong to offloaded code: either if current function
   is offloaded, or any enclosing context corresponds to a target region.  */

static bool
omp_maybe_offloaded_ctx (omp_context *ctx)
{
  /* The containing function itself may already be marked offloadable.  */
  if (cgraph_node::get (current_function_decl)->offloadable)
    return true;
  /* Otherwise walk up the context chain looking for an offloaded
     (target) statement.  */
  while (ctx != NULL)
    {
      if (is_gimple_omp_offloaded (ctx->stmt))
	return true;
      ctx = ctx->outer;
    }
  return false;
}
/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  CTX is the context of the statement being
   outlined; TASK_COPY selects building the task copy-constructor
   function (void (*)(void *, void *)) instead of the outlined body
   function (void (*)(void *)).  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  /* Copy functions take destination and source data pointers; body
     functions take only the .omp_data_i pointer.  */
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				    ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt), FUNCTION_DECL, name, type);

  /* OpenACC contexts never use task copy functions.  */
  gcc_checking_assert (!is_gimple_omp_oacc (ctx->stmt)
		       || !task_copy);

  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  /* Mark the new function as a compiler-generated, non-public,
     never-inlined local definition.  */
  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);
  BLOCK_SUPERCONTEXT (DECL_INITIAL (decl)) = decl;
  /* Start from the parent's attribute list ...  */
  DECL_ATTRIBUTES (decl) = DECL_ATTRIBUTES (current_function_decl);
  /* Remove omp declare simd attribute from the new attributes.  */
  if (tree a = lookup_attribute ("omp declare simd", DECL_ATTRIBUTES (decl)))
    {
      /* Find the last "omp declare simd" attribute; A ends up pointing
	 just past it.  */
      while (tree a2 = lookup_attribute ("omp declare simd", TREE_CHAIN (a)))
	a = a2;
      a = TREE_CHAIN (a);
      /* Rebuild the prefix of the list up to A, dropping the "omp
	 declare simd" entries and copying the rest (the parent's list
	 must not be modified in place).  */
      for (tree *p = &DECL_ATTRIBUTES (decl); *p != a;)
	if (is_attribute_p ("omp declare simd", get_attribute_name (*p)))
	  *p = TREE_CHAIN (*p);
	else
	  {
	    tree chain = TREE_CHAIN (*p);
	    *p = copy_node (*p);
	    p = &TREE_CHAIN (*p);
	    *p = chain;
	  }
    }
  /* Inherit optimization/target options and versioning from the
     parent function.  */
  DECL_FUNCTION_SPECIFIC_OPTIMIZATION (decl)
    = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (current_function_decl);
  DECL_FUNCTION_SPECIFIC_TARGET (decl)
    = DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl);
  DECL_FUNCTION_VERSIONED (decl)
    = DECL_FUNCTION_VERSIONED (current_function_decl);

  if (omp_maybe_offloaded_ctx (ctx))
    {
      cgraph_node::get_create (decl)->offloadable = 1;
      if (ENABLE_OFFLOADING)
	g->have_offload = true;
    }

  /* An offloadable child that is not already inside an "omp declare
     target" function gets the appropriate target attribute so the
     offloading machinery picks it up.  */
  if (cgraph_node::get_create (decl)->offloadable
      && !lookup_attribute ("omp declare target",
			    DECL_ATTRIBUTES (current_function_decl)))
    {
      const char *target_attr = (is_gimple_omp_offloaded (ctx->stmt)
				 ? "omp target entrypoint"
				 : "omp declare target");
      DECL_ATTRIBUTES (decl)
	= tree_cons (get_identifier (target_attr),
		     NULL_TREE, DECL_ATTRIBUTES (decl));
    }

  /* void result.  */
  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  /* The incoming data pointer parameter, .omp_data_i.  */
  tree data_name = get_identifier (".omp_data_i");
  t = build_decl (DECL_SOURCE_LOCATION (decl), PARM_DECL, data_name,
		  ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  /* NOTE: context set to the parent here; fixed up when the body is
     moved into the child.  */
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  TREE_READONLY (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      /* Copy functions take a second (first in the arg list) parameter,
	 the destination .omp_data_o.  */
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  init_tree_ssa (cfun);
  pop_cfun ();
}
/* Callback for walk_gimple_seq.  Check if combined parallel
   contains gimple_omp_for_combined_into_p OMP_FOR.  On entry WI->INFO
   points at the loop kind (enum gf_mask) being searched for; on a
   successful match it is overwritten with the loop statement itself,
   which also tells the caller a match was found.  */

tree
omp_find_combined_for (gimple_stmt_iterator *gsi_p,
		       bool *handled_ops_p,
		       struct walk_stmt_info *wi)
{
  gimple *stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    /* WALK_SUBSTMTS expands to the case labels of container statements
       whose sub-sequences should be walked.  */
    WALK_SUBSTMTS;

    case GIMPLE_OMP_FOR:
      if (gimple_omp_for_combined_into_p (stmt)
	  && gimple_omp_for_kind (stmt)
	     == *(const enum gf_mask *) (wi->info))
	{
	  wi->info = stmt;
	  /* Returning non-NULL terminates the walk early.  */
	  return integer_zero_node;
	}
      break;
    default:
      break;
    }
  return NULL;
}
/* Add _LOOPTEMP_/_REDUCTEMP_ clauses on OpenMP parallel or task.
   MSK is the kind of the combined-into inner loop being searched for
   (GF_OMP_FOR_KIND_FOR or GF_OMP_FOR_KIND_TASKLOOP), STMT the parallel
   or task statement, OUTER_CTX its enclosing context (used for the
   decl map only).  */

static void
add_taskreg_looptemp_clauses (enum gf_mask msk, gimple *stmt,
			      omp_context *outer_ctx)
{
  struct walk_stmt_info wi;

  /* Look for a combined-into GIMPLE_OMP_FOR of kind MSK inside the
     body; omp_find_combined_for replaces wi.info with the loop
     statement on success.  */
  memset (&wi, 0, sizeof (wi));
  wi.val_only = true;
  wi.info = (void *) &msk;
  walk_gimple_seq (gimple_omp_body (stmt), omp_find_combined_for, NULL, &wi);
  if (wi.info != (void *) &msk)
    {
      gomp_for *for_stmt = as_a <gomp_for *> ((gimple *) wi.info);
      struct omp_for_data fd;
      omp_extract_for_data (for_stmt, &fd, NULL);
      /* We need two temporaries with fd.loop.v type (istart/iend)
	 and then (fd.collapse - 1) temporaries with the same
	 type for count2 ... countN-1 vars if not constant.  */
      size_t count = 2, i;
      tree type = fd.iter_type;
      if (fd.collapse > 1
	  && TREE_CODE (fd.loop.n2) != INTEGER_CST)
	{
	  count += fd.collapse - 1;
	  /* If there are lastprivate clauses on the inner
	     GIMPLE_OMP_FOR, add one more temporaries for the total number
	     of iterations (product of count1 ... countN-1).  */
	  if (omp_find_clause (gimple_omp_for_clauses (for_stmt),
			       OMP_CLAUSE_LASTPRIVATE))
	    count++;
	  else if (msk == GF_OMP_FOR_KIND_FOR
		   && omp_find_clause (gimple_omp_parallel_clauses (stmt),
				       OMP_CLAUSE_LASTPRIVATE))
	    count++;
	}
      /* Prepend COUNT _looptemp_ clauses, each carrying a fresh
	 temporary of the iterator type.  */
      for (i = 0; i < count; i++)
	{
	  tree temp = create_tmp_var (type);
	  tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
	  /* Map the temporary to itself so remapping in the outer
	     context leaves it untouched.  */
	  insert_decl_map (&outer_ctx->cb, temp, temp);
	  OMP_CLAUSE_DECL (c) = temp;
	  OMP_CLAUSE_CHAIN (c) = gimple_omp_taskreg_clauses (stmt);
	  gimple_omp_taskreg_set_clauses (stmt, c);
	}
    }
  /* Taskloops with any reduction clause additionally need a
     pointer-sized _reductemp_ temporary.  */
  if (msk == GF_OMP_FOR_KIND_TASKLOOP
      && omp_find_clause (gimple_omp_task_clauses (stmt),
			  OMP_CLAUSE_REDUCTION))
    {
      tree type = build_pointer_type (pointer_sized_int_node);
      tree temp = create_tmp_var (type);
      tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__REDUCTEMP_);
      insert_decl_map (&outer_ctx->cb, temp, temp);
      OMP_CLAUSE_DECL (c) = temp;
      OMP_CLAUSE_CHAIN (c) = gimple_omp_task_clauses (stmt);
      gimple_omp_task_set_clauses (stmt, c);
    }
}
/* Scan an OpenMP parallel directive.  Builds the omp_context, the
   .omp_data_s record type, and the child function decl, then scans
   the clauses and body.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gomp_parallel *stmt = as_a <gomp_parallel *> (gsi_stmt (*gsi));

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && omp_find_clause (gimple_omp_parallel_clauses (stmt),
			  OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  if (gimple_omp_parallel_combined_p (stmt))
    add_taskreg_looptemp_clauses (GF_OMP_FOR_KIND_FOR, stmt, outer_ctx);
  /* If any reduction clause is a task reduction, prepend one
     _reductemp_ clause; a single one suffices, so stop at the first
     match.  */
  for (tree c = omp_find_clause (gimple_omp_parallel_clauses (stmt),
				 OMP_CLAUSE_REDUCTION);
       c; c = omp_find_clause (OMP_CLAUSE_CHAIN (c), OMP_CLAUSE_REDUCTION))
    if (OMP_CLAUSE_REDUCTION_TASK (c))
      {
	tree type = build_pointer_type (pointer_sized_int_node);
	tree temp = create_tmp_var (type);
	/* NOTE: this inner C shadows the loop variable; it is the new
	   _reductemp_ clause being built.  */
	tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__REDUCTEMP_);
	if (outer_ctx)
	  insert_decl_map (&outer_ctx->cb, temp, temp);
	OMP_CLAUSE_DECL (c) = temp;
	OMP_CLAUSE_CHAIN (c) = gimple_omp_parallel_clauses (stmt);
	gimple_omp_parallel_set_clauses (stmt, c);
	break;
      }
    else if (OMP_CLAUSE_CHAIN (c) == NULL_TREE)
      break;

  ctx = new_omp_context (stmt, outer_ctx);
  taskreg_contexts.safe_push (ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  /* Set up the .omp_data_s record type that will carry the shared
     data; its fields are added by scan_sharing_clauses.  */
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  TYPE_ARTIFICIAL (ctx->record_type) = 1;
  if (!gimple_omp_parallel_grid_phony (stmt))
    {
      create_omp_child_function (ctx, false);
      gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
    }

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  /* If no fields ended up in the record, no data needs passing.  */
  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
}
/* Scan an OpenMP task directive.  Builds the omp_context, the
   .omp_data_s (and possibly .omp_data_a) record types, and the child
   function decl(s), then scans the clauses and body.  */

static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name, t;
  gomp_task *stmt = as_a <gomp_task *> (gsi_stmt (*gsi));

  /* Ignore task directives with empty bodies, unless they have depend
     clause.  */
  if (optimize > 0
      && gimple_omp_body (stmt)
      && empty_body_p (gimple_omp_body (stmt))
      && !omp_find_clause (gimple_omp_task_clauses (stmt), OMP_CLAUSE_DEPEND))
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  if (gimple_omp_task_taskloop_p (stmt))
    add_taskreg_looptemp_clauses (GF_OMP_FOR_KIND_TASKLOOP, stmt, outer_ctx);

  ctx = new_omp_context (stmt, outer_ctx);

  /* A taskwait has no body to outline; just scan its clauses.  */
  if (gimple_omp_task_taskwait_p (stmt))
    {
      scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
      return;
    }

  taskreg_contexts.safe_push (ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  /* Set up the .omp_data_s record type for the task data; fields are
     added by scan_sharing_clauses.  */
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  TYPE_ARTIFICIAL (ctx->record_type) = 1;
  create_omp_child_function (ctx, false);
  gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);

  /* If scan_sharing_clauses created a sender record (srecord_type),
     also build the .omp_data_a type and the task copy function.  */
  if (ctx->srecord_type)
    {
      name = create_tmp_var_name (".omp_data_a");
      name = build_decl (gimple_location (stmt),
			 TYPE_DECL, name, ctx->srecord_type);
      DECL_ARTIFICIAL (name) = 1;
      DECL_NAMELESS (name) = 1;
      TYPE_NAME (ctx->srecord_type) = name;
      TYPE_ARTIFICIAL (ctx->srecord_type) = 1;
      create_omp_child_function (ctx, true);
    }

  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  /* No fields means no data passing; record a zero-size,
     align-1 argument block on the task.  */
  if (TYPE_FIELDS (ctx->record_type) == NULL)
    {
      ctx->record_type = ctx->receiver_decl = NULL;
      t = build_int_cst (long_integer_type_node, 0);
      gimple_omp_task_set_arg_size (stmt, t);
      t = build_int_cst (long_integer_type_node, 1);
      gimple_omp_task_set_arg_align (stmt, t);
    }
}
/* Helper function for finish_taskreg_scan, called through walk_tree.
   If maybe_lookup_decl_in_outer_context returns non-NULL for some
   tree, replace it in the expression.  */

static tree
finish_taskreg_remap (tree *tp, int *walk_subtrees, void *data)
{
  tree expr = *tp;

  if (!VAR_P (expr))
    {
      /* Do not descend into types or other decls.  */
      if (IS_TYPE_OR_DECL_P (expr))
	*walk_subtrees = 0;
      return NULL_TREE;
    }

  omp_context *ctx = (omp_context *) data;
  tree repl = maybe_lookup_decl_in_outer_ctx (expr, ctx);
  if (repl != expr)
    {
      /* Substitute the outer decl, expanding its DECL_VALUE_EXPR if it
	 has one.  */
      if (DECL_HAS_VALUE_EXPR_P (repl))
	repl = unshare_expr (DECL_VALUE_EXPR (repl));
      *tp = repl;
    }
  *walk_subtrees = 0;
  return NULL_TREE;
}
/* If any decls have been made addressable during scan_omp,
   adjust their fields if needed, and layout record types
   of parallel/task constructs.  */

static void
finish_taskreg_scan (omp_context *ctx)
{
  if (ctx->record_type == NULL_TREE)
    return;

  /* If any task_shared_vars were needed, verify all
     OMP_CLAUSE_SHARED clauses on GIMPLE_OMP_{PARALLEL,TASK,TEAMS}
     statements if use_pointer_for_field hasn't changed
     because of that.  If it did, update field types now.  */
  if (task_shared_vars)
    {
      tree c;

      for (c = gimple_omp_taskreg_clauses (ctx->stmt);
	   c; c = OMP_CLAUSE_CHAIN (c))
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
	    && !OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
	  {
	    tree decl = OMP_CLAUSE_DECL (c);

	    /* Global variables don't need to be copied,
	       the receiver side will use them directly.  */
	    if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	      continue;
	    if (!bitmap_bit_p (task_shared_vars, DECL_UID (decl))
		|| !use_pointer_for_field (decl, ctx))
	      continue;
	    tree field = lookup_field (decl, ctx);
	    /* Already a pointer to the decl's type: nothing to do.  */
	    if (TREE_CODE (TREE_TYPE (field)) == POINTER_TYPE
		&& TREE_TYPE (TREE_TYPE (field)) == TREE_TYPE (decl))
	      continue;
	    /* The field held the value by copy but must now hold a
	       pointer; retype and realign it (and its sender twin).  */
	    TREE_TYPE (field) = build_pointer_type (TREE_TYPE (decl));
	    TREE_THIS_VOLATILE (field) = 0;
	    DECL_USER_ALIGN (field) = 0;
	    SET_DECL_ALIGN (field, TYPE_ALIGN (TREE_TYPE (field)));
	    if (TYPE_ALIGN (ctx->record_type) < DECL_ALIGN (field))
	      SET_TYPE_ALIGN (ctx->record_type, DECL_ALIGN (field));
	    if (ctx->srecord_type)
	      {
		tree sfield = lookup_sfield (decl, ctx);
		TREE_TYPE (sfield) = TREE_TYPE (field);
		TREE_THIS_VOLATILE (sfield) = 0;
		DECL_USER_ALIGN (sfield) = 0;
		SET_DECL_ALIGN (sfield, DECL_ALIGN (field));
		if (TYPE_ALIGN (ctx->srecord_type) < DECL_ALIGN (sfield))
		  SET_TYPE_ALIGN (ctx->srecord_type, DECL_ALIGN (sfield));
	      }
	  }
    }

  if (gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
    {
      tree clauses = gimple_omp_parallel_clauses (ctx->stmt);
      tree c = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_);
      if (c)
	{
	  /* Move the _reductemp_ clause first.  GOMP_parallel_reductions
	     expects to find it at the start of data.  */
	  tree f = lookup_field (OMP_CLAUSE_DECL (c), ctx);
	  tree *p = &TYPE_FIELDS (ctx->record_type);
	  /* Unlink F from the field chain ...  */
	  while (*p)
	    if (*p == f)
	      {
		*p = DECL_CHAIN (*p);
		break;
	      }
	    else
	      p = &DECL_CHAIN (*p);
	  /* ... and relink it at the head.  */
	  DECL_CHAIN (f) = TYPE_FIELDS (ctx->record_type);
	  TYPE_FIELDS (ctx->record_type) = f;
	}
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
  else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
  else
    {
      /* Remaining case: a GIMPLE_OMP_TASK.  */
      location_t loc = gimple_location (ctx->stmt);
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end.  */
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
	if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
	    || ! TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
	  {
	    /* Variable-sized field: splice it onto the VLA_FIELDS
	       list, preserving relative order.  */
	    *q = *p;
	    *p = TREE_CHAIN (*p);
	    TREE_CHAIN (*q) = NULL_TREE;
	    q = &TREE_CHAIN (*q);
	  }
	else
	  p = &DECL_CHAIN (*p);
      /* Append the collected VLA fields after all fixed-size ones.  */
      *p = vla_fields;
      if (gimple_omp_task_taskloop_p (ctx->stmt))
	{
	  /* Move fields corresponding to first and second _looptemp_
	     clause first.  There are filled by GOMP_taskloop
	     and thus need to be in specific positions.  */
	  tree clauses = gimple_omp_task_clauses (ctx->stmt);
	  tree c1 = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
	  tree c2 = omp_find_clause (OMP_CLAUSE_CHAIN (c1),
				     OMP_CLAUSE__LOOPTEMP_);
	  tree c3 = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_);
	  tree f1 = lookup_field (OMP_CLAUSE_DECL (c1), ctx);
	  tree f2 = lookup_field (OMP_CLAUSE_DECL (c2), ctx);
	  tree f3 = c3 ? lookup_field (OMP_CLAUSE_DECL (c3), ctx) : NULL_TREE;
	  /* Unlink F1/F2 (and F3 if present) from the field chain,
	     then relink them at the head in order F1, F2[, F3].  */
	  p = &TYPE_FIELDS (ctx->record_type);
	  while (*p)
	    if (*p == f1 || *p == f2 || *p == f3)
	      *p = DECL_CHAIN (*p);
	    else
	      p = &DECL_CHAIN (*p);
	  DECL_CHAIN (f1) = f2;
	  if (c3)
	    {
	      DECL_CHAIN (f2) = f3;
	      DECL_CHAIN (f3) = TYPE_FIELDS (ctx->record_type);
	    }
	  else
	    DECL_CHAIN (f2) = TYPE_FIELDS (ctx->record_type);
	  TYPE_FIELDS (ctx->record_type) = f1;
	  if (ctx->srecord_type)
	    {
	      /* Mirror the same reordering in the sender record.  */
	      f1 = lookup_sfield (OMP_CLAUSE_DECL (c1), ctx);
	      f2 = lookup_sfield (OMP_CLAUSE_DECL (c2), ctx);
	      if (c3)
		f3 = lookup_sfield (OMP_CLAUSE_DECL (c3), ctx);
	      p = &TYPE_FIELDS (ctx->srecord_type);
	      while (*p)
		if (*p == f1 || *p == f2 || *p == f3)
		  *p = DECL_CHAIN (*p);
		else
		  p = &DECL_CHAIN (*p);
	      DECL_CHAIN (f1) = f2;
	      DECL_CHAIN (f2) = TYPE_FIELDS (ctx->srecord_type);
	      if (c3)
		{
		  DECL_CHAIN (f2) = f3;
		  DECL_CHAIN (f3) = TYPE_FIELDS (ctx->srecord_type);
		}
	      else
		DECL_CHAIN (f2) = TYPE_FIELDS (ctx->srecord_type);
	      TYPE_FIELDS (ctx->srecord_type) = f1;
	    }
	}
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
	layout_type (ctx->srecord_type);
      /* Record the data block's size and alignment on the task; a
	 non-constant size must be remapped so it is evaluable in the
	 outer context.  */
      tree t = fold_convert_loc (loc, long_integer_type_node,
				 TYPE_SIZE_UNIT (ctx->record_type));
      if (TREE_CODE (t) != INTEGER_CST)
	{
	  t = unshare_expr (t);
	  walk_tree (&t, finish_taskreg_remap, ctx, NULL);
	}
      gimple_omp_task_set_arg_size (ctx->stmt, t);
      t = build_int_cst (long_integer_type_node,
			 TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (ctx->stmt, t);
    }
}
/* Find the enclosing offload context.  Returns the nearest context
   (including CTX itself) whose statement is a GIMPLE_OMP_TARGET, or
   NULL if there is none.  */

static omp_context *
enclosing_target_ctx (omp_context *ctx)
{
  while (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET)
    ctx = ctx->outer;
  return ctx;
}
/* Return true if ctx is part of an oacc kernels region.  */

static bool
ctx_in_oacc_kernels_region (omp_context *ctx)
{
  /* Walk outward looking for an enclosing OpenACC kernels target.  */
  while (ctx != NULL)
    {
      gimple *stmt = ctx->stmt;
      if (gimple_code (stmt) == GIMPLE_OMP_TARGET
	  && gimple_omp_target_kind (stmt) == GF_OMP_TARGET_KIND_OACC_KERNELS)
	return true;
      ctx = ctx->outer;
    }
  return false;
}
/* Check the parallelism clauses inside a kernels regions.
   Until kernels handling moves to use the same loop indirection
   scheme as parallel, we need to do this checking early.  Returns the
   union of the gang/worker/vector masks used by STMT and all
   enclosing loops; diagnostics are emitted only for the outermost
   (non-recursive, STMT != NULL) invocation.  */

static unsigned
check_oacc_kernel_gwv (gomp_for *stmt, omp_context *ctx)
{
  bool checking = true;
  unsigned outer_mask = 0;
  unsigned this_mask = 0;
  bool has_seq = false, has_auto = false;

  /* First collect the parallelism already claimed by enclosing
     contexts.  */
  if (ctx->outer)
    outer_mask = check_oacc_kernel_gwv (NULL, ctx->outer);

  if (!stmt)
    {
      /* Recursive call: no diagnostics, and non-loop contexts
	 contribute nothing of their own.  */
      checking = false;
      if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR)
	return outer_mask;
      stmt = as_a <gomp_for *> (ctx->stmt);
    }

  /* Accumulate this loop's gang/worker/vector/seq/auto specifiers.  */
  for (tree c = gimple_omp_for_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
    {
      enum omp_clause_code code = OMP_CLAUSE_CODE (c);
      if (code == OMP_CLAUSE_GANG)
	this_mask |= GOMP_DIM_MASK (GOMP_DIM_GANG);
      else if (code == OMP_CLAUSE_WORKER)
	this_mask |= GOMP_DIM_MASK (GOMP_DIM_WORKER);
      else if (code == OMP_CLAUSE_VECTOR)
	this_mask |= GOMP_DIM_MASK (GOMP_DIM_VECTOR);
      else if (code == OMP_CLAUSE_SEQ)
	has_seq = true;
      else if (code == OMP_CLAUSE_AUTO)
	has_auto = true;
    }

  if (checking)
    {
      if (has_seq && (this_mask || has_auto))
	error_at (gimple_location (stmt), "%<seq%> overrides other"
		  " OpenACC loop specifiers");
      else if (has_auto && this_mask)
	error_at (gimple_location (stmt), "%<auto%> conflicts with other"
		  " OpenACC loop specifiers");

      if (this_mask & outer_mask)
	error_at (gimple_location (stmt), "inner loop uses same"
		  " OpenACC parallelism as containing loop");
    }

  return outer_mask | this_mask;
}
/* Scan a GIMPLE_OMP_FOR. */
static omp_context *
scan_omp_for (gomp_for *stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  size_t i;
  tree clauses = gimple_omp_for_clauses (stmt);

  ctx = new_omp_context (stmt, outer_ctx);

  if (is_gimple_omp_oacc (stmt))
    {
      omp_context *tgt = enclosing_target_ctx (outer_ctx);

      /* Unless this loop sits inside an OpenACC "kernels" region, an
	 explicit argument on a gang/worker/vector clause is an error.  */
      if (!(tgt && is_oacc_kernels (tgt)))
	for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
	  {
	    tree c_op0;
	    switch (OMP_CLAUSE_CODE (c))
	      {
	      case OMP_CLAUSE_GANG:
		c_op0 = OMP_CLAUSE_GANG_EXPR (c);
		break;

	      case OMP_CLAUSE_WORKER:
		c_op0 = OMP_CLAUSE_WORKER_EXPR (c);
		break;

	      case OMP_CLAUSE_VECTOR:
		c_op0 = OMP_CLAUSE_VECTOR_EXPR (c);
		break;

	      default:
		continue;
	      }

	    if (c_op0)
	      {
		error_at (OMP_CLAUSE_LOCATION (c),
			  "argument not permitted on %qs clause",
			  omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		/* Point at whichever enclosing construct/routine makes
		   the argument invalid here.  */
		if (tgt)
		  inform (gimple_location (tgt->stmt),
			  "enclosing parent compute construct");
		else if (oacc_get_fn_attrib (current_function_decl))
		  inform (DECL_SOURCE_LOCATION (current_function_decl),
			  "enclosing routine");
		else
		  gcc_unreachable ();
	      }
	  }

      if (tgt && is_oacc_kernels (tgt))
	check_oacc_kernel_gwv (stmt, ctx);

      /* Collect all variables named in reductions on this loop.  Ensure
	 that, if this loop has a reduction on some variable v, and there is
	 a reduction on v somewhere in an outer context, then there is a
	 reduction on v on all intervening loops as well.  */
      tree local_reduction_clauses = NULL;
      for (tree c = gimple_omp_for_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
	{
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
	    local_reduction_clauses
	      = tree_cons (NULL, c, local_reduction_clauses);
	}
      /* Lazily build the set of reduction clauses visible from enclosing
	 contexts the first time it is needed.  */
      if (ctx->outer_reduction_clauses == NULL && ctx->outer != NULL)
	ctx->outer_reduction_clauses
	  = chainon (unshare_expr (ctx->outer->local_reduction_clauses),
		     ctx->outer->outer_reduction_clauses);
      tree outer_reduction_clauses = ctx->outer_reduction_clauses;
      tree local_iter = local_reduction_clauses;
      for (; local_iter; local_iter = TREE_CHAIN (local_iter))
	{
	  tree local_clause = TREE_VALUE (local_iter);
	  tree local_var = OMP_CLAUSE_DECL (local_clause);
	  tree_code local_op = OMP_CLAUSE_REDUCTION_CODE (local_clause);
	  bool have_outer_reduction = false;
	  tree ctx_iter = outer_reduction_clauses;
	  for (; ctx_iter; ctx_iter = TREE_CHAIN (ctx_iter))
	    {
	      tree outer_clause = TREE_VALUE (ctx_iter);
	      tree outer_var = OMP_CLAUSE_DECL (outer_clause);
	      tree_code outer_op = OMP_CLAUSE_REDUCTION_CODE (outer_clause);
	      /* Same variable reduced with a different operation in an
		 outer loop: warn, pointing at both clauses.  */
	      if (outer_var == local_var && outer_op != local_op)
		{
		  warning_at (OMP_CLAUSE_LOCATION (local_clause), 0,
			      "conflicting reduction operations for %qE",
			      local_var);
		  inform (OMP_CLAUSE_LOCATION (outer_clause),
			  "location of the previous reduction for %qE",
			  outer_var);
		}
	      if (outer_var == local_var)
		{
		  have_outer_reduction = true;
		  break;
		}
	    }
	  if (have_outer_reduction)
	    {
	      /* There is a reduction on outer_var both on this loop and on
		 some enclosing loop.  Walk up the context tree until such a
		 loop with a reduction on outer_var is found, and complain
		 about all intervening loops that do not have such a
		 reduction.  */
	      struct omp_context *curr_loop = ctx->outer;
	      bool found = false;
	      while (curr_loop != NULL)
		{
		  tree curr_iter = curr_loop->local_reduction_clauses;
		  for (; curr_iter; curr_iter = TREE_CHAIN (curr_iter))
		    {
		      tree curr_clause = TREE_VALUE (curr_iter);
		      tree curr_var = OMP_CLAUSE_DECL (curr_clause);
		      if (curr_var == local_var)
			{
			  found = true;
			  break;
			}
		    }
		  if (!found)
		    warning_at (gimple_location (curr_loop->stmt), 0,
				"nested loop in reduction needs "
				"reduction clause for %qE",
				local_var);
		  else
		    break;
		  curr_loop = curr_loop->outer;
		}
	    }
	}
      /* Publish this loop's reduction sets for inner contexts.  */
      ctx->local_reduction_clauses = local_reduction_clauses;
      ctx->outer_reduction_clauses
	= chainon (unshare_expr (ctx->local_reduction_clauses),
		   ctx->outer_reduction_clauses);

      if (tgt && is_oacc_kernels (tgt))
	{
	  /* Strip out reductions, as they are not handled yet.  */
	  tree *prev_ptr = &clauses;

	  while (tree probe = *prev_ptr)
	    {
	      tree *next_ptr = &OMP_CLAUSE_CHAIN (probe);

	      /* Unlink REDUCTION clauses, keep everything else.  */
	      if (OMP_CLAUSE_CODE (probe) == OMP_CLAUSE_REDUCTION)
		*prev_ptr = *next_ptr;
	      else
		prev_ptr = next_ptr;
	    }

	  gimple_omp_for_set_clauses (stmt, clauses);
	}
    }

  scan_sharing_clauses (clauses, ctx);

  /* Scan the pre-body, then every collapsed dimension's control
     expressions, then the loop body itself.  */
  scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
    }
  scan_omp (gimple_omp_body_ptr (stmt), ctx);
  return ctx;
}
/* Duplicate #pragma omp simd, one for SIMT, another one for SIMD.  */

static void
scan_omp_simd (gimple_stmt_iterator *gsi, gomp_for *stmt,
	       omp_context *outer_ctx)
{
  /* Replace STMT at GSI with a bind of the shape

       if (GOMP_USE_SIMT ())
	 <copy of STMT, tagged with an artificial _simt_ clause>
       else
	 <STMT, the original simd loop>

     so the choice between the SIMT and SIMD variants is made at
     run time.  */
  gbind *bind = gimple_build_bind (NULL, NULL, NULL);
  gsi_replace (gsi, bind, false);
  gimple_seq seq = NULL;
  /* cond = GOMP_USE_SIMT ();  */
  gimple *g = gimple_build_call_internal (IFN_GOMP_USE_SIMT, 0);
  tree cond = create_tmp_var_raw (integer_type_node);
  DECL_CONTEXT (cond) = current_function_decl;
  DECL_SEEN_IN_BIND_EXPR_P (cond) = 1;
  gimple_bind_set_vars (bind, cond);
  gimple_call_set_lhs (g, cond);
  gimple_seq_add_stmt (&seq, g);
  /* lab1: then-branch, lab2: else-branch, lab3: join point.  */
  tree lab1 = create_artificial_label (UNKNOWN_LOCATION);
  tree lab2 = create_artificial_label (UNKNOWN_LOCATION);
  tree lab3 = create_artificial_label (UNKNOWN_LOCATION);
  g = gimple_build_cond (NE_EXPR, cond, integer_zero_node, lab1, lab2);
  gimple_seq_add_stmt (&seq, g);
  g = gimple_build_label (lab1);
  gimple_seq_add_stmt (&seq, g);
  /* The SIMT copy of the loop, with a _SIMT_ clause prepended.  */
  gimple_seq new_seq = copy_gimple_seq_and_replace_locals (stmt);
  gomp_for *new_stmt = as_a <gomp_for *> (new_seq);
  tree clause = build_omp_clause (gimple_location (stmt), OMP_CLAUSE__SIMT_);
  OMP_CLAUSE_CHAIN (clause) = gimple_omp_for_clauses (new_stmt);
  gimple_omp_for_set_clauses (new_stmt, clause);
  gimple_seq_add_stmt (&seq, new_stmt);
  g = gimple_build_goto (lab3);
  gimple_seq_add_stmt (&seq, g);
  g = gimple_build_label (lab2);
  gimple_seq_add_stmt (&seq, g);
  /* The original SIMD loop on the else path.  */
  gimple_seq_add_stmt (&seq, stmt);
  g = gimple_build_label (lab3);
  gimple_seq_add_stmt (&seq, g);
  gimple_bind_set_body (bind, seq);
  update_stmt (bind);
  /* Scan both variants; record the SIMT twin in the SIMD context.  */
  scan_omp_for (new_stmt, outer_ctx);
  scan_omp_for (stmt, outer_ctx)->simt_stmt = new_stmt;
}
static tree omp_find_scan (gimple_stmt_iterator *, bool *,
struct walk_stmt_info *);
static omp_context *maybe_lookup_ctx (gimple *);
/* Duplicate #pragma omp simd, one for the scan input phase loop and one
   for scan phase loop.  */

static void
scan_omp_simd_scan (gimple_stmt_iterator *gsi, gomp_for *stmt,
		    omp_context *outer_ctx)
{
  /* The only change between inclusive and exclusive scan will be
     within the first simd loop, so just use inclusive in the
     worksharing loop.  */
  outer_ctx->scan_inclusive = true;
  tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_INCLUSIVE);
  OMP_CLAUSE_DECL (c) = integer_zero_node;

  /* Wrap the two loop copies in GIMPLE_OMP_SCAN statements: INPUT_STMT
     carries the original loop, SCAN_STMT a copy made below.  */
  gomp_scan *input_stmt = gimple_build_omp_scan (NULL, NULL_TREE);
  gomp_scan *scan_stmt = gimple_build_omp_scan (NULL, c);
  gsi_replace (gsi, input_stmt, false);
  gimple_seq input_body = NULL;
  gimple_seq_add_stmt (&input_body, stmt);
  gsi_insert_after (gsi, scan_stmt, GSI_NEW_STMT);

  /* Locate the inner scan separator directive in the original body.  */
  gimple_stmt_iterator input1_gsi = gsi_none ();
  struct walk_stmt_info wi;
  memset (&wi, 0, sizeof (wi));
  wi.val_only = true;
  wi.info = (void *) &input1_gsi;
  walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), omp_find_scan, NULL, &wi);
  gcc_assert (!gsi_end_p (input1_gsi));

  gimple *input_stmt1 = gsi_stmt (input1_gsi);
  gsi_next (&input1_gsi);
  gimple *scan_stmt1 = gsi_stmt (input1_gsi);
  gcc_assert (scan_stmt1 && gimple_code (scan_stmt1) == GIMPLE_OMP_SCAN);
  /* For an exclusive scan the input/scan halves swap roles.  */
  c = gimple_omp_scan_clauses (as_a <gomp_scan *> (scan_stmt1));
  if (c && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_EXCLUSIVE)
    std::swap (input_stmt1, scan_stmt1);

  /* Detach the input-phase half, copy the whole loop (the copy is thus
     made without that half), then restore it on the original.  */
  gimple_seq input_body1 = gimple_omp_body (input_stmt1);
  gimple_omp_set_body (input_stmt1, NULL);
  gimple_seq scan_body = copy_gimple_seq_and_replace_locals (stmt);
  gomp_for *new_stmt = as_a <gomp_for *> (scan_body);
  gimple_omp_set_body (input_stmt1, input_body1);
  gimple_omp_set_body (scan_stmt1, NULL);

  /* Find the separator again, this time in the copied loop.  */
  gimple_stmt_iterator input2_gsi = gsi_none ();
  memset (&wi, 0, sizeof (wi));
  wi.val_only = true;
  wi.info = (void *) &input2_gsi;
  walk_gimple_seq_mod (gimple_omp_body_ptr (new_stmt), omp_find_scan,
		       NULL, &wi);
  gcc_assert (!gsi_end_p (input2_gsi));

  gimple *input_stmt2 = gsi_stmt (input2_gsi);
  gsi_next (&input2_gsi);
  gimple *scan_stmt2 = gsi_stmt (input2_gsi);
  gcc_assert (scan_stmt2 && gimple_code (scan_stmt2) == GIMPLE_OMP_SCAN);
  if (c && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_EXCLUSIVE)
    std::swap (input_stmt2, scan_stmt2);
  gimple_omp_set_body (input_stmt2, NULL);

  /* Attach the loops to their scan wrappers and scan both of them.  */
  gimple_omp_set_body (input_stmt, input_body);
  gimple_omp_set_body (scan_stmt, scan_body);
  omp_context *ctx = new_omp_context (input_stmt, outer_ctx);
  scan_omp (gimple_omp_body_ptr (input_stmt), ctx);
  ctx = new_omp_context (scan_stmt, outer_ctx);
  scan_omp (gimple_omp_body_ptr (scan_stmt), ctx);
  maybe_lookup_ctx (new_stmt)->for_simd_scan_phase = true;
}
/* Scan an OpenMP sections directive: open a new context for STMT under
   OUTER_CTX, process its data-sharing clauses, then walk its body.  */

static void
scan_omp_sections (gomp_sections *stmt, omp_context *outer_ctx)
{
  omp_context *ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);
}
/* Scan an OpenMP single directive.  A record type (.omp_copy_s) is set up
   to hold copyprivate data; if no fields get added while scanning the
   clauses and body, the record is dropped, otherwise it is laid out.  */

static void
scan_omp_single (gomp_single *stmt, omp_context *outer_ctx)
{
  omp_context *ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);

  tree name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (gimple_location (stmt), TYPE_DECL, name,
		     ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}
/* Scan a GIMPLE_OMP_TARGET.  */

static void
scan_omp_target (gomp_target *stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  bool offloaded = is_gimple_omp_offloaded (stmt);
  tree clauses = gimple_omp_target_clauses (stmt);

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  /* Record type (.omp_data_t) collecting the mapped data for the
     region; fields are added while scanning the clauses/body.  */
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_t");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  TYPE_ARTIFICIAL (ctx->record_type) = 1;

  if (offloaded)
    {
      /* Offloaded regions are outlined into a child function.  */
      create_omp_child_function (ctx, false);
      gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);
    }

  scan_sharing_clauses (clauses, ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    /* Nothing got mapped; no data record is needed.  */
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      /* Reverse the field list accumulated during scanning.  */
      TYPE_FIELDS (ctx->record_type)
	= nreverse (TYPE_FIELDS (ctx->record_type));
      if (flag_checking)
	{
	  /* Verify every field carries the same alignment as the
	     first one.  */
	  unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
	  for (tree field = TYPE_FIELDS (ctx->record_type);
	       field;
	       field = DECL_CHAIN (field))
	    gcc_assert (DECL_ALIGN (field) == align);
	}
      layout_type (ctx->record_type);
      if (offloaded)
	fixup_child_record_type (ctx);
    }
}
/* Scan an OpenMP teams directive.  */

static void
scan_omp_teams (gomp_teams *stmt, omp_context *outer_ctx)
{
  omp_context *ctx = new_omp_context (stmt, outer_ctx);

  if (!gimple_omp_teams_host (stmt))
    {
      /* Non-host teams: only scan clauses and body.  */
      scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
      scan_omp (gimple_omp_body_ptr (stmt), ctx);
      return;
    }

  /* Host teams are treated like a task/parallel region: build an
     .omp_data_s record and outline a child function.  */
  taskreg_contexts.safe_push (ctx);
  gcc_assert (taskreg_nesting_level == 1);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  tree name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  TYPE_ARTIFICIAL (ctx->record_type) = 1;
  create_omp_child_function (ctx, false);
  gimple_omp_teams_set_child_fn (stmt, ctx->cb.dst_fn);

  scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    /* No data ended up shared; drop the record.  */
    ctx->record_type = ctx->receiver_decl = NULL;
}
/* Check nesting restrictions for statement STMT, which appears in context
   CTX (or in no OpenMP/OpenACC context when CTX is NULL).  Return true if
   STMT may remain where it is; otherwise emit a diagnostic and return
   false so the caller can remove the offending statement.  */

static bool
check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
{
  tree c;

  if (ctx && gimple_code (ctx->stmt) == GIMPLE_OMP_GRID_BODY)
    /* GRID_BODY is an artificial construct, nesting rules will be checked in
       the original copy of its contents.  */
    return true;

  /* No nesting of non-OpenACC STMT (that is, an OpenMP one, or a GOMP builtin)
     inside an OpenACC CTX.  */
  if (!(is_gimple_omp (stmt)
	&& is_gimple_omp_oacc (stmt))
      /* Except for atomic codes that we share with OpenMP.  */
      && !(gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD
	   || gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE))
    {
      if (oacc_get_fn_attrib (cfun->decl) != NULL)
	{
	  error_at (gimple_location (stmt),
		    "non-OpenACC construct inside of OpenACC routine");
	  return false;
	}
      else
	for (omp_context *octx = ctx; octx != NULL; octx = octx->outer)
	  if (is_gimple_omp (octx->stmt)
	      && is_gimple_omp_oacc (octx->stmt))
	    {
	      error_at (gimple_location (stmt),
			"non-OpenACC construct inside of OpenACC region");
	      return false;
	    }
    }

  if (ctx != NULL)
    {
      /* A scan region directly inside a for/simd loop is checked against
	 the enclosing loop context instead.  */
      if (gimple_code (ctx->stmt) == GIMPLE_OMP_SCAN
	  && ctx->outer
	  && gimple_code (ctx->outer->stmt) == GIMPLE_OMP_FOR)
	ctx = ctx->outer;
      if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	  && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD
	  && !ctx->loop_p)
	{
	  c = NULL_TREE;
	  if (ctx->order_concurrent
	      && (gimple_code (stmt) == GIMPLE_OMP_ORDERED
		  || gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD
		  || gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE))
	    {
	      error_at (gimple_location (stmt),
			"OpenMP constructs other than %<parallel%>, %<loop%>"
			" or %<simd%> may not be nested inside a region with"
			" the %<order(concurrent)%> clause");
	      return false;
	    }
	  if (gimple_code (stmt) == GIMPLE_OMP_ORDERED)
	    {
	      c = gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt));
	      if (omp_find_clause (c, OMP_CLAUSE_SIMD))
		{
		  /* "ordered simd threads" is only valid when the simd
		     loop is the inner half of a combined "for simd".  */
		  if (omp_find_clause (c, OMP_CLAUSE_THREADS)
		      && (ctx->outer == NULL
			  || !gimple_omp_for_combined_into_p (ctx->stmt)
			  || gimple_code (ctx->outer->stmt) != GIMPLE_OMP_FOR
			  || (gimple_omp_for_kind (ctx->outer->stmt)
			      != GF_OMP_FOR_KIND_FOR)
			  || !gimple_omp_for_combined_p (ctx->outer->stmt)))
		    {
		      error_at (gimple_location (stmt),
				"%<ordered simd threads%> must be closely "
				"nested inside of %<for simd%> region");
		      return false;
		    }
		  return true;
		}
	    }
	  else if (gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD
		   || gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE
		   || gimple_code (stmt) == GIMPLE_OMP_SCAN)
	    return true;
	  /* A nested simd loop is fine inside a simd region.  Test the
	     kind of the nested STMT itself here; the enclosing loop
	     (ctx->stmt) is already known to be SIMD in this branch, so
	     testing ctx->stmt would accept ANY nested OMP loop.  */
	  else if (gimple_code (stmt) == GIMPLE_OMP_FOR
		   && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_SIMD)
	    return true;
	  error_at (gimple_location (stmt),
		    "OpenMP constructs other than "
		    "%<ordered simd%>, %<simd%>, %<loop%> or %<atomic%> may "
		    "not be nested inside %<simd%> region");
	  return false;
	}
      else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	{
	  if ((gimple_code (stmt) != GIMPLE_OMP_FOR
	       || (gimple_omp_for_kind (stmt) != GF_OMP_FOR_KIND_DISTRIBUTE
		   && gimple_omp_for_kind (stmt) != GF_OMP_FOR_KIND_GRID_LOOP
		   && omp_find_clause (gimple_omp_for_clauses (stmt),
				       OMP_CLAUSE_BIND) == NULL_TREE))
	      && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
	    {
	      error_at (gimple_location (stmt),
			"only %<distribute%>, %<parallel%> or %<loop%> "
			"regions are allowed to be strictly nested inside "
			"%<teams%> region");
	      return false;
	    }
	}
      else if (ctx->order_concurrent
	       && gimple_code (stmt) != GIMPLE_OMP_PARALLEL
	       && (gimple_code (stmt) != GIMPLE_OMP_FOR
		   || gimple_omp_for_kind (stmt) != GF_OMP_FOR_KIND_SIMD)
	       && gimple_code (stmt) != GIMPLE_OMP_SCAN)
	{
	  if (ctx->loop_p)
	    error_at (gimple_location (stmt),
		      "OpenMP constructs other than %<parallel%>, %<loop%> or "
		      "%<simd%> may not be nested inside a %<loop%> region");
	  else
	    error_at (gimple_location (stmt),
		      "OpenMP constructs other than %<parallel%>, %<loop%> or "
		      "%<simd%> may not be nested inside a region with "
		      "the %<order(concurrent)%> clause");
	  return false;
	}
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_FOR:
      if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_SIMD)
	return true;
      if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
	{
	  if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
	    {
	      error_at (gimple_location (stmt),
			"%<distribute%> region must be strictly nested "
			"inside %<teams%> construct");
	      return false;
	    }
	  return true;
	}
      /* We split taskloop into task and nested taskloop in it.  */
      if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_TASKLOOP)
	return true;
      /* For now, hope this will change and loop bind(parallel) will not
	 be allowed in lots of contexts.  */
      if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR
	  && omp_find_clause (gimple_omp_for_clauses (stmt), OMP_CLAUSE_BIND))
	return true;
      if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
	{
	  bool ok = false;

	  if (ctx)
	    switch (gimple_code (ctx->stmt))
	      {
	      case GIMPLE_OMP_FOR:
		ok = (gimple_omp_for_kind (ctx->stmt)
		      == GF_OMP_FOR_KIND_OACC_LOOP);
		break;

	      case GIMPLE_OMP_TARGET:
		switch (gimple_omp_target_kind (ctx->stmt))
		  {
		  case GF_OMP_TARGET_KIND_OACC_PARALLEL:
		  case GF_OMP_TARGET_KIND_OACC_KERNELS:
		  case GF_OMP_TARGET_KIND_OACC_SERIAL:
		    ok = true;
		    break;

		  default:
		    break;
		  }
		/* Terminate this case explicitly rather than falling
		   (harmlessly, but confusingly) into default.  */
		break;

	      default:
		break;
	      }
	  else if (oacc_get_fn_attrib (current_function_decl))
	    ok = true;
	  if (!ok)
	    {
	      error_at (gimple_location (stmt),
			"OpenACC loop directive must be associated with"
			" an OpenACC compute region");
	      return false;
	    }
	}
      /* FALLTHRU */
    case GIMPLE_CALL:
      /* Only GOMP_cancel / GOMP_cancellation_point calls reach here;
	 validate which construct they cancel against CTX.  */
      if (is_gimple_call (stmt)
	  && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
	      == BUILT_IN_GOMP_CANCEL
	      || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
		 == BUILT_IN_GOMP_CANCELLATION_POINT))
	{
	  const char *bad = NULL;
	  const char *kind = NULL;
	  const char *construct
	    = (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
	       == BUILT_IN_GOMP_CANCEL)
	      ? "cancel"
	      : "cancellation point";
	  if (ctx == NULL)
	    {
	      error_at (gimple_location (stmt), "orphaned %qs construct",
			construct);
	      return false;
	    }
	  /* The first argument encodes the cancelled construct kind.  */
	  switch (tree_fits_shwi_p (gimple_call_arg (stmt, 0))
		  ? tree_to_shwi (gimple_call_arg (stmt, 0))
		  : 0)
	    {
	    case 1:
	      if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
		bad = "parallel";
	      else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
		       == BUILT_IN_GOMP_CANCEL
		       && !integer_zerop (gimple_call_arg (stmt, 1)))
		ctx->cancellable = true;
	      kind = "parallel";
	      break;
	    case 2:
	      if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
		  || gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
		bad = "for";
	      else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
		       == BUILT_IN_GOMP_CANCEL
		       && !integer_zerop (gimple_call_arg (stmt, 1)))
		{
		  ctx->cancellable = true;
		  if (omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
				       OMP_CLAUSE_NOWAIT))
		    warning_at (gimple_location (stmt), 0,
				"%<cancel for%> inside "
				"%<nowait%> for construct");
		  if (omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
				       OMP_CLAUSE_ORDERED))
		    warning_at (gimple_location (stmt), 0,
				"%<cancel for%> inside "
				"%<ordered%> for construct");
		}
	      kind = "for";
	      break;
	    case 4:
	      if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
		  && gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
		bad = "sections";
	      else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
		       == BUILT_IN_GOMP_CANCEL
		       && !integer_zerop (gimple_call_arg (stmt, 1)))
		{
		  if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
		    {
		      ctx->cancellable = true;
		      if (omp_find_clause (gimple_omp_sections_clauses
								(ctx->stmt),
					   OMP_CLAUSE_NOWAIT))
			warning_at (gimple_location (stmt), 0,
				    "%<cancel sections%> inside "
				    "%<nowait%> sections construct");
		    }
		  else
		    {
		      /* Inside a section, the cancellable sections region
			 is the immediate parent context.  */
		      gcc_assert (ctx->outer
				  && gimple_code (ctx->outer->stmt)
				     == GIMPLE_OMP_SECTIONS);
		      ctx->outer->cancellable = true;
		      if (omp_find_clause (gimple_omp_sections_clauses
							(ctx->outer->stmt),
					   OMP_CLAUSE_NOWAIT))
			warning_at (gimple_location (stmt), 0,
				    "%<cancel sections%> inside "
				    "%<nowait%> sections construct");
		    }
		}
	      kind = "sections";
	      break;
	    case 8:
	      if (!is_task_ctx (ctx)
		  && (!is_taskloop_ctx (ctx)
		      || ctx->outer == NULL
		      || !is_task_ctx (ctx->outer)))
		bad = "task";
	      else
		{
		  /* Walk outward: a taskgroup must be found before any
		     enclosing parallel/teams/target region.  */
		  for (omp_context *octx = ctx->outer;
		       octx; octx = octx->outer)
		    {
		      switch (gimple_code (octx->stmt))
			{
			case GIMPLE_OMP_TASKGROUP:
			  break;
			case GIMPLE_OMP_TARGET:
			  if (gimple_omp_target_kind (octx->stmt)
			      != GF_OMP_TARGET_KIND_REGION)
			    continue;
			  /* FALLTHRU */
			case GIMPLE_OMP_PARALLEL:
			case GIMPLE_OMP_TEAMS:
			  error_at (gimple_location (stmt),
				    "%<%s taskgroup%> construct not closely "
				    "nested inside of %<taskgroup%> region",
				    construct);
			  return false;
			case GIMPLE_OMP_TASK:
			  if (gimple_omp_task_taskloop_p (octx->stmt)
			      && octx->outer
			      && is_taskloop_ctx (octx->outer))
			    {
			      tree clauses
				= gimple_omp_for_clauses (octx->outer->stmt);
			      if (!omp_find_clause (clauses, OMP_CLAUSE_NOGROUP))
				break;
			    }
			  continue;
			default:
			  continue;
			}
		      break;
		    }
		  ctx->cancellable = true;
		}
	      kind = "taskgroup";
	      break;
	    default:
	      error_at (gimple_location (stmt), "invalid arguments");
	      return false;
	    }
	  if (bad)
	    {
	      error_at (gimple_location (stmt),
			"%<%s %s%> construct not closely nested inside of %qs",
			construct, kind, bad);
	      return false;
	    }
	}
      /* FALLTHRU */
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
      /* Work-sharing regions (and barriers) must not be closely nested
	 inside other work-sharing or serializing regions.  */
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	    if (gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR
		&& gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_TASKLOOP)
	      break;
	    /* FALLTHRU */
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_ORDERED:
	  case GIMPLE_OMP_MASTER:
	  case GIMPLE_OMP_TASK:
	  case GIMPLE_OMP_CRITICAL:
	    if (is_gimple_call (stmt))
	      {
		if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
		    != BUILT_IN_GOMP_BARRIER)
		  return true;
		error_at (gimple_location (stmt),
			  "barrier region may not be closely nested inside "
			  "of work-sharing, %<loop%>, %<critical%>, "
			  "%<ordered%>, %<master%>, explicit %<task%> or "
			  "%<taskloop%> region");
		return false;
	      }
	    error_at (gimple_location (stmt),
		      "work-sharing region may not be closely nested inside "
		      "of work-sharing, %<loop%>, %<critical%>, %<ordered%>, "
		      "%<master%>, explicit %<task%> or %<taskloop%> region");
	    return false;
	  case GIMPLE_OMP_PARALLEL:
	  case GIMPLE_OMP_TEAMS:
	    return true;
	  case GIMPLE_OMP_TARGET:
	    if (gimple_omp_target_kind (ctx->stmt)
		== GF_OMP_TARGET_KIND_REGION)
	      return true;
	    break;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	    if (gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR
		&& gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_TASKLOOP)
	      break;
	    /* FALLTHRU */
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_TASK:
	    error_at (gimple_location (stmt),
		      "%<master%> region may not be closely nested inside "
		      "of work-sharing, %<loop%>, explicit %<task%> or "
		      "%<taskloop%> region");
	    return false;
	  case GIMPLE_OMP_PARALLEL:
	  case GIMPLE_OMP_TEAMS:
	    return true;
	  case GIMPLE_OMP_TARGET:
	    if (gimple_omp_target_kind (ctx->stmt)
		== GF_OMP_TARGET_KIND_REGION)
	      return true;
	    break;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_TASK:
      /* depend(source)/depend(sink:...) are valid only on omp ordered.  */
      for (c = gimple_omp_task_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
	    && (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE
		|| OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK))
	  {
	    enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_KIND (c);
	    error_at (OMP_CLAUSE_LOCATION (c),
		      "%<depend(%s)%> is only allowed in %<omp ordered%>",
		      kind == OMP_CLAUSE_DEPEND_SOURCE ? "source" : "sink");
	    return false;
	  }
      break;
    case GIMPLE_OMP_ORDERED:
      for (c = gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt));
	   c; c = OMP_CLAUSE_CHAIN (c))
	{
	  if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
	    {
	      gcc_assert (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_THREADS
			  || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SIMD);
	      continue;
	    }
	  enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_KIND (c);
	  if (kind == OMP_CLAUSE_DEPEND_SOURCE
	      || kind == OMP_CLAUSE_DEPEND_SINK)
	    {
	      tree oclause;
	      /* Look for containing ordered(N) loop.  */
	      if (ctx == NULL
		  || gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
		  || (oclause
		      = omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
					 OMP_CLAUSE_ORDERED)) == NULL_TREE)
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "%<ordered%> construct with %<depend%> clause "
			    "must be closely nested inside an %<ordered%> "
			    "loop");
		  return false;
		}
	      else if (OMP_CLAUSE_ORDERED_EXPR (oclause) == NULL_TREE)
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "%<ordered%> construct with %<depend%> clause "
			    "must be closely nested inside a loop with "
			    "%<ordered%> clause with a parameter");
		  return false;
		}
	    }
	  else
	    {
	      error_at (OMP_CLAUSE_LOCATION (c),
			"invalid depend kind in omp %<ordered%> %<depend%>");
	      return false;
	    }
	}
      c = gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt));
      if (omp_find_clause (c, OMP_CLAUSE_SIMD))
	{
	  /* ordered simd must be closely nested inside of simd region,
	     and simd region must not encounter constructs other than
	     ordered simd, therefore ordered simd may be either orphaned,
	     or ctx->stmt must be simd.  The latter case is handled already
	     earlier.  */
	  if (ctx != NULL)
	    {
	      error_at (gimple_location (stmt),
			"%<ordered%> %<simd%> must be closely nested inside "
			"%<simd%> region");
	      return false;
	    }
	}
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_CRITICAL:
	  case GIMPLE_OMP_TASK:
	  case GIMPLE_OMP_ORDERED:
	  ordered_in_taskloop:
	    error_at (gimple_location (stmt),
		      "%<ordered%> region may not be closely nested inside "
		      "of %<critical%>, %<ordered%>, explicit %<task%> or "
		      "%<taskloop%> region");
	    return false;
	  case GIMPLE_OMP_FOR:
	    if (gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_TASKLOOP)
	      goto ordered_in_taskloop;
	    tree o;
	    o = omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
				 OMP_CLAUSE_ORDERED);
	    if (o == NULL)
	      {
		error_at (gimple_location (stmt),
			  "%<ordered%> region must be closely nested inside "
			  "a loop region with an %<ordered%> clause");
		return false;
	      }
	    if (OMP_CLAUSE_ORDERED_EXPR (o) != NULL_TREE
		&& omp_find_clause (c, OMP_CLAUSE_DEPEND) == NULL_TREE)
	      {
		error_at (gimple_location (stmt),
			  "%<ordered%> region without %<depend%> clause may "
			  "not be closely nested inside a loop region with "
			  "an %<ordered%> clause with a parameter");
		return false;
	      }
	    return true;
	  case GIMPLE_OMP_TARGET:
	    if (gimple_omp_target_kind (ctx->stmt)
		!= GF_OMP_TARGET_KIND_REGION)
	      break;
	    /* FALLTHRU */
	  case GIMPLE_OMP_PARALLEL:
	  case GIMPLE_OMP_TEAMS:
	    error_at (gimple_location (stmt),
		      "%<ordered%> region must be closely nested inside "
		      "a loop region with an %<ordered%> clause");
	    return false;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_CRITICAL:
      {
	/* Reject a critical region nested, at any depth, inside another
	   critical region with the same name.  */
	tree this_stmt_name
	  = gimple_omp_critical_name (as_a <gomp_critical *> (stmt));
	for (; ctx != NULL; ctx = ctx->outer)
	  if (gomp_critical *other_crit
	      = dyn_cast <gomp_critical *> (ctx->stmt))
	    if (this_stmt_name == gimple_omp_critical_name (other_crit))
	      {
		error_at (gimple_location (stmt),
			  "%<critical%> region may not be nested inside "
			  "a %<critical%> region with the same name");
		return false;
	      }
      }
      break;
    case GIMPLE_OMP_TEAMS:
      if (ctx == NULL)
	break;
      else if (gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
	       || (gimple_omp_target_kind (ctx->stmt)
		   != GF_OMP_TARGET_KIND_REGION))
	{
	  /* Teams construct can appear either strictly nested inside of
	     target construct with no intervening stmts, or can be encountered
	     only by initial task (so must not appear inside any OpenMP
	     construct.  */
	  error_at (gimple_location (stmt),
		    "%<teams%> construct must be closely nested inside of "
		    "%<target%> construct or not nested in any OpenMP "
		    "construct");
	  return false;
	}
      break;
    case GIMPLE_OMP_TARGET:
      /* depend(source)/depend(sink:...) are valid only on omp ordered.  */
      for (c = gimple_omp_target_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
	    && (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE
		|| OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK))
	  {
	    enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_KIND (c);
	    error_at (OMP_CLAUSE_LOCATION (c),
		      "%<depend(%s)%> is only allowed in %<omp ordered%>",
		      kind == OMP_CLAUSE_DEPEND_SOURCE ? "source" : "sink");
	    return false;
	  }
      if (is_gimple_omp_offloaded (stmt)
	  && oacc_get_fn_attrib (cfun->decl) != NULL)
	{
	  error_at (gimple_location (stmt),
		    "OpenACC region inside of OpenACC routine, nested "
		    "parallelism not supported yet");
	  return false;
	}
      for (; ctx != NULL; ctx = ctx->outer)
	{
	  if (gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET)
	    {
	      if (is_gimple_omp (stmt)
		  && is_gimple_omp_oacc (stmt)
		  && is_gimple_omp (ctx->stmt))
		{
		  error_at (gimple_location (stmt),
			    "OpenACC construct inside of non-OpenACC region");
		  return false;
		}
	      continue;
	    }

	  /* Human-readable names of STMT and the enclosing target-class
	     construct, for diagnostics.  */
	  const char *stmt_name, *ctx_stmt_name;
	  switch (gimple_omp_target_kind (stmt))
	    {
	    case GF_OMP_TARGET_KIND_REGION: stmt_name = "target"; break;
	    case GF_OMP_TARGET_KIND_DATA: stmt_name = "target data"; break;
	    case GF_OMP_TARGET_KIND_UPDATE: stmt_name = "target update"; break;
	    case GF_OMP_TARGET_KIND_ENTER_DATA:
	      stmt_name = "target enter data"; break;
	    case GF_OMP_TARGET_KIND_EXIT_DATA:
	      stmt_name = "target exit data"; break;
	    case GF_OMP_TARGET_KIND_OACC_PARALLEL: stmt_name = "parallel"; break;
	    case GF_OMP_TARGET_KIND_OACC_KERNELS: stmt_name = "kernels"; break;
	    case GF_OMP_TARGET_KIND_OACC_SERIAL: stmt_name = "serial"; break;
	    case GF_OMP_TARGET_KIND_OACC_DATA: stmt_name = "data"; break;
	    case GF_OMP_TARGET_KIND_OACC_UPDATE: stmt_name = "update"; break;
	    case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
	      stmt_name = "enter/exit data"; break;
	    case GF_OMP_TARGET_KIND_OACC_DECLARE: stmt_name = "declare"; break;
	    case GF_OMP_TARGET_KIND_OACC_HOST_DATA: stmt_name = "host_data";
	      break;
	    default: gcc_unreachable ();
	    }
	  switch (gimple_omp_target_kind (ctx->stmt))
	    {
	    case GF_OMP_TARGET_KIND_REGION: ctx_stmt_name = "target"; break;
	    case GF_OMP_TARGET_KIND_DATA: ctx_stmt_name = "target data"; break;
	    case GF_OMP_TARGET_KIND_OACC_PARALLEL:
	      ctx_stmt_name = "parallel"; break;
	    case GF_OMP_TARGET_KIND_OACC_KERNELS:
	      ctx_stmt_name = "kernels"; break;
	    case GF_OMP_TARGET_KIND_OACC_SERIAL:
	      ctx_stmt_name = "serial"; break;
	    case GF_OMP_TARGET_KIND_OACC_DATA: ctx_stmt_name = "data"; break;
	    case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
	      ctx_stmt_name = "host_data"; break;
	    default: gcc_unreachable ();
	    }

	  /* OpenACC/OpenMP mismatch?  */
	  if (is_gimple_omp_oacc (stmt)
	      != is_gimple_omp_oacc (ctx->stmt))
	    {
	      error_at (gimple_location (stmt),
			"%s %qs construct inside of %s %qs region",
			(is_gimple_omp_oacc (stmt)
			 ? "OpenACC" : "OpenMP"), stmt_name,
			(is_gimple_omp_oacc (ctx->stmt)
			 ? "OpenACC" : "OpenMP"), ctx_stmt_name);
	      return false;
	    }
	  if (is_gimple_omp_offloaded (ctx->stmt))
	    {
	      /* No GIMPLE_OMP_TARGET inside offloaded OpenACC CTX.  */
	      if (is_gimple_omp_oacc (ctx->stmt))
		{
		  error_at (gimple_location (stmt),
			    "%qs construct inside of %qs region",
			    stmt_name, ctx_stmt_name);
		  return false;
		}
	      else
		{
		  warning_at (gimple_location (stmt), 0,
			      "%qs construct inside of %qs region",
			      stmt_name, ctx_stmt_name);
		}
	    }
	}
      break;
    default:
      break;
    }
  return true;
}
/* Helper function scan_omp.

   Callback for walk_tree or operators in walk_gimple_stmt used to
   scan for OMP directives in TP.  */

static tree
scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  omp_context *ctx = (omp_context *) wi->info;
  tree t = *tp;
  enum tree_code code = TREE_CODE (t);

  if (code == VAR_DECL
      || code == PARM_DECL
      || code == LABEL_DECL
      || code == RESULT_DECL)
    {
      /* Replace the decl with its remapped counterpart, if we are
	 inside a context.  */
      if (ctx)
	{
	  tree repl = remap_decl (t, &ctx->cb);
	  gcc_checking_assert (TREE_CODE (repl) != ERROR_MARK);
	  *tp = repl;
	}
    }
  else if (ctx && TYPE_P (t))
    *tp = remap_type (t, &ctx->cb);
  else if (!DECL_P (t))
    {
      *walk_subtrees = 1;
      if (ctx)
	{
	  /* If the expression's type got remapped, retype the node;
	     integer constants are rebuilt in the new type instead of
	     being modified in place.  */
	  tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
	  if (tem != TREE_TYPE (t))
	    {
	      if (code == INTEGER_CST)
		*tp = wide_int_to_tree (tem, wi::to_wide (t));
	      else
		TREE_TYPE (t) = tem;
	    }
	}
    }

  return NULL_TREE;
}
/* Return true if FNDECL is a setjmp or a longjmp.  */

static bool
setjmp_or_longjmp_p (const_tree fndecl)
{
  /* The recognized builtins are always setjmp/longjmp.  */
  if (fndecl_built_in_p (fndecl, BUILT_IN_SETJMP)
      || fndecl_built_in_p (fndecl, BUILT_IN_LONGJMP))
    return true;

  /* Otherwise accept only a public function at translation-unit scope
     whose literal name is "setjmp" or "longjmp".  */
  tree declname = DECL_NAME (fndecl);
  if (declname == NULL_TREE
      || !TREE_PUBLIC (fndecl)
      || (DECL_CONTEXT (fndecl) != NULL_TREE
	  && TREE_CODE (DECL_CONTEXT (fndecl)) != TRANSLATION_UNIT_DECL))
    return false;

  const char *name = IDENTIFIER_POINTER (declname);
  return strcmp (name, "setjmp") == 0 || strcmp (name, "longjmp") == 0;
}
/* Return true if FNDECL is an omp_* runtime API call.  */

static bool
omp_runtime_api_call (const_tree fndecl)
{
  /* Only public functions at translation-unit scope can be the
     OpenMP runtime entry points.  */
  tree declname = DECL_NAME (fndecl);
  if (declname == NULL_TREE
      || !TREE_PUBLIC (fndecl)
      || (DECL_CONTEXT (fndecl) != NULL_TREE
	  && TREE_CODE (DECL_CONTEXT (fndecl)) != TRANSLATION_UNIT_DECL))
    return false;

  const char *name = IDENTIFIER_POINTER (declname);
  if (strncmp (name, "omp_", 4) != 0)
    return false;

  static const char *omp_runtime_apis[] =
    {
      /* This array has 3 sections.  First omp_* calls that don't
	 have any suffixes.  */
      "target_alloc",
      "target_associate_ptr",
      "target_disassociate_ptr",
      "target_free",
      "target_is_present",
      "target_memcpy",
      "target_memcpy_rect",
      NULL,
      /* Now omp_* calls that are available as omp_* and omp_*_.  */
      "capture_affinity",
      "destroy_lock",
      "destroy_nest_lock",
      "display_affinity",
      "get_active_level",
      "get_affinity_format",
      "get_cancellation",
      "get_default_device",
      "get_dynamic",
      "get_initial_device",
      "get_level",
      "get_max_active_levels",
      "get_max_task_priority",
      "get_max_threads",
      "get_nested",
      "get_num_devices",
      "get_num_places",
      "get_num_procs",
      "get_num_teams",
      "get_num_threads",
      "get_partition_num_places",
      "get_place_num",
      "get_proc_bind",
      "get_team_num",
      "get_thread_limit",
      "get_thread_num",
      "get_wtick",
      "get_wtime",
      "in_final",
      "in_parallel",
      "init_lock",
      "init_nest_lock",
      "is_initial_device",
      "pause_resource",
      "pause_resource_all",
      "set_affinity_format",
      "set_lock",
      "set_nest_lock",
      "test_lock",
      "test_nest_lock",
      "unset_lock",
      "unset_nest_lock",
      NULL,
      /* And finally calls available as omp_*, omp_*_ and omp_*_8_.  */
      "get_ancestor_thread_num",
      "get_partition_place_nums",
      "get_place_num_procs",
      "get_place_proc_ids",
      "get_schedule",
      "get_team_size",
      "set_default_device",
      "set_dynamic",
      "set_max_active_levels",
      "set_nested",
      "set_num_threads",
      "set_schedule"
    };

  /* MODE counts the NULL separators crossed so far, i.e. which set of
     Fortran-style suffixes ("", "_", "_8_") the entry accepts.  */
  int mode = 0;
  for (unsigned i = 0; i < ARRAY_SIZE (omp_runtime_apis); i++)
    {
      const char *api = omp_runtime_apis[i];
      if (api == NULL)
	{
	  mode++;
	  continue;
	}
      size_t len = strlen (api);
      if (strncmp (name + 4, api, len) != 0)
	continue;
      const char *suffix = name + 4 + len;
      if (suffix[0] == '\0')
	return true;
      if (mode > 0 && suffix[0] == '_')
	{
	  if (suffix[1] == '\0')
	    return true;
	  if (mode > 1 && strcmp (suffix + 1, "8_") == 0)
	    return true;
	}
    }
  return false;
}
/* Helper function for scan_omp.

   Callback for walk_gimple_stmt used to scan for OMP directives in
   the current statement in GSI.  Diagnoses and removes statements
   that violate OpenMP nesting restrictions (replacing them with a
   GIMPLE_NOP so the IL stays valid), and recurses into OMP regions,
   creating a new omp_context for each construct encountered.  */

static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		 struct walk_stmt_info *wi)
{
  gimple *stmt = gsi_stmt (*gsi);
  omp_context *ctx = (omp_context *) wi->info;

  /* Keep diagnostics pointing at the statement being scanned.  */
  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* Check the nesting restrictions.  */
  bool remove = false;
  if (is_gimple_omp (stmt))
    remove = !check_omp_nesting_restrictions (stmt, ctx);
  else if (is_gimple_call (stmt))
    {
      tree fndecl = gimple_call_fndecl (stmt);
      if (fndecl)
	{
	  /* setjmp/longjmp are rejected inside a simd construct,
	     unless ctx->loop_p is set — NOTE(review): presumably the
	     OpenMP loop construct exemption; confirm against the
	     context setup code.  */
	  if (ctx
	      && gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	      && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD
	      && setjmp_or_longjmp_p (fndecl)
	      && !ctx->loop_p)
	    {
	      remove = true;
	      error_at (gimple_location (stmt),
			"setjmp/longjmp inside %<simd%> construct");
	    }
	  else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
	    switch (DECL_FUNCTION_CODE (fndecl))
	      {
	      /* These GOMP builtins correspond to OMP constructs and
		 obey the same nesting restrictions.  */
	      case BUILT_IN_GOMP_BARRIER:
	      case BUILT_IN_GOMP_CANCEL:
	      case BUILT_IN_GOMP_CANCELLATION_POINT:
	      case BUILT_IN_GOMP_TASKYIELD:
	      case BUILT_IN_GOMP_TASKWAIT:
	      case BUILT_IN_GOMP_TASKGROUP_START:
	      case BUILT_IN_GOMP_TASKGROUP_END:
		remove = !check_omp_nesting_restrictions (stmt, ctx);
		break;
	      default:
		break;
	      }
	  else if (ctx)
	    {
	      /* OpenMP runtime API calls are not allowed in a region
		 with an order(concurrent) clause.  A GIMPLE_OMP_SCAN
		 context is transparent here; look at its parent.  */
	      omp_context *octx = ctx;
	      if (gimple_code (ctx->stmt) == GIMPLE_OMP_SCAN && ctx->outer)
		octx = ctx->outer;
	      if (octx->order_concurrent && omp_runtime_api_call (fndecl))
		{
		  remove = true;
		  error_at (gimple_location (stmt),
			    "OpenMP runtime API call %qD in a region with "
			    "%<order(concurrent)%> clause", fndecl);
		}
	    }
	}
    }
  if (remove)
    {
      /* The statement was diagnosed above; replace it with a no-op
	 so later passes never see the invalid construct.  */
      stmt = gimple_build_nop ();
      gsi_replace (gsi, stmt, false);
    }

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_PARALLEL:
      taskreg_nesting_level++;
      scan_omp_parallel (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_TASK:
      taskreg_nesting_level++;
      scan_omp_task (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_FOR:
      /* A combined simd loop with an inscan reduction gets special
	 scanning (unless already inside a GIMPLE_OMP_SCAN context or
	 an error was emitted).  */
      if ((gimple_omp_for_kind (as_a <gomp_for *> (stmt))
	   == GF_OMP_FOR_KIND_SIMD)
	  && gimple_omp_for_combined_into_p (stmt)
	  && gimple_code (ctx->stmt) != GIMPLE_OMP_SCAN)
	{
	  tree clauses = gimple_omp_for_clauses (as_a <gomp_for *> (stmt));
	  tree c = omp_find_clause (clauses, OMP_CLAUSE_REDUCTION);
	  if (c && OMP_CLAUSE_REDUCTION_INSCAN (c) && !seen_error ())
	    {
	      scan_omp_simd_scan (gsi, as_a <gomp_for *> (stmt), ctx);
	      break;
	    }
	}
      /* simd loops that may be offloaded to a SIMT target get the
	 SIMT scanning path when the target supports SIMT at all.  */
      if ((gimple_omp_for_kind (as_a <gomp_for *> (stmt))
	   == GF_OMP_FOR_KIND_SIMD)
	  && omp_maybe_offloaded_ctx (ctx)
	  && omp_max_simt_vf ())
	scan_omp_simd (gsi, as_a <gomp_for *> (stmt), ctx);
      else
	scan_omp_for (as_a <gomp_for *> (stmt), ctx);
      break;

    case GIMPLE_OMP_SECTIONS:
      scan_omp_sections (as_a <gomp_sections *> (stmt), ctx);
      break;

    case GIMPLE_OMP_SINGLE:
      scan_omp_single (as_a <gomp_single *> (stmt), ctx);
      break;

    case GIMPLE_OMP_SCAN:
      /* Record on the enclosing context whether this is an inclusive
	 or exclusive scan, then fall through to the generic region
	 handling below.  */
      if (tree clauses = gimple_omp_scan_clauses (as_a <gomp_scan *> (stmt)))
	{
	  if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_INCLUSIVE)
	    ctx->scan_inclusive = true;
	  else if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_EXCLUSIVE)
	    ctx->scan_exclusive = true;
	}
      /* FALLTHRU */
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_OMP_GRID_BODY:
      ctx = new_omp_context (stmt, ctx);
      scan_omp (gimple_omp_body_ptr (stmt), ctx);
      break;

    case GIMPLE_OMP_TASKGROUP:
      ctx = new_omp_context (stmt, ctx);
      scan_sharing_clauses (gimple_omp_taskgroup_clauses (stmt), ctx);
      scan_omp (gimple_omp_body_ptr (stmt), ctx);
      break;

    case GIMPLE_OMP_TARGET:
      /* Only offloaded target regions count towards the taskreg
	 nesting level.  */
      if (is_gimple_omp_offloaded (stmt))
	{
	  taskreg_nesting_level++;
	  scan_omp_target (as_a <gomp_target *> (stmt), ctx);
	  taskreg_nesting_level--;
	}
      else
	scan_omp_target (as_a <gomp_target *> (stmt), ctx);
      break;

    case GIMPLE_OMP_TEAMS:
      /* Likewise, only host teams count towards the taskreg
	 nesting level.  */
      if (gimple_omp_teams_host (as_a <gomp_teams *> (stmt)))
	{
	  taskreg_nesting_level++;
	  scan_omp_teams (as_a <gomp_teams *> (stmt), ctx);
	  taskreg_nesting_level--;
	}
      else
	scan_omp_teams (as_a <gomp_teams *> (stmt), ctx);
      break;

    case GIMPLE_BIND:
      {
	tree var;

	/* Let the walker descend into the bind body; just record an
	   identity mapping for each bind-local variable.  */
	*handled_ops_p = false;
	if (ctx)
	  for (var = gimple_bind_vars (as_a <gbind *> (stmt));
	       var ;
	       var = DECL_CHAIN (var))
	    insert_decl_map (&ctx->cb, var, var);
      }
      break;
    default:
      /* Anything else: let walk_gimple_stmt handle the operands.  */
      *handled_ops_p = false;
      break;
    }

  return NULL_TREE;
}
/* Scan all the statements starting at the current statement.  CTX
   contains context information about the OMP directives and
   clauses found during the scan.  */

static void
scan_omp (gimple_seq *body_p, omp_context *ctx)
{
  /* The walk callbacks update input_location as they visit
     statements; restore the caller's location afterwards.  */
  location_t saved_loc = input_location;

  struct walk_stmt_info wi;
  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  wi.info = ctx;

  walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);

  input_location = saved_loc;
}
/* Re-gimplification and code generation routines. */
/* Remove omp_member_access_dummy_var variables from gimple_bind_vars
   of BIND if in a method.  */

static void
maybe_remove_omp_member_access_dummy_vars (gbind *bind)
{
  /* A method is recognized by an artificial first argument of
     pointer type (the "this" pointer).  */
  tree first_arg = DECL_ARGUMENTS (current_function_decl);
  if (first_arg == NULL_TREE
      || !DECL_ARTIFICIAL (first_arg)
      || TREE_CODE (TREE_TYPE (first_arg)) != POINTER_TYPE)
    return;

  /* Unlink every dummy var from the bind's variable chain, then
     store the filtered chain back.  */
  tree vars = gimple_bind_vars (bind);
  tree *slot = &vars;
  while (*slot)
    {
      if (omp_member_access_dummy_var (*slot))
	*slot = DECL_CHAIN (*slot);
      else
	slot = &DECL_CHAIN (*slot);
    }
  gimple_bind_set_vars (bind, vars);
}
/* Remove omp_member_access_dummy_var variables from BLOCK_VARS of
   block and its subblocks.  */

static void
remove_member_access_dummy_vars (tree block)
{
  /* Filter the dummy vars out of this block's variable chain.  */
  tree *slot = &BLOCK_VARS (block);
  while (*slot)
    {
      if (omp_member_access_dummy_var (*slot))
	*slot = DECL_CHAIN (*slot);
      else
	slot = &DECL_CHAIN (*slot);
    }

  /* Recurse into each subblock.  */
  for (tree sub = BLOCK_SUBBLOCKS (block); sub; sub = BLOCK_CHAIN (sub))
    remove_member_access_dummy_vars (sub);
}
/* If a context was created for STMT when it was scanned, return it.  */

static omp_context *
maybe_lookup_ctx (gimple *stmt)
{
  splay_tree_node n
    = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  if (n == NULL)
    return NULL;
  return (omp_context *) n->value;
}
/* Find the mapping for DECL in CTX or the immediately enclosing
context that has a mapping for DECL.
If CTX is a nested parallel directive, we may have to use the decl
mappings created in CTX's parent context. Suppose that we have the
following parallel nesting (variable UIDs showed for clarity):
iD.1562 = 0;
#omp parallel shared(iD.1562) -> outer parallel
iD.1562 = iD.1562 + 1;
#omp parallel shared (iD.1562) -> inner parallel
iD.1562 = iD.1562 - 1;
Each parallel structure will create a distinct .omp_data_s structure
for copying iD.1562 in/out of the directive:
outer parallel .omp_data_s.1.i -> iD.1562
inner parallel .omp_data_s.2.i -> iD.1562
A shared variable mapping will produce a copy-out operation before
the parallel directive and a copy-in operation after it. So, in
this case we would have:
iD.1562 = 0;
.omp_data_o.1.i = iD.1562;
#omp parallel shared(iD.1562) -> outer parallel
.omp_data_i.1 = &.omp_data_o.1
.omp_data_i.1->i = .omp_data_i.1->i + 1;
.omp_data_o.2.i = iD.1562; -> **
#omp parallel shared(iD.1562) -> inner parallel
.omp_data_i.2 = &.omp_data_o.2
.omp_data_i.2->i = .omp_data_i.2->i - 1;
** This is a problem. The symbol iD.1562 cannot be referenced
inside the body of the outer parallel region. But since we are
emitting this copy operation while expanding the inner parallel
directive, we need to access the CTX structure of the outer
parallel directive to get the correct mapping:
.omp_data_o.2.i = .omp_data_i.1->i
Since there may be other workshare or parallel directives enclosing
the parallel directive, it may be necessary to walk up the context
parent chain. This is not a problem in general because nested
parallelism happens only rarely. */
static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  /* Walk outward through the enclosing contexts until one of them
     has a mapping for DECL.  */
  tree t = NULL_TREE;
  for (omp_context *up = ctx->outer; up && t == NULL_TREE; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  /* Inside a nested region DECL must either be remapped somewhere up
     the chain or be a global that can be referenced directly.  */
  gcc_assert (!ctx->is_nested || t || is_global_var (decl));

  return t ? t : decl;
}
/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts.  */

static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  /* Walk outward through the enclosing contexts until one of them has
     a mapping for DECL.  Unlike lookup_decl_in_outer_ctx, a missing
     mapping is not an error; DECL itself is returned instead.

     Note: the original initialized T twice (at its declaration and
     again in the for-init) and declared UP at function scope; both
     are collapsed here with no change in behavior.  */
  tree t = NULL_TREE;
  for (omp_context *up = ctx->outer; up && t == NULL_TREE; up = up->outer)
    t = maybe_lookup_decl (decl, up);
  return t ? t : decl;
}
/* Construct the initialization value for reduction operation OP.
   This is the identity element of OP in TYPE: the value X such that
   "X op Y == Y" for any Y, so each thread's private copy can start
   from it.  TYPE must be a scalar (integral, floating or pointer)
   type appropriate for OP; unsupported OPs abort.  */

tree
omp_reduction_init_op (location_t loc, enum tree_code op, tree type)
{
  switch (op)
    {
    /* Additive / or-like operations: identity is zero (false).  */
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return build_zero_cst (type);

    /* Multiplicative / and-like operations: identity is one (true).  */
    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert_loc (loc, type, integer_one_node);

    /* Bitwise and: identity is all-ones.  */
    case BIT_AND_EXPR:
      return fold_convert_loc (loc, type, integer_minus_one_node);

    case MAX_EXPR:
      /* Identity for max is the smallest representable value:
	 -inf for floats that honor infinities, otherwise the most
	 negative finite value / TYPE_MIN_VALUE.  */
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max, min;
	  if (HONOR_INFINITIES (type))
	    {
	      real_inf (&max);
	      /* min = -max, i.e. -inf.  */
	      real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
	    }
	  else
	    /* Largest magnitude negative finite value (sign = 1).  */
	    real_maxval (&min, 1, TYPE_MODE (type));
	  return build_real (type, min);
	}
      else if (POINTER_TYPE_P (type))
	{
	  wide_int min
	    = wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
	  return wide_int_to_tree (type, min);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MIN_VALUE (type);
	}

    case MIN_EXPR:
      /* Identity for min is the largest representable value,
	 mirroring the MAX_EXPR case above.  */
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max;
	  if (HONOR_INFINITIES (type))
	    real_inf (&max);
	  else
	    real_maxval (&max, 0, TYPE_MODE (type));
	  return build_real (type, max);
	}
      else if (POINTER_TYPE_P (type))
	{
	  wide_int max
	    = wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
	  return wide_int_to_tree (type, max);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MAX_VALUE (type);
	}

    default:
      gcc_unreachable ();
    }
}
/* Construct the initialization value for reduction CLAUSE.  */

tree
omp_reduction_init (tree clause, tree type)
{
  location_t loc = OMP_CLAUSE_LOCATION (clause);
  enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (clause);
  return omp_reduction_init_op (loc, code, type);
}
/* Return alignment to be assumed for var in CLAUSE, which should be
   OMP_CLAUSE_ALIGNED.

   If the clause carries an explicit alignment, use it.  Otherwise the
   implementation-defined default is the largest unit alignment of any
   vector type the target would prefer for a scalar integer or float
   mode, so that aligned pointers are usable by the vectorizer.  */

static tree
omp_clause_aligned_alignment (tree clause)
{
  if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
    return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);

  /* Otherwise return implementation defined alignment.  */
  unsigned int al = 1;
  opt_scalar_mode mode_iter;
  auto_vector_modes modes;
  targetm.vectorize.autovectorize_vector_modes (&modes, true);
  /* Scalar classes paired with the vector class a preferred SIMD
     mode of that scalar class must belong to.  */
  static enum mode_class classes[]
    = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
  for (int i = 0; i < 4; i += 2)
    /* The for loop above dictates that we only walk through scalar classes.  */
    FOR_EACH_MODE_IN_CLASS (mode_iter, classes[i])
      {
	scalar_mode mode = mode_iter.require ();
	machine_mode vmode = targetm.vectorize.preferred_simd_mode (mode);
	/* Skip scalar modes the target would not actually vectorize
	   into the expected vector class.  */
	if (GET_MODE_CLASS (vmode) != classes[i + 1])
	  continue;
	/* Prefer the widest related vector mode among the target's
	   autovectorization candidates.  */
	machine_mode alt_vmode;
	for (unsigned int j = 0; j < modes.length (); ++j)
	  if (related_vector_mode (modes[j], mode).exists (&alt_vmode)
	      && known_ge (GET_MODE_SIZE (alt_vmode), GET_MODE_SIZE (vmode)))
	    vmode = alt_vmode;
	/* Build the corresponding vector type; bail out if the
	   front end cannot represent the scalar or vector mode.  */
	tree type = lang_hooks.types.type_for_mode (mode, 1);
	if (type == NULL_TREE || TYPE_MODE (type) != mode)
	  continue;
	type = build_vector_type_for_mode (type, vmode);
	if (TYPE_MODE (type) != vmode)
	  continue;
	/* Keep the maximum alignment seen, in bytes.  */
	if (TYPE_ALIGN_UNIT (type) > al)
	  al = TYPE_ALIGN_UNIT (type);
      }
  return build_int_cst (integer_type_node, al);
}
/* This structure is part of the interface between lower_rec_simd_input_clauses
   and lower_rec_input_clauses.  */

class omplow_simd_context {
public:
  /* Zero all members up front; the fields are plain data, so a
     memset suffices as default construction.  */
  omplow_simd_context () { memset (this, 0, sizeof (*this)); }
  /* Index variable used to subscript the per-lane "omp simd array"
     temporaries (and the exclusive-scan arrays).  */
  tree idx;
  /* Lane variable used to subscript the per-lane arrays on the
     lvalue side.  */
  tree lane;
  /* Lazily created index of the last lane, used to read the reduced
     value out of the inscan reduction arrays.  */
  tree lastlane;
  /* For SIMT: addresses of privatized variables, collected as extra
     arguments (first slot reserved as a simduid placeholder).  */
  vec<tree, va_heap> simt_eargs;
  /* For SIMT: statements (e.g. clobbers) to emit at the end.  */
  gimple_seq simt_dlist;
  /* Maximum vectorization factor; 0 = not yet computed, 1 = simd
     privatization disabled.  */
  poly_uint64_pod max_vf;
  /* True when lowering for SIMT (a _simt_ clause was present).  */
  bool is_simt;
};
/* Helper function of lower_rec_input_clauses, used for #pragma omp simd
   privatization.

   For variable NEW_VAR being privatized in the simd region described
   by CTX/SCTX, decide whether per-lane privatization is possible and
   if so set IVAR (per-iteration rvalue form) and LVAR (per-lane
   lvalue form).  For inscan reductions, *RVAR is set to an element of
   a second per-lane array holding the reduced value, and for
   exclusive scans *RVAR2 to an element of a third array used during
   the scan phase.  Returns false when max_vf == 1, i.e. the variable
   should be privatized the ordinary scalar way.  */

static bool
lower_rec_simd_input_clauses (tree new_var, omp_context *ctx,
			      omplow_simd_context *sctx, tree &ivar,
			      tree &lvar, tree *rvar = NULL,
			      tree *rvar2 = NULL)
{
  /* First call: compute max_vf once, clamped by any safelen clause,
     and create the shared idx/lane index variables.  */
  if (known_eq (sctx->max_vf, 0U))
    {
      sctx->max_vf = sctx->is_simt ? omp_max_simt_vf () : omp_max_vf ();
      if (maybe_gt (sctx->max_vf, 1U))
	{
	  tree c = omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
				    OMP_CLAUSE_SAFELEN);
	  if (c)
	    {
	      poly_uint64 safe_len;
	      /* Non-constant or sub-1 safelen disables per-lane
		 privatization entirely.  */
	      if (!poly_int_tree_p (OMP_CLAUSE_SAFELEN_EXPR (c), &safe_len)
		  || maybe_lt (safe_len, 1U))
		sctx->max_vf = 1;
	      else
		sctx->max_vf = lower_bound (sctx->max_vf, safe_len);
	    }
	}
      if (maybe_gt (sctx->max_vf, 1U))
	{
	  sctx->idx = create_tmp_var (unsigned_type_node);
	  sctx->lane = create_tmp_var (unsigned_type_node);
	}
    }
  if (known_eq (sctx->max_vf, 1U))
    return false;

  if (sctx->is_simt)
    {
      /* SIMT: registers are privatized per-lane by the hardware;
	 only addressable variables need an "omp simt private" copy
	 whose address is passed via simt_eargs.  */
      if (is_gimple_reg (new_var))
	{
	  ivar = lvar = new_var;
	  return true;
	}
      tree type = TREE_TYPE (new_var), ptype = build_pointer_type (type);
      ivar = lvar = create_tmp_var (type);
      TREE_ADDRESSABLE (ivar) = 1;
      DECL_ATTRIBUTES (ivar) = tree_cons (get_identifier ("omp simt private"),
					  NULL, DECL_ATTRIBUTES (ivar));
      sctx->simt_eargs.safe_push (build1 (ADDR_EXPR, ptype, ivar));
      /* Clobber the copy at the end of the region.  */
      tree clobber = build_clobber (type);
      gimple *g = gimple_build_assign (ivar, clobber);
      gimple_seq_add_stmt (&sctx->simt_dlist, g);
    }
  else
    {
      /* Non-SIMT: privatize into a max_vf-element "omp simd array"
	 and access it through idx (rvalue) / lane (lvalue).  */
      tree atype = build_array_type_nelts (TREE_TYPE (new_var), sctx->max_vf);
      tree avar = create_tmp_var_raw (atype);
      if (TREE_ADDRESSABLE (new_var))
	TREE_ADDRESSABLE (avar) = 1;
      DECL_ATTRIBUTES (avar)
	= tree_cons (get_identifier ("omp simd array"), NULL,
		     DECL_ATTRIBUTES (avar));
      gimple_add_tmp_var (avar);
      tree iavar = avar;
      if (rvar && !ctx->for_simd_scan_phase)
	{
	  /* For inscan reductions, create another array temporary,
	     which will hold the reduced value.  */
	  iavar = create_tmp_var_raw (atype);
	  if (TREE_ADDRESSABLE (new_var))
	    TREE_ADDRESSABLE (iavar) = 1;
	  DECL_ATTRIBUTES (iavar)
	    = tree_cons (get_identifier ("omp simd array"), NULL,
			 tree_cons (get_identifier ("omp simd inscan"), NULL,
				    DECL_ATTRIBUTES (iavar)));
	  gimple_add_tmp_var (iavar);
	  /* Remember the pairing so later phases can find the inscan
	     array from the plain one.  */
	  ctx->cb.decl_map->put (avar, iavar);
	  if (sctx->lastlane == NULL_TREE)
	    sctx->lastlane = create_tmp_var (unsigned_type_node);
	  *rvar = build4 (ARRAY_REF, TREE_TYPE (new_var), iavar,
			  sctx->lastlane, NULL_TREE, NULL_TREE);
	  TREE_THIS_NOTRAP (*rvar) = 1;
	  if (ctx->scan_exclusive)
	    {
	      /* And for exclusive scan yet another one, which will
		 hold the value during the scan phase.  */
	      tree savar = create_tmp_var_raw (atype);
	      if (TREE_ADDRESSABLE (new_var))
		TREE_ADDRESSABLE (savar) = 1;
	      DECL_ATTRIBUTES (savar)
		= tree_cons (get_identifier ("omp simd array"), NULL,
			     tree_cons (get_identifier ("omp simd inscan "
							"exclusive"), NULL,
					DECL_ATTRIBUTES (savar)));
	      gimple_add_tmp_var (savar);
	      ctx->cb.decl_map->put (iavar, savar);
	      *rvar2 = build4 (ARRAY_REF, TREE_TYPE (new_var), savar,
			       sctx->idx, NULL_TREE, NULL_TREE);
	      TREE_THIS_NOTRAP (*rvar2) = 1;
	    }
	}
      ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), iavar, sctx->idx,
		     NULL_TREE, NULL_TREE);
      lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, sctx->lane,
		     NULL_TREE, NULL_TREE);
      TREE_THIS_NOTRAP (ivar) = 1;
      TREE_THIS_NOTRAP (lvar) = 1;
    }
  /* Make plain uses of NEW_VAR resolve to the per-lane lvalue.  */
  if (DECL_P (new_var))
    {
      SET_DECL_VALUE_EXPR (new_var, lvar);
      DECL_HAS_VALUE_EXPR_P (new_var) = 1;
    }
  return true;
}
/* Helper function of lower_rec_input_clauses.  For a reference
   in simd reduction, add an underlying variable it will reference.  */

static void
handle_simd_reference (location_t loc, tree new_vard, gimple_seq *ilist)
{
  tree pointee_type = TREE_TYPE (TREE_TYPE (new_vard));

  /* Only fixed-size referenced objects get a backing temporary.  */
  if (!TREE_CONSTANT (TYPE_SIZE_UNIT (pointee_type)))
    return;

  /* Create an addressable temporary of the referenced type and
     initialize NEW_VARD to point at it.  */
  tree backing = create_tmp_var_raw (pointee_type, get_name (new_vard));
  gimple_add_tmp_var (backing);
  TREE_ADDRESSABLE (backing) = 1;
  tree addr = build_fold_addr_expr_loc (loc, backing);
  gimplify_assign (new_vard, addr, ilist);
}
/* Helper function for lower_rec_input_clauses.  Emit into ilist sequence
   code to emit (type) (tskred_temp[idx]).  */

static tree
task_reduction_read (gimple_seq *ilist, tree tskred_temp, tree type,
		     unsigned idx)
{
  /* Byte offset of slot IDX in an array of pointer-sized integers.  */
  unsigned HOST_WIDE_INT slot_size
    = tree_to_uhwi (TYPE_SIZE_UNIT (pointer_sized_int_node));
  tree offset = build_int_cst (TREE_TYPE (tskred_temp), idx * slot_size);
  tree mem = build2 (MEM_REF, pointer_sized_int_node, tskred_temp, offset);

  /* Load the raw pointer-sized value into a temporary.  */
  tree val = create_tmp_var (pointer_sized_int_node);
  gimple *load = gimple_build_assign (val, mem);
  gimple_seq_add_stmt (ilist, load);

  /* Convert to TYPE only when a conversion is actually needed.  */
  if (!useless_type_conversion_p (type, pointer_sized_int_node))
    {
      tree converted = create_tmp_var (type);
      gimple *conv = gimple_build_assign (converted, NOP_EXPR, val);
      gimple_seq_add_stmt (ilist, conv);
      val = converted;
    }
  return val;
}
/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
from the receiver (aka child) side and initializers for REFERENCE_TYPE
private variables. Initialization statements go in ILIST, while calls
to destructors go in DLIST. */
static void
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
omp_context *ctx, struct omp_for_data *fd)
{
tree c, copyin_seq, x, ptr;
bool copyin_by_ref = false;
bool lastprivate_firstprivate = false;
bool reduction_omp_orig_ref = false;
int pass;
bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD);
omplow_simd_context sctx = omplow_simd_context ();
tree simt_lane = NULL_TREE, simtrec = NULL_TREE;
tree ivar = NULL_TREE, lvar = NULL_TREE, uid = NULL_TREE;
gimple_seq llist[4] = { };
tree nonconst_simd_if = NULL_TREE;
copyin_seq = NULL;
sctx.is_simt = is_simd && omp_find_clause (clauses, OMP_CLAUSE__SIMT_);
/* Set max_vf=1 (which will later enforce safelen=1) in simd loops
with data sharing clauses referencing variable sized vars. That
is unnecessarily hard to support and very unlikely to result in
vectorized code anyway. */
if (is_simd)
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_LINEAR:
if (OMP_CLAUSE_LINEAR_ARRAY (c))
sctx.max_vf = 1;
/* FALLTHRU */
case OMP_CLAUSE_PRIVATE:
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_LASTPRIVATE:
if (is_variable_sized (OMP_CLAUSE_DECL (c)))
sctx.max_vf = 1;
else if (omp_is_reference (OMP_CLAUSE_DECL (c)))
{
tree rtype = TREE_TYPE (TREE_TYPE (OMP_CLAUSE_DECL (c)));
if (!TREE_CONSTANT (TYPE_SIZE_UNIT (rtype)))
sctx.max_vf = 1;
}
break;
case OMP_CLAUSE_REDUCTION:
case OMP_CLAUSE_IN_REDUCTION:
if (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF
|| is_variable_sized (OMP_CLAUSE_DECL (c)))
sctx.max_vf = 1;
else if (omp_is_reference (OMP_CLAUSE_DECL (c)))
{
tree rtype = TREE_TYPE (TREE_TYPE (OMP_CLAUSE_DECL (c)));
if (!TREE_CONSTANT (TYPE_SIZE_UNIT (rtype)))
sctx.max_vf = 1;
}
break;
case OMP_CLAUSE_IF:
if (integer_zerop (OMP_CLAUSE_IF_EXPR (c)))
sctx.max_vf = 1;
else if (TREE_CODE (OMP_CLAUSE_IF_EXPR (c)) != INTEGER_CST)
nonconst_simd_if = OMP_CLAUSE_IF_EXPR (c);
break;
case OMP_CLAUSE_SIMDLEN:
if (integer_onep (OMP_CLAUSE_SIMDLEN_EXPR (c)))
sctx.max_vf = 1;
break;
case OMP_CLAUSE__CONDTEMP_:
/* FIXME: lastprivate(conditional:) not handled for SIMT yet. */
if (sctx.is_simt)
sctx.max_vf = 1;
break;
default:
continue;
}
/* Add a placeholder for simduid. */
if (sctx.is_simt && maybe_ne (sctx.max_vf, 1U))
sctx.simt_eargs.safe_push (NULL_TREE);
unsigned task_reduction_cnt = 0;
unsigned task_reduction_cntorig = 0;
unsigned task_reduction_cnt_full = 0;
unsigned task_reduction_cntorig_full = 0;
unsigned task_reduction_other_cnt = 0;
tree tskred_atype = NULL_TREE, tskred_avar = NULL_TREE;
tree tskred_base = NULL_TREE, tskred_temp = NULL_TREE;
/* Do all the fixed sized types in the first pass, and the variable sized
types in the second pass. This makes sure that the scalar arguments to
the variable sized types are processed before we use them in the
variable sized operations. For task reductions we use 4 passes, in the
first two we ignore them, in the third one gather arguments for
GOMP_task_reduction_remap call and in the last pass actually handle
the task reductions. */
for (pass = 0; pass < ((task_reduction_cnt || task_reduction_other_cnt)
? 4 : 2); ++pass)
{
if (pass == 2 && task_reduction_cnt)
{
tskred_atype
= build_array_type_nelts (ptr_type_node, task_reduction_cnt
+ task_reduction_cntorig);
tskred_avar = create_tmp_var_raw (tskred_atype);
gimple_add_tmp_var (tskred_avar);
TREE_ADDRESSABLE (tskred_avar) = 1;
task_reduction_cnt_full = task_reduction_cnt;
task_reduction_cntorig_full = task_reduction_cntorig;
}
else if (pass == 3 && task_reduction_cnt)
{
x = builtin_decl_explicit (BUILT_IN_GOMP_TASK_REDUCTION_REMAP);
gimple *g
= gimple_build_call (x, 3, size_int (task_reduction_cnt),
size_int (task_reduction_cntorig),
build_fold_addr_expr (tskred_avar));
gimple_seq_add_stmt (ilist, g);
}
if (pass == 3 && task_reduction_other_cnt)
{
/* For reduction clauses, build
tskred_base = (void *) tskred_temp[2]
+ omp_get_thread_num () * tskred_temp[1]
or if tskred_temp[1] is known to be constant, that constant
directly. This is the start of the private reduction copy block
for the current thread. */
tree v = create_tmp_var (integer_type_node);
x = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
gimple *g = gimple_build_call (x, 0);
gimple_call_set_lhs (g, v);
gimple_seq_add_stmt (ilist, g);
c = omp_find_clause (clauses, OMP_CLAUSE__REDUCTEMP_);
tskred_temp = OMP_CLAUSE_DECL (c);
if (is_taskreg_ctx (ctx))
tskred_temp = lookup_decl (tskred_temp, ctx);
tree v2 = create_tmp_var (sizetype);
g = gimple_build_assign (v2, NOP_EXPR, v);
gimple_seq_add_stmt (ilist, g);
if (ctx->task_reductions[0])
v = fold_convert (sizetype, ctx->task_reductions[0]);
else
v = task_reduction_read (ilist, tskred_temp, sizetype, 1);
tree v3 = create_tmp_var (sizetype);
g = gimple_build_assign (v3, MULT_EXPR, v2, v);
gimple_seq_add_stmt (ilist, g);
v = task_reduction_read (ilist, tskred_temp, ptr_type_node, 2);
tskred_base = create_tmp_var (ptr_type_node);
g = gimple_build_assign (tskred_base, POINTER_PLUS_EXPR, v, v3);
gimple_seq_add_stmt (ilist, g);
}
task_reduction_cnt = 0;
task_reduction_cntorig = 0;
task_reduction_other_cnt = 0;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
{
enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
tree var, new_var;
bool by_ref;
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
bool task_reduction_p = false;
bool task_reduction_needs_orig_p = false;
tree cond = NULL_TREE;
switch (c_kind)
{
case OMP_CLAUSE_PRIVATE:
if (OMP_CLAUSE_PRIVATE_DEBUG (c))
continue;
break;
case OMP_CLAUSE_SHARED:
/* Ignore shared directives in teams construct inside
of target construct. */
if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS
&& !is_host_teams_ctx (ctx))
continue;
if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
{
gcc_assert (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c)
|| is_global_var (OMP_CLAUSE_DECL (c)));
continue;
}
case OMP_CLAUSE_FIRSTPRIVATE:
case OMP_CLAUSE_COPYIN:
break;
case OMP_CLAUSE_LINEAR:
if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c)
&& !OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
lastprivate_firstprivate = true;
break;
case OMP_CLAUSE_REDUCTION:
case OMP_CLAUSE_IN_REDUCTION:
if (is_task_ctx (ctx) || OMP_CLAUSE_REDUCTION_TASK (c))
{
task_reduction_p = true;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
{
task_reduction_other_cnt++;
if (pass == 2)
continue;
}
else
task_reduction_cnt++;
if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
{
var = OMP_CLAUSE_DECL (c);
/* If var is a global variable that isn't privatized
in outer contexts, we don't need to look up the
original address, it is always the address of the
global variable itself. */
if (!DECL_P (var)
|| omp_is_reference (var)
|| !is_global_var
(maybe_lookup_decl_in_outer_ctx (var, ctx)))
{
task_reduction_needs_orig_p = true;
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
task_reduction_cntorig++;
}
}
}
else if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
reduction_omp_orig_ref = true;
break;
case OMP_CLAUSE__REDUCTEMP_:
if (!is_taskreg_ctx (ctx))
continue;
/* FALLTHRU */
case OMP_CLAUSE__LOOPTEMP_:
/* Handle _looptemp_/_reductemp_ clauses only on
parallel/task. */
if (fd)
continue;
break;
case OMP_CLAUSE_LASTPRIVATE:
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
{
lastprivate_firstprivate = true;
if (pass != 0 || is_taskloop_ctx (ctx))
continue;
}
/* Even without corresponding firstprivate, if
decl is Fortran allocatable, it needs outer var
reference. */
else if (pass == 0
&& lang_hooks.decls.omp_private_outer_ref
(OMP_CLAUSE_DECL (c)))
lastprivate_firstprivate = true;
break;
case OMP_CLAUSE_ALIGNED:
if (pass != 1)
continue;
var = OMP_CLAUSE_DECL (c);
if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
&& !is_global_var (var))
{
new_var = maybe_lookup_decl (var, ctx);
if (new_var == NULL_TREE)
new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
tree alarg = omp_clause_aligned_alignment (c);
alarg = fold_convert_loc (clause_loc, size_type_node, alarg);
x = build_call_expr_loc (clause_loc, x, 2, new_var, alarg);
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
gimplify_and_add (x, ilist);
}
else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
&& is_global_var (var))
{
tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
new_var = lookup_decl (var, ctx);
t = maybe_lookup_decl_in_outer_ctx (var, ctx);
t = build_fold_addr_expr_loc (clause_loc, t);
t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
tree alarg = omp_clause_aligned_alignment (c);
alarg = fold_convert_loc (clause_loc, size_type_node, alarg);
t = build_call_expr_loc (clause_loc, t2, 2, t, alarg);
t = fold_convert_loc (clause_loc, ptype, t);
x = create_tmp_var (ptype);
t = build2 (MODIFY_EXPR, ptype, x, t);
gimplify_and_add (t, ilist);
t = build_simple_mem_ref_loc (clause_loc, x);
SET_DECL_VALUE_EXPR (new_var, t);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
continue;
case OMP_CLAUSE__CONDTEMP_:
if (is_parallel_ctx (ctx)
|| (is_simd && !OMP_CLAUSE__CONDTEMP__ITER (c)))
break;
continue;
default:
continue;
}
if (task_reduction_p != (pass >= 2))
continue;
new_var = var = OMP_CLAUSE_DECL (c);
if ((c_kind == OMP_CLAUSE_REDUCTION
|| c_kind == OMP_CLAUSE_IN_REDUCTION)
&& TREE_CODE (var) == MEM_REF)
{
var = TREE_OPERAND (var, 0);
if (TREE_CODE (var) == POINTER_PLUS_EXPR)
var = TREE_OPERAND (var, 0);
if (TREE_CODE (var) == INDIRECT_REF
|| TREE_CODE (var) == ADDR_EXPR)
var = TREE_OPERAND (var, 0);
if (is_variable_sized (var))
{
gcc_assert (DECL_HAS_VALUE_EXPR_P (var));
var = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (var) == INDIRECT_REF);
var = TREE_OPERAND (var, 0);
gcc_assert (DECL_P (var));
}
new_var = var;
}
if (c_kind != OMP_CLAUSE_COPYIN)
new_var = lookup_decl (var, ctx);
if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
{
if (pass != 0)
continue;
}
/* C/C++ array section reductions. */
else if ((c_kind == OMP_CLAUSE_REDUCTION
|| c_kind == OMP_CLAUSE_IN_REDUCTION)
&& var != OMP_CLAUSE_DECL (c))
{
if (pass == 0)
continue;
tree bias = TREE_OPERAND (OMP_CLAUSE_DECL (c), 1);
tree orig_var = TREE_OPERAND (OMP_CLAUSE_DECL (c), 0);
if (TREE_CODE (orig_var) == POINTER_PLUS_EXPR)
{
tree b = TREE_OPERAND (orig_var, 1);
b = maybe_lookup_decl (b, ctx);
if (b == NULL)
{
b = TREE_OPERAND (orig_var, 1);
b = maybe_lookup_decl_in_outer_ctx (b, ctx);
}
if (integer_zerop (bias))
bias = b;
else
{
bias = fold_convert_loc (clause_loc,
TREE_TYPE (b), bias);
bias = fold_build2_loc (clause_loc, PLUS_EXPR,
TREE_TYPE (b), b, bias);
}
orig_var = TREE_OPERAND (orig_var, 0);
}
if (pass == 2)
{
tree out = maybe_lookup_decl_in_outer_ctx (var, ctx);
if (is_global_var (out)
&& TREE_CODE (TREE_TYPE (out)) != POINTER_TYPE
&& (TREE_CODE (TREE_TYPE (out)) != REFERENCE_TYPE
|| (TREE_CODE (TREE_TYPE (TREE_TYPE (out)))
!= POINTER_TYPE)))
x = var;
else
{
bool by_ref = use_pointer_for_field (var, NULL);
x = build_receiver_ref (var, by_ref, ctx);
if (TREE_CODE (TREE_TYPE (var)) == REFERENCE_TYPE
&& (TREE_CODE (TREE_TYPE (TREE_TYPE (var)))
== POINTER_TYPE))
x = build_fold_addr_expr (x);
}
if (TREE_CODE (orig_var) == INDIRECT_REF)
x = build_simple_mem_ref (x);
else if (TREE_CODE (orig_var) == ADDR_EXPR)
{
if (var == TREE_OPERAND (orig_var, 0))
x = build_fold_addr_expr (x);
}
bias = fold_convert (sizetype, bias);
x = fold_convert (ptr_type_node, x);
x = fold_build2_loc (clause_loc, POINTER_PLUS_EXPR,
TREE_TYPE (x), x, bias);
unsigned cnt = task_reduction_cnt - 1;
if (!task_reduction_needs_orig_p)
cnt += (task_reduction_cntorig_full
- task_reduction_cntorig);
else
cnt = task_reduction_cntorig - 1;
tree r = build4 (ARRAY_REF, ptr_type_node, tskred_avar,
size_int (cnt), NULL_TREE, NULL_TREE);
gimplify_assign (r, x, ilist);
continue;
}
if (TREE_CODE (orig_var) == INDIRECT_REF
|| TREE_CODE (orig_var) == ADDR_EXPR)
orig_var = TREE_OPERAND (orig_var, 0);
tree d = OMP_CLAUSE_DECL (c);
tree type = TREE_TYPE (d);
gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
tree v = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
const char *name = get_name (orig_var);
if (pass == 3)
{
tree xv = create_tmp_var (ptr_type_node);
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
{
unsigned cnt = task_reduction_cnt - 1;
if (!task_reduction_needs_orig_p)
cnt += (task_reduction_cntorig_full
- task_reduction_cntorig);
else
cnt = task_reduction_cntorig - 1;
x = build4 (ARRAY_REF, ptr_type_node, tskred_avar,
size_int (cnt), NULL_TREE, NULL_TREE);
gimple *g = gimple_build_assign (xv, x);
gimple_seq_add_stmt (ilist, g);
}
else
{
unsigned int idx = *ctx->task_reduction_map->get (c);
tree off;
if (ctx->task_reductions[1 + idx])
off = fold_convert (sizetype,
ctx->task_reductions[1 + idx]);
else
off = task_reduction_read (ilist, tskred_temp, sizetype,
7 + 3 * idx + 1);
gimple *g = gimple_build_assign (xv, POINTER_PLUS_EXPR,
tskred_base, off);
gimple_seq_add_stmt (ilist, g);
}
x = fold_convert (build_pointer_type (boolean_type_node),
xv);
if (TREE_CONSTANT (v))
x = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (x), x,
TYPE_SIZE_UNIT (type));
else
{
tree t = maybe_lookup_decl (v, ctx);
if (t)
v = t;
else
v = maybe_lookup_decl_in_outer_ctx (v, ctx);
gimplify_expr (&v, ilist, NULL, is_gimple_val,
fb_rvalue);
t = fold_build2_loc (clause_loc, PLUS_EXPR,
TREE_TYPE (v), v,
build_int_cst (TREE_TYPE (v), 1));
t = fold_build2_loc (clause_loc, MULT_EXPR,
TREE_TYPE (v), t,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
x = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (x), x, t);
}
cond = create_tmp_var (TREE_TYPE (x));
gimplify_assign (cond, x, ilist);
x = xv;
}
else if (TREE_CONSTANT (v))
{
x = create_tmp_var_raw (type, name);
gimple_add_tmp_var (x);
TREE_ADDRESSABLE (x) = 1;
x = build_fold_addr_expr_loc (clause_loc, x);
}
else
{
tree atmp
= builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
tree t = maybe_lookup_decl (v, ctx);
if (t)
v = t;
else
v = maybe_lookup_decl_in_outer_ctx (v, ctx);
gimplify_expr (&v, ilist, NULL, is_gimple_val, fb_rvalue);
t = fold_build2_loc (clause_loc, PLUS_EXPR,
TREE_TYPE (v), v,
build_int_cst (TREE_TYPE (v), 1));
t = fold_build2_loc (clause_loc, MULT_EXPR,
TREE_TYPE (v), t,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
tree al = size_int (TYPE_ALIGN (TREE_TYPE (type)));
x = build_call_expr_loc (clause_loc, atmp, 2, t, al);
}
tree ptype = build_pointer_type (TREE_TYPE (type));
x = fold_convert_loc (clause_loc, ptype, x);
tree y = create_tmp_var (ptype, name);
gimplify_assign (y, x, ilist);
x = y;
tree yb = y;
if (!integer_zerop (bias))
{
bias = fold_convert_loc (clause_loc, pointer_sized_int_node,
bias);
yb = fold_convert_loc (clause_loc, pointer_sized_int_node,
x);
yb = fold_build2_loc (clause_loc, MINUS_EXPR,
pointer_sized_int_node, yb, bias);
x = fold_convert_loc (clause_loc, TREE_TYPE (x), yb);
yb = create_tmp_var (ptype, name);
gimplify_assign (yb, x, ilist);
x = yb;
}
d = TREE_OPERAND (d, 0);
if (TREE_CODE (d) == POINTER_PLUS_EXPR)
d = TREE_OPERAND (d, 0);
if (TREE_CODE (d) == ADDR_EXPR)
{
if (orig_var != var)
{
gcc_assert (is_variable_sized (orig_var));
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var),
x);
gimplify_assign (new_var, x, ilist);
tree new_orig_var = lookup_decl (orig_var, ctx);
tree t = build_fold_indirect_ref (new_var);
DECL_IGNORED_P (new_var) = 0;
TREE_THIS_NOTRAP (t) = 1;
SET_DECL_VALUE_EXPR (new_orig_var, t);
DECL_HAS_VALUE_EXPR_P (new_orig_var) = 1;
}
else
{
x = build2 (MEM_REF, TREE_TYPE (new_var), x,
build_int_cst (ptype, 0));
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
}
else
{
gcc_assert (orig_var == var);
if (TREE_CODE (d) == INDIRECT_REF)
{
x = create_tmp_var (ptype, name);
TREE_ADDRESSABLE (x) = 1;
gimplify_assign (x, yb, ilist);
x = build_fold_addr_expr_loc (clause_loc, x);
}
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_assign (new_var, x, ilist);
}
/* GOMP_taskgroup_reduction_register memsets the whole
array to zero. If the initializer is zero, we don't
need to initialize it again, just mark it as ever
used unconditionally, i.e. cond = true. */
if (cond
&& OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) == NULL_TREE
&& initializer_zerop (omp_reduction_init (c,
TREE_TYPE (type))))
{
gimple *g = gimple_build_assign (build_simple_mem_ref (cond),
boolean_true_node);
gimple_seq_add_stmt (ilist, g);
continue;
}
tree end = create_artificial_label (UNKNOWN_LOCATION);
if (cond)
{
gimple *g;
if (!is_parallel_ctx (ctx))
{
tree condv = create_tmp_var (boolean_type_node);
g = gimple_build_assign (condv,
build_simple_mem_ref (cond));
gimple_seq_add_stmt (ilist, g);
tree lab1 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (NE_EXPR, condv,
boolean_false_node, end, lab1);
gimple_seq_add_stmt (ilist, g);
gimple_seq_add_stmt (ilist, gimple_build_label (lab1));
}
g = gimple_build_assign (build_simple_mem_ref (cond),
boolean_true_node);
gimple_seq_add_stmt (ilist, g);
}
tree y1 = create_tmp_var (ptype);
gimplify_assign (y1, y, ilist);
tree i2 = NULL_TREE, y2 = NULL_TREE;
tree body2 = NULL_TREE, end2 = NULL_TREE;
tree y3 = NULL_TREE, y4 = NULL_TREE;
if (task_reduction_needs_orig_p)
{
y3 = create_tmp_var (ptype);
tree ref;
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
ref = build4 (ARRAY_REF, ptr_type_node, tskred_avar,
size_int (task_reduction_cnt_full
+ task_reduction_cntorig - 1),
NULL_TREE, NULL_TREE);
else
{
unsigned int idx = *ctx->task_reduction_map->get (c);
ref = task_reduction_read (ilist, tskred_temp, ptype,
7 + 3 * idx);
}
gimplify_assign (y3, ref, ilist);
}
else if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) || is_simd)
{
if (pass != 3)
{
y2 = create_tmp_var (ptype);
gimplify_assign (y2, y, ilist);
}
if (is_simd || OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
{
tree ref = build_outer_var_ref (var, ctx);
/* For ref build_outer_var_ref already performs this. */
if (TREE_CODE (d) == INDIRECT_REF)
gcc_assert (omp_is_reference (var));
else if (TREE_CODE (d) == ADDR_EXPR)
ref = build_fold_addr_expr (ref);
else if (omp_is_reference (var))
ref = build_fold_addr_expr (ref);
ref = fold_convert_loc (clause_loc, ptype, ref);
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
&& OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
{
y3 = create_tmp_var (ptype);
gimplify_assign (y3, unshare_expr (ref), ilist);
}
if (is_simd)
{
y4 = create_tmp_var (ptype);
gimplify_assign (y4, ref, dlist);
}
}
}
tree i = create_tmp_var (TREE_TYPE (v));
gimplify_assign (i, build_int_cst (TREE_TYPE (v), 0), ilist);
tree body = create_artificial_label (UNKNOWN_LOCATION);
gimple_seq_add_stmt (ilist, gimple_build_label (body));
if (y2)
{
i2 = create_tmp_var (TREE_TYPE (v));
gimplify_assign (i2, build_int_cst (TREE_TYPE (v), 0), dlist);
body2 = create_artificial_label (UNKNOWN_LOCATION);
end2 = create_artificial_label (UNKNOWN_LOCATION);
gimple_seq_add_stmt (dlist, gimple_build_label (body2));
}
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
tree decl_placeholder
= OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c);
SET_DECL_VALUE_EXPR (decl_placeholder,
build_simple_mem_ref (y1));
DECL_HAS_VALUE_EXPR_P (decl_placeholder) = 1;
SET_DECL_VALUE_EXPR (placeholder,
y3 ? build_simple_mem_ref (y3)
: error_mark_node);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
x = lang_hooks.decls.omp_clause_default_ctor
(c, build_simple_mem_ref (y1),
y3 ? build_simple_mem_ref (y3) : NULL_TREE);
if (x)
gimplify_and_add (x, ilist);
if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
{
gimple_seq tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (ilist, tseq);
}
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
if (is_simd)
{
SET_DECL_VALUE_EXPR (decl_placeholder,
build_simple_mem_ref (y2));
SET_DECL_VALUE_EXPR (placeholder,
build_simple_mem_ref (y4));
gimple_seq tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (dlist, tseq);
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
}
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
DECL_HAS_VALUE_EXPR_P (decl_placeholder) = 0;
if (y2)
{
x = lang_hooks.decls.omp_clause_dtor
(c, build_simple_mem_ref (y2));
if (x)
gimplify_and_add (x, dlist);
}
}
else
{
x = omp_reduction_init (c, TREE_TYPE (type));
enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
/* reduction(-:var) sums up the partial results, so it
acts identically to reduction(+:var). */
if (code == MINUS_EXPR)
code = PLUS_EXPR;
gimplify_assign (build_simple_mem_ref (y1), x, ilist);
if (is_simd)
{
x = build2 (code, TREE_TYPE (type),
build_simple_mem_ref (y4),
build_simple_mem_ref (y2));
gimplify_assign (build_simple_mem_ref (y4), x, dlist);
}
}
gimple *g
= gimple_build_assign (y1, POINTER_PLUS_EXPR, y1,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (ilist, g);
if (y3)
{
g = gimple_build_assign (y3, POINTER_PLUS_EXPR, y3,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (ilist, g);
}
g = gimple_build_assign (i, PLUS_EXPR, i,
build_int_cst (TREE_TYPE (i), 1));
gimple_seq_add_stmt (ilist, g);
g = gimple_build_cond (LE_EXPR, i, v, body, end);
gimple_seq_add_stmt (ilist, g);
gimple_seq_add_stmt (ilist, gimple_build_label (end));
if (y2)
{
g = gimple_build_assign (y2, POINTER_PLUS_EXPR, y2,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (dlist, g);
if (y4)
{
g = gimple_build_assign
(y4, POINTER_PLUS_EXPR, y4,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (dlist, g);
}
g = gimple_build_assign (i2, PLUS_EXPR, i2,
build_int_cst (TREE_TYPE (i2), 1));
gimple_seq_add_stmt (dlist, g);
g = gimple_build_cond (LE_EXPR, i2, v, body2, end2);
gimple_seq_add_stmt (dlist, g);
gimple_seq_add_stmt (dlist, gimple_build_label (end2));
}
continue;
}
	  /* Pass 2: record the address of the original list item in the
	     task-reduction bookkeeping array TSKRED_AVAR so the runtime can
	     map each reduction slot back to its original variable.  */
	  else if (pass == 2)
	    {
	      /* Globals can be addressed directly; everything else comes
		 from the receiver (omp_data_s) record, dereferenced when
		 the field stores a pointer.  */
	      if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
		x = var;
	      else
		{
		  bool by_ref = use_pointer_for_field (var, ctx);
		  x = build_receiver_ref (var, by_ref, ctx);
		}
	      /* Non-reference vars need an explicit address-of; references
		 already hold an address.  */
	      if (!omp_is_reference (var))
		x = build_fold_addr_expr (x);
	      x = fold_convert (ptr_type_node, x);
	      /* Slot index: when the original address is not needed for this
		 clause, skip over the orig-address slots; otherwise index
		 into the orig-address region.  */
	      unsigned cnt = task_reduction_cnt - 1;
	      if (!task_reduction_needs_orig_p)
		cnt += task_reduction_cntorig_full - task_reduction_cntorig;
	      else
		cnt = task_reduction_cntorig - 1;
	      tree r = build4 (ARRAY_REF, ptr_type_node, tskred_avar,
			       size_int (cnt), NULL_TREE, NULL_TREE);
	      gimplify_assign (r, x, ilist);
	      continue;
	    }
	  /* Pass 3: point NEW_VAR at the privatized copy living inside the
	     runtime-allocated task-reduction data, and set up COND, a
	     boolean flag stored right past the private copy that tracks
	     whether the copy has ever been initialized/used.  */
	  else if (pass == 3)
	    {
	      tree type = TREE_TYPE (new_var);
	      if (!omp_is_reference (var))
		type = build_pointer_type (type);
	      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
		{
		  /* Non-reduction clause: fetch the pointer from the
		     TSKRED_AVAR array, using the same slot-index scheme
		     as pass 2 above.  */
		  unsigned cnt = task_reduction_cnt - 1;
		  if (!task_reduction_needs_orig_p)
		    cnt += (task_reduction_cntorig_full
			    - task_reduction_cntorig);
		  else
		    cnt = task_reduction_cntorig - 1;
		  x = build4 (ARRAY_REF, ptr_type_node, tskred_avar,
			      size_int (cnt), NULL_TREE, NULL_TREE);
		}
	      else
		{
		  /* Reduction clause: compute TSKRED_BASE + offset, where
		     the offset is either known at compile time (recorded in
		     ctx->task_reductions) or read from the runtime data at
		     slot 7 + 3 * idx + 1.  */
		  unsigned int idx = *ctx->task_reduction_map->get (c);
		  tree off;
		  if (ctx->task_reductions[1 + idx])
		    off = fold_convert (sizetype,
					ctx->task_reductions[1 + idx]);
		  else
		    off = task_reduction_read (ilist, tskred_temp, sizetype,
					       7 + 3 * idx + 1);
		  x = fold_build2 (POINTER_PLUS_EXPR, ptr_type_node,
				   tskred_base, off);
		}
	      x = fold_convert (type, x);
	      tree t;
	      if (omp_is_reference (var))
		{
		  /* Reference: NEW_VAR itself holds the pointer; use its
		     dereference from here on.  */
		  gimplify_assign (new_var, x, ilist);
		  t = new_var;
		  new_var = build_simple_mem_ref (new_var);
		}
	      else
		{
		  /* Value: stash the pointer in a temp and make NEW_VAR an
		     alias (DECL_VALUE_EXPR) for its dereference.  */
		  t = create_tmp_var (type);
		  gimplify_assign (t, x, ilist);
		  SET_DECL_VALUE_EXPR (new_var, build_simple_mem_ref (t));
		  DECL_HAS_VALUE_EXPR_P (new_var) = 1;
		}
	      /* COND = (bool *) ((char *) t + sizeof (*new_var)): the
		 ever-used flag stored immediately after the private copy.  */
	      t = fold_convert (build_pointer_type (boolean_type_node), t);
	      t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
			       TYPE_SIZE_UNIT (TREE_TYPE (type)));
	      cond = create_tmp_var (TREE_TYPE (t));
	      gimplify_assign (cond, t, ilist);
	    }
	  else if (is_variable_sized (var))
	    {
	      /* For variable sized types, we need to allocate the
		 actual storage here.  Call alloca and store the
		 result in the pointer decl that we created elsewhere.  */
	      if (pass == 0)
		continue;
	      /* task firstprivate of VLAs is handled via the receiver
		 record instead, so skip the alloca in that case.  */
	      if (c_kind != OMP_CLAUSE_FIRSTPRIVATE || !is_task_ctx (ctx))
		{
		  gcall *stmt;
		  tree tmp, atmp;
		  /* NEW_VAR's DECL_VALUE_EXPR is *ptr; recover the
		     underlying pointer decl to assign into.  */
		  ptr = DECL_VALUE_EXPR (new_var);
		  gcc_assert (TREE_CODE (ptr) == INDIRECT_REF);
		  ptr = TREE_OPERAND (ptr, 0);
		  gcc_assert (DECL_P (ptr));
		  x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
		  /* void *tmp = __builtin_alloca */
		  atmp = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
		  stmt = gimple_build_call (atmp, 2, x,
					    size_int (DECL_ALIGN (var)));
		  /* Mark the function as calling alloca so later passes
		     keep the frame setup it requires.  */
		  cfun->calls_alloca = 1;
		  tmp = create_tmp_var_raw (ptr_type_node);
		  gimple_add_tmp_var (tmp);
		  gimple_call_set_lhs (stmt, tmp);
		  gimple_seq_add_stmt (ilist, stmt);
		  x = fold_convert_loc (clause_loc, TREE_TYPE (ptr), tmp);
		  gimplify_assign (ptr, x, ilist);
		}
	    }
	  else if (omp_is_reference (var)
		   && (c_kind != OMP_CLAUSE_FIRSTPRIVATE
		       || !OMP_CLAUSE_FIRSTPRIVATE_NO_REFERENCE (c)))
	    {
	      /* For references that are being privatized for Fortran,
		 allocate new backing storage for the new pointer
		 variable.  This allows us to avoid changing all the
		 code that expects a pointer to something that expects
		 a direct variable.  */
	      if (pass == 0)
		continue;
	      /* X = size of the referenced object.  */
	      x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
	      if (c_kind == OMP_CLAUSE_FIRSTPRIVATE && is_task_ctx (ctx))
		{
		  /* Task firstprivate: the copy already lives in the
		     receiver record; just take its address.  */
		  x = build_receiver_ref (var, false, ctx);
		  x = build_fold_addr_expr_loc (clause_loc, x);
		}
	      else if (TREE_CONSTANT (x))
		{
		  /* For reduction in SIMD loop, defer adding the
		     initialization of the reference, because if we decide
		     to use SIMD array for it, the initilization could cause
		     expansion ICE.  Ditto for other privatization clauses.  */
		  if (is_simd)
		    x = NULL_TREE;
		  else
		    {
		      /* Constant size: back the reference with an
			 addressable stack temporary.  */
		      x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
					      get_name (var));
		      gimple_add_tmp_var (x);
		      TREE_ADDRESSABLE (x) = 1;
		      x = build_fold_addr_expr_loc (clause_loc, x);
		    }
		}
	      else
		{
		  /* Non-constant size: fall back to alloca.  */
		  tree atmp
		    = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
		  tree rtype = TREE_TYPE (TREE_TYPE (new_var));
		  tree al = size_int (TYPE_ALIGN (rtype));
		  x = build_call_expr_loc (clause_loc, atmp, 2, x, al);
		}
	      if (x)
		{
		  x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
		  gimplify_assign (new_var, x, ilist);
		}
	      /* From here on NEW_VAR denotes the pointed-to object.  */
	      new_var = build_simple_mem_ref_loc (clause_loc, new_var);
	    }
	  /* User-defined (placeholder) reductions are processed by the
	     switch below on every pass except pass 0; all remaining clause
	     kinds are only handled on pass 0.  */
	  else if ((c_kind == OMP_CLAUSE_REDUCTION
		    || c_kind == OMP_CLAUSE_IN_REDUCTION)
		   && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    {
	      if (pass == 0)
		continue;
	    }
	  else if (pass != 0)
	    continue;
	  /* Per-clause lowering: emit initialization into ILIST and (where
	     applicable) finalization into DLIST.  */
	  switch (OMP_CLAUSE_CODE (c))
	    {
	    case OMP_CLAUSE_SHARED:
	      /* Ignore shared directives in teams construct inside
		 target construct.  */
	      if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS
		  && !is_host_teams_ctx (ctx))
		continue;
	      /* Shared global vars are just accessed directly.  */
	      if (is_global_var (new_var))
		break;
	      /* For taskloop firstprivate/lastprivate, represented
		 as firstprivate and shared clause on the task, new_var
		 is the firstprivate var.  */
	      if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
		break;
	      /* Set up the DECL_VALUE_EXPR for shared variables now.  This
		 needs to be delayed until after fixup_child_record_type so
		 that we get the correct type during the dereference.  */
	      by_ref = use_pointer_for_field (var, ctx);
	      x = build_receiver_ref (var, by_ref, ctx);
	      SET_DECL_VALUE_EXPR (new_var, x);
	      DECL_HAS_VALUE_EXPR_P (new_var) = 1;
	      /* ??? If VAR is not passed by reference, and the variable
		 hasn't been initialized yet, then we'll get a warning for
		 the store into the omp_data_s structure.  Ideally, we'd be
		 able to notice this and not store anything at all, but
		 we're generating code too early.  Suppress the warning.  */
	      if (!by_ref)
		TREE_NO_WARNING (var) = 1;
	      break;

	    case OMP_CLAUSE__CONDTEMP_:
	      /* Condtemp in a parallel reads the value from the receiver
		 record; in a simd (non-iterator case) it is privatized and
		 zero-initialized via the do_private path.  */
	      if (is_parallel_ctx (ctx))
		{
		  x = build_receiver_ref (var, false, ctx);
		  SET_DECL_VALUE_EXPR (new_var, x);
		  DECL_HAS_VALUE_EXPR_P (new_var) = 1;
		}
	      else if (is_simd && !OMP_CLAUSE__CONDTEMP__ITER (c))
		{
		  x = build_zero_cst (TREE_TYPE (var));
		  goto do_private;
		}
	      break;
case OMP_CLAUSE_LASTPRIVATE:
if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
break;
/* FALLTHRU */
case OMP_CLAUSE_PRIVATE:
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_PRIVATE)
x = build_outer_var_ref (var, ctx);
else if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
{
if (is_task_ctx (ctx))
x = build_receiver_ref (var, false, ctx);
else
x = build_outer_var_ref (var, ctx, OMP_CLAUSE_PRIVATE);
}
else
x = NULL;
do_private:
tree nx;
bool copy_ctor;
copy_ctor = false;
nx = unshare_expr (new_var);
if (is_simd
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_LOOP_IV (c))
copy_ctor = true;
if (copy_ctor)
nx = lang_hooks.decls.omp_clause_copy_ctor (c, nx, x);
else
nx = lang_hooks.decls.omp_clause_default_ctor (c, nx, x);
if (is_simd)
{
tree y = lang_hooks.decls.omp_clause_dtor (c, new_var);
if ((TREE_ADDRESSABLE (new_var) || nx || y
|| (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& (gimple_omp_for_collapse (ctx->stmt) != 1
|| (gimple_omp_for_index (ctx->stmt, 0)
!= new_var)))
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE__CONDTEMP_
|| omp_is_reference (var))
&& lower_rec_simd_input_clauses (new_var, ctx, &sctx,
ivar, lvar))
{
if (omp_is_reference (var))
{
gcc_assert (TREE_CODE (new_var) == MEM_REF);
tree new_vard = TREE_OPERAND (new_var, 0);
gcc_assert (DECL_P (new_vard));
SET_DECL_VALUE_EXPR (new_vard,
build_fold_addr_expr (lvar));
DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
}
if (nx)
{
tree iv = unshare_expr (ivar);
if (copy_ctor)
x = lang_hooks.decls.omp_clause_copy_ctor (c, iv,
x);
else
x = lang_hooks.decls.omp_clause_default_ctor (c,
iv,
x);
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE__CONDTEMP_)
{
x = build2 (MODIFY_EXPR, TREE_TYPE (ivar),
unshare_expr (ivar), x);
nx = x;
}
if (nx && x)
gimplify_and_add (x, &llist[0]);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c))
{
tree v = new_var;
if (!DECL_P (v))
{
gcc_assert (TREE_CODE (v) == MEM_REF);
v = TREE_OPERAND (v, 0);
gcc_assert (DECL_P (v));
}
v = *ctx->lastprivate_conditional_map->get (v);
tree t = create_tmp_var (TREE_TYPE (v));
tree z = build_zero_cst (TREE_TYPE (v));
tree orig_v
= build_outer_var_ref (var, ctx,
OMP_CLAUSE_LASTPRIVATE);
gimple_seq_add_stmt (dlist,
gimple_build_assign (t, z));
gcc_assert (DECL_HAS_VALUE_EXPR_P (v));
tree civar = DECL_VALUE_EXPR (v);
gcc_assert (TREE_CODE (civar) == ARRAY_REF);
civar = unshare_expr (civar);
TREE_OPERAND (civar, 1) = sctx.idx;
x = build2 (MODIFY_EXPR, TREE_TYPE (t), t,
unshare_expr (civar));
x = build2 (COMPOUND_EXPR, TREE_TYPE (orig_v), x,
build2 (MODIFY_EXPR, TREE_TYPE (orig_v),
orig_v, unshare_expr (ivar)));
tree cond = build2 (LT_EXPR, boolean_type_node, t,
civar);
x = build3 (COND_EXPR, void_type_node, cond, x,
void_node);
gimple_seq tseq = NULL;
gimplify_and_add (x, &tseq);
if (ctx->outer)
lower_omp (&tseq, ctx->outer);
gimple_seq_add_seq (&llist[1], tseq);
}
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& ctx->for_simd_scan_phase)
{
x = unshare_expr (ivar);
tree orig_v
= build_outer_var_ref (var, ctx,
OMP_CLAUSE_LASTPRIVATE);
x = lang_hooks.decls.omp_clause_assign_op (c, x,
orig_v);
gimplify_and_add (x, &llist[0]);
}
if (y)
{
y = lang_hooks.decls.omp_clause_dtor (c, ivar);
if (y)
gimplify_and_add (y, &llist[1]);
}
break;
}
if (omp_is_reference (var))
{
gcc_assert (TREE_CODE (new_var) == MEM_REF);
tree new_vard = TREE_OPERAND (new_var, 0);
gcc_assert (DECL_P (new_vard));
tree type = TREE_TYPE (TREE_TYPE (new_vard));
x = TYPE_SIZE_UNIT (type);
if (TREE_CONSTANT (x))
{
x = create_tmp_var_raw (type, get_name (var));
gimple_add_tmp_var (x);
TREE_ADDRESSABLE (x) = 1;
x = build_fold_addr_expr_loc (clause_loc, x);
x = fold_convert_loc (clause_loc,
TREE_TYPE (new_vard), x);
gimplify_assign (new_vard, x, ilist);
}
}
}
if (nx)
gimplify_and_add (nx, ilist);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
&& is_simd
&& ctx->for_simd_scan_phase)
{
tree orig_v = build_outer_var_ref (var, ctx,
OMP_CLAUSE_LASTPRIVATE);
x = lang_hooks.decls.omp_clause_assign_op (c, new_var,
orig_v);
gimplify_and_add (x, ilist);
}
/* FALLTHRU */
do_dtor:
x = lang_hooks.decls.omp_clause_dtor (c, new_var);
if (x)
gimplify_and_add (x, dlist);
break;
	    case OMP_CLAUSE_LINEAR:
	      /* linear without no_copyin behaves like firstprivate for
		 initialization; otherwise privatize, seeding from the outer
		 variable only when copy-out is required.  */
	      if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c))
		goto do_firstprivate;
	      if (OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
		x = NULL;
	      else
		x = build_outer_var_ref (var, ctx);
	      goto do_private;
case OMP_CLAUSE_FIRSTPRIVATE:
if (is_task_ctx (ctx))
{
if ((omp_is_reference (var)
&& !OMP_CLAUSE_FIRSTPRIVATE_NO_REFERENCE (c))
|| is_variable_sized (var))
goto do_dtor;
else if (is_global_var (maybe_lookup_decl_in_outer_ctx (var,
ctx))
|| use_pointer_for_field (var, NULL))
{
x = build_receiver_ref (var, false, ctx);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
goto do_dtor;
}
}
if (OMP_CLAUSE_FIRSTPRIVATE_NO_REFERENCE (c)
&& omp_is_reference (var))
{
x = build_outer_var_ref (var, ctx);
gcc_assert (TREE_CODE (x) == MEM_REF
&& integer_zerop (TREE_OPERAND (x, 1)));
x = TREE_OPERAND (x, 0);
x = lang_hooks.decls.omp_clause_copy_ctor
(c, unshare_expr (new_var), x);
gimplify_and_add (x, ilist);
goto do_dtor;
}
do_firstprivate:
x = build_outer_var_ref (var, ctx);
if (is_simd)
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
&& gimple_omp_for_combined_into_p (ctx->stmt))
{
tree t = OMP_CLAUSE_LINEAR_STEP (c);
tree stept = TREE_TYPE (t);
tree ct = omp_find_clause (clauses,
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (ct);
tree l = OMP_CLAUSE_DECL (ct);
tree n1 = fd->loop.n1;
tree step = fd->loop.step;
tree itype = TREE_TYPE (l);
if (POINTER_TYPE_P (itype))
itype = signed_type_for (itype);
l = fold_build2 (MINUS_EXPR, itype, l, n1);
if (TYPE_UNSIGNED (itype)
&& fd->loop.cond_code == GT_EXPR)
l = fold_build2 (TRUNC_DIV_EXPR, itype,
fold_build1 (NEGATE_EXPR, itype, l),
fold_build1 (NEGATE_EXPR,
itype, step));
else
l = fold_build2 (TRUNC_DIV_EXPR, itype, l, step);
t = fold_build2 (MULT_EXPR, stept,
fold_convert (stept, l), t);
if (OMP_CLAUSE_LINEAR_ARRAY (c))
{
if (omp_is_reference (var))
{
gcc_assert (TREE_CODE (new_var) == MEM_REF);
tree new_vard = TREE_OPERAND (new_var, 0);
gcc_assert (DECL_P (new_vard));
tree type = TREE_TYPE (TREE_TYPE (new_vard));
nx = TYPE_SIZE_UNIT (type);
if (TREE_CONSTANT (nx))
{
nx = create_tmp_var_raw (type,
get_name (var));
gimple_add_tmp_var (nx);
TREE_ADDRESSABLE (nx) = 1;
nx = build_fold_addr_expr_loc (clause_loc,
nx);
nx = fold_convert_loc (clause_loc,
TREE_TYPE (new_vard),
nx);
gimplify_assign (new_vard, nx, ilist);
}
}
x = lang_hooks.decls.omp_clause_linear_ctor
(c, new_var, x, t);
gimplify_and_add (x, ilist);
goto do_dtor;
}
if (POINTER_TYPE_P (TREE_TYPE (x)))
x = fold_build2 (POINTER_PLUS_EXPR,
TREE_TYPE (x), x, t);
else
x = fold_build2 (PLUS_EXPR, TREE_TYPE (x), x, t);
}
if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR
|| TREE_ADDRESSABLE (new_var)
|| omp_is_reference (var))
&& lower_rec_simd_input_clauses (new_var, ctx, &sctx,
ivar, lvar))
{
if (omp_is_reference (var))
{
gcc_assert (TREE_CODE (new_var) == MEM_REF);
tree new_vard = TREE_OPERAND (new_var, 0);
gcc_assert (DECL_P (new_vard));
SET_DECL_VALUE_EXPR (new_vard,
build_fold_addr_expr (lvar));
DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
}
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR)
{
tree iv = create_tmp_var (TREE_TYPE (new_var));
x = lang_hooks.decls.omp_clause_copy_ctor (c, iv, x);
gimplify_and_add (x, ilist);
gimple_stmt_iterator gsi
= gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
gassign *g
= gimple_build_assign (unshare_expr (lvar), iv);
gsi_insert_before_without_update (&gsi, g,
GSI_SAME_STMT);
tree t = OMP_CLAUSE_LINEAR_STEP (c);
enum tree_code code = PLUS_EXPR;
if (POINTER_TYPE_P (TREE_TYPE (new_var)))
code = POINTER_PLUS_EXPR;
g = gimple_build_assign (iv, code, iv, t);
gsi_insert_before_without_update (&gsi, g,
GSI_SAME_STMT);
break;
}
x = lang_hooks.decls.omp_clause_copy_ctor
(c, unshare_expr (ivar), x);
gimplify_and_add (x, &llist[0]);
x = lang_hooks.decls.omp_clause_dtor (c, ivar);
if (x)
gimplify_and_add (x, &llist[1]);
break;
}
if (omp_is_reference (var))
{
gcc_assert (TREE_CODE (new_var) == MEM_REF);
tree new_vard = TREE_OPERAND (new_var, 0);
gcc_assert (DECL_P (new_vard));
tree type = TREE_TYPE (TREE_TYPE (new_vard));
nx = TYPE_SIZE_UNIT (type);
if (TREE_CONSTANT (nx))
{
nx = create_tmp_var_raw (type, get_name (var));
gimple_add_tmp_var (nx);
TREE_ADDRESSABLE (nx) = 1;
nx = build_fold_addr_expr_loc (clause_loc, nx);
nx = fold_convert_loc (clause_loc,
TREE_TYPE (new_vard), nx);
gimplify_assign (new_vard, nx, ilist);
}
}
}
x = lang_hooks.decls.omp_clause_copy_ctor
(c, unshare_expr (new_var), x);
gimplify_and_add (x, ilist);
goto do_dtor;
	    case OMP_CLAUSE__LOOPTEMP_:
	    case OMP_CLAUSE__REDUCTEMP_:
	      /* Compiler-generated temporaries only appear on task/parallel
		 regions; copy the outer value in.  */
	      gcc_assert (is_taskreg_ctx (ctx));
	      x = build_outer_var_ref (var, ctx);
	      x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
	      gimplify_and_add (x, ilist);
	      break;

	    case OMP_CLAUSE_COPYIN:
	      /* Copy the master thread's threadprivate value into each
		 thread's copy; the assignment is accumulated separately.  */
	      by_ref = use_pointer_for_field (var, NULL);
	      x = build_receiver_ref (var, by_ref, ctx);
	      x = lang_hooks.decls.omp_clause_assign_op (c, new_var, x);
append_to_statement_list (x, &copyin_seq);
copyin_by_ref |= by_ref;
break;
case OMP_CLAUSE_REDUCTION:
case OMP_CLAUSE_IN_REDUCTION:
/* OpenACC reductions are initialized using the
GOACC_REDUCTION internal function. */
if (is_gimple_omp_oacc (ctx->stmt))
break;
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
gimple *tseq;
tree ptype = TREE_TYPE (placeholder);
if (cond)
{
x = error_mark_node;
if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c)
&& !task_reduction_needs_orig_p)
x = var;
else if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
{
tree pptype = build_pointer_type (ptype);
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION)
x = build4 (ARRAY_REF, ptr_type_node, tskred_avar,
size_int (task_reduction_cnt_full
+ task_reduction_cntorig - 1),
NULL_TREE, NULL_TREE);
else
{
unsigned int idx
= *ctx->task_reduction_map->get (c);
x = task_reduction_read (ilist, tskred_temp,
pptype, 7 + 3 * idx);
}
x = fold_convert (pptype, x);
x = build_simple_mem_ref (x);
}
}
else
{
x = build_outer_var_ref (var, ctx);
if (omp_is_reference (var)
&& !useless_type_conversion_p (ptype, TREE_TYPE (x)))
x = build_fold_addr_expr_loc (clause_loc, x);
}
SET_DECL_VALUE_EXPR (placeholder, x);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
tree new_vard = new_var;
if (omp_is_reference (var))
{
gcc_assert (TREE_CODE (new_var) == MEM_REF);
new_vard = TREE_OPERAND (new_var, 0);
gcc_assert (DECL_P (new_vard));
}
tree rvar = NULL_TREE, *rvarp = NULL, rvar2 = NULL_TREE;
if (is_simd
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_INSCAN (c))
rvarp = &rvar;
if (is_simd
&& lower_rec_simd_input_clauses (new_var, ctx, &sctx,
ivar, lvar, rvarp,
&rvar2))
{
if (new_vard == new_var)
{
gcc_assert (DECL_VALUE_EXPR (new_var) == lvar);
SET_DECL_VALUE_EXPR (new_var, ivar);
}
else
{
SET_DECL_VALUE_EXPR (new_vard,
build_fold_addr_expr (ivar));
DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
}
x = lang_hooks.decls.omp_clause_default_ctor
(c, unshare_expr (ivar),
build_outer_var_ref (var, ctx));
if (rvarp && ctx->for_simd_scan_phase)
{
if (x)
gimplify_and_add (x, &llist[0]);
x = lang_hooks.decls.omp_clause_dtor (c, ivar);
if (x)
gimplify_and_add (x, &llist[1]);
break;
}
else if (rvarp)
{
if (x)
{
gimplify_and_add (x, &llist[0]);
tree ivar2 = unshare_expr (lvar);
TREE_OPERAND (ivar2, 1) = sctx.idx;
x = lang_hooks.decls.omp_clause_default_ctor
(c, ivar2, build_outer_var_ref (var, ctx));
gimplify_and_add (x, &llist[0]);
if (rvar2)
{
x = lang_hooks.decls.omp_clause_default_ctor
(c, unshare_expr (rvar2),
build_outer_var_ref (var, ctx));
gimplify_and_add (x, &llist[0]);
}
/* For types that need construction, add another
private var which will be default constructed
and optionally initialized with
OMP_CLAUSE_REDUCTION_GIMPLE_INIT, as in the
loop we want to assign this value instead of
constructing and destructing it in each
iteration. */
tree nv = create_tmp_var_raw (TREE_TYPE (ivar));
gimple_add_tmp_var (nv);
ctx->cb.decl_map->put (TREE_OPERAND (rvar2
? rvar2
: ivar, 0),
nv);
x = lang_hooks.decls.omp_clause_default_ctor
(c, nv, build_outer_var_ref (var, ctx));
gimplify_and_add (x, ilist);
if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
{
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
x = DECL_VALUE_EXPR (new_vard);
tree vexpr = nv;
if (new_vard != new_var)
vexpr = build_fold_addr_expr (nv);
SET_DECL_VALUE_EXPR (new_vard, vexpr);
lower_omp (&tseq, ctx);
SET_DECL_VALUE_EXPR (new_vard, x);
gimple_seq_add_seq (ilist, tseq);
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
}
x = lang_hooks.decls.omp_clause_dtor (c, nv);
if (x)
gimplify_and_add (x, dlist);
}
tree ref = build_outer_var_ref (var, ctx);
x = unshare_expr (ivar);
x = lang_hooks.decls.omp_clause_assign_op (c, x,
ref);
gimplify_and_add (x, &llist[0]);
ref = build_outer_var_ref (var, ctx);
x = lang_hooks.decls.omp_clause_assign_op (c, ref,
rvar);
gimplify_and_add (x, &llist[3]);
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
if (new_vard == new_var)
SET_DECL_VALUE_EXPR (new_var, lvar);
else
SET_DECL_VALUE_EXPR (new_vard,
build_fold_addr_expr (lvar));
x = lang_hooks.decls.omp_clause_dtor (c, ivar);
if (x)
gimplify_and_add (x, &llist[1]);
tree ivar2 = unshare_expr (lvar);
TREE_OPERAND (ivar2, 1) = sctx.idx;
x = lang_hooks.decls.omp_clause_dtor (c, ivar2);
if (x)
gimplify_and_add (x, &llist[1]);
if (rvar2)
{
x = lang_hooks.decls.omp_clause_dtor (c, rvar2);
if (x)
gimplify_and_add (x, &llist[1]);
}
break;
}
if (x)
gimplify_and_add (x, &llist[0]);
if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
{
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (&llist[0], tseq);
}
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (&llist[1], tseq);
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
if (new_vard == new_var)
SET_DECL_VALUE_EXPR (new_var, lvar);
else
SET_DECL_VALUE_EXPR (new_vard,
build_fold_addr_expr (lvar));
x = lang_hooks.decls.omp_clause_dtor (c, ivar);
if (x)
gimplify_and_add (x, &llist[1]);
break;
}
/* If this is a reference to constant size reduction var
with placeholder, we haven't emitted the initializer
for it because it is undesirable if SIMD arrays are used.
But if they aren't used, we need to emit the deferred
initialization now. */
else if (omp_is_reference (var) && is_simd)
handle_simd_reference (clause_loc, new_vard, ilist);
tree lab2 = NULL_TREE;
if (cond)
{
gimple *g;
if (!is_parallel_ctx (ctx))
{
tree condv = create_tmp_var (boolean_type_node);
tree m = build_simple_mem_ref (cond);
g = gimple_build_assign (condv, m);
gimple_seq_add_stmt (ilist, g);
tree lab1
= create_artificial_label (UNKNOWN_LOCATION);
lab2 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (NE_EXPR, condv,
boolean_false_node,
lab2, lab1);
gimple_seq_add_stmt (ilist, g);
gimple_seq_add_stmt (ilist,
gimple_build_label (lab1));
}
g = gimple_build_assign (build_simple_mem_ref (cond),
boolean_true_node);
gimple_seq_add_stmt (ilist, g);
}
x = lang_hooks.decls.omp_clause_default_ctor
(c, unshare_expr (new_var),
cond ? NULL_TREE
: build_outer_var_ref (var, ctx));
if (x)
gimplify_and_add (x, ilist);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_INSCAN (c))
{
if (ctx->for_simd_scan_phase)
goto do_dtor;
if (x || (!is_simd
&& OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c)))
{
tree nv = create_tmp_var_raw (TREE_TYPE (new_var));
gimple_add_tmp_var (nv);
ctx->cb.decl_map->put (new_vard, nv);
x = lang_hooks.decls.omp_clause_default_ctor
(c, nv, build_outer_var_ref (var, ctx));
if (x)
gimplify_and_add (x, ilist);
if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
{
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
tree vexpr = nv;
if (new_vard != new_var)
vexpr = build_fold_addr_expr (nv);
SET_DECL_VALUE_EXPR (new_vard, vexpr);
DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
lower_omp (&tseq, ctx);
SET_DECL_VALUE_EXPR (new_vard, NULL_TREE);
DECL_HAS_VALUE_EXPR_P (new_vard) = 0;
gimple_seq_add_seq (ilist, tseq);
}
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
if (is_simd && ctx->scan_exclusive)
{
tree nv2
= create_tmp_var_raw (TREE_TYPE (new_var));
gimple_add_tmp_var (nv2);
ctx->cb.decl_map->put (nv, nv2);
x = lang_hooks.decls.omp_clause_default_ctor
(c, nv2, build_outer_var_ref (var, ctx));
gimplify_and_add (x, ilist);
x = lang_hooks.decls.omp_clause_dtor (c, nv2);
if (x)
gimplify_and_add (x, dlist);
}
x = lang_hooks.decls.omp_clause_dtor (c, nv);
if (x)
gimplify_and_add (x, dlist);
}
else if (is_simd
&& ctx->scan_exclusive
&& TREE_ADDRESSABLE (TREE_TYPE (new_var)))
{
tree nv2 = create_tmp_var_raw (TREE_TYPE (new_var));
gimple_add_tmp_var (nv2);
ctx->cb.decl_map->put (new_vard, nv2);
x = lang_hooks.decls.omp_clause_dtor (c, nv2);
if (x)
gimplify_and_add (x, dlist);
}
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
goto do_dtor;
}
if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
{
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (ilist, tseq);
}
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
if (is_simd)
{
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (dlist, tseq);
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
}
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
if (cond)
{
if (lab2)
gimple_seq_add_stmt (ilist, gimple_build_label (lab2));
break;
}
goto do_dtor;
}
else
{
x = omp_reduction_init (c, TREE_TYPE (new_var));
gcc_assert (TREE_CODE (TREE_TYPE (new_var)) != ARRAY_TYPE);
enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
if (cond)
{
gimple *g;
tree lab2 = NULL_TREE;
/* GOMP_taskgroup_reduction_register memsets the whole
array to zero. If the initializer is zero, we don't
need to initialize it again, just mark it as ever
used unconditionally, i.e. cond = true. */
if (initializer_zerop (x))
{
g = gimple_build_assign (build_simple_mem_ref (cond),
boolean_true_node);
gimple_seq_add_stmt (ilist, g);
break;
}
/* Otherwise, emit
if (!cond) { cond = true; new_var = x; } */
if (!is_parallel_ctx (ctx))
{
tree condv = create_tmp_var (boolean_type_node);
tree m = build_simple_mem_ref (cond);
g = gimple_build_assign (condv, m);
gimple_seq_add_stmt (ilist, g);
tree lab1
= create_artificial_label (UNKNOWN_LOCATION);
lab2 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (NE_EXPR, condv,
boolean_false_node,
lab2, lab1);
gimple_seq_add_stmt (ilist, g);
gimple_seq_add_stmt (ilist,
gimple_build_label (lab1));
}
g = gimple_build_assign (build_simple_mem_ref (cond),
boolean_true_node);
gimple_seq_add_stmt (ilist, g);
gimplify_assign (new_var, x, ilist);
if (lab2)
gimple_seq_add_stmt (ilist, gimple_build_label (lab2));
break;
}
/* reduction(-:var) sums up the partial results, so it
acts identically to reduction(+:var). */
if (code == MINUS_EXPR)
code = PLUS_EXPR;
tree new_vard = new_var;
if (is_simd && omp_is_reference (var))
{
gcc_assert (TREE_CODE (new_var) == MEM_REF);
new_vard = TREE_OPERAND (new_var, 0);
gcc_assert (DECL_P (new_vard));
}
tree rvar = NULL_TREE, *rvarp = NULL, rvar2 = NULL_TREE;
if (is_simd
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_INSCAN (c))
rvarp = &rvar;
if (is_simd
&& lower_rec_simd_input_clauses (new_var, ctx, &sctx,
ivar, lvar, rvarp,
&rvar2))
{
if (new_vard != new_var)
{
SET_DECL_VALUE_EXPR (new_vard,
build_fold_addr_expr (lvar));
DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
}
tree ref = build_outer_var_ref (var, ctx);
if (rvarp)
{
if (ctx->for_simd_scan_phase)
break;
gimplify_assign (ivar, ref, &llist[0]);
ref = build_outer_var_ref (var, ctx);
gimplify_assign (ref, rvar, &llist[3]);
break;
}
gimplify_assign (unshare_expr (ivar), x, &llist[0]);
if (sctx.is_simt)
{
if (!simt_lane)
simt_lane = create_tmp_var (unsigned_type_node);
x = build_call_expr_internal_loc
(UNKNOWN_LOCATION, IFN_GOMP_SIMT_XCHG_BFLY,
TREE_TYPE (ivar), 2, ivar, simt_lane);
x = build2 (code, TREE_TYPE (ivar), ivar, x);
gimplify_assign (ivar, x, &llist[2]);
}
x = build2 (code, TREE_TYPE (ref), ref, ivar);
ref = build_outer_var_ref (var, ctx);
gimplify_assign (ref, x, &llist[1]);
}
else
{
if (omp_is_reference (var) && is_simd)
handle_simd_reference (clause_loc, new_vard, ilist);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_INSCAN (c))
break;
gimplify_assign (new_var, x, ilist);
if (is_simd)
{
tree ref = build_outer_var_ref (var, ctx);
x = build2 (code, TREE_TYPE (ref), ref, new_var);
ref = build_outer_var_ref (var, ctx);
gimplify_assign (ref, x, dlist);
}
}
}
break;
default:
gcc_unreachable ();
}
}
}
  /* The task-reduction address array is dead past this point; clobber it
     so the stack slot can be reused.  */
  if (tskred_avar)
    {
      tree clobber = build_clobber (TREE_TYPE (tskred_avar));
      gimple_seq_add_stmt (ilist, gimple_build_assign (tskred_avar, clobber));
    }
  /* Vectorization factor is known to be 1: no SIMT, and
     lastprivate(conditional:) degenerates as handled below.  */
  if (known_eq (sctx.max_vf, 1U))
    {
      sctx.is_simt = false;
      if (ctx->lastprivate_conditional_map)
	{
	  if (gimple_omp_for_combined_into_p (ctx->stmt))
	    {
	      /* Signal to lower_omp_1 that it should use parent context.  */
	      ctx->combined_into_simd_safelen1 = true;
	      /* Redirect each conditional-lastprivate tracking var to the
		 parent context's corresponding tracking var.  */
	      for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
		    && OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c))
		  {
		    tree o = lookup_decl (OMP_CLAUSE_DECL (c), ctx);
		    omp_context *outer = ctx->outer;
		    if (gimple_code (outer->stmt) == GIMPLE_OMP_SCAN)
		      outer = outer->outer;
		    tree *v = ctx->lastprivate_conditional_map->get (o);
		    tree po = lookup_decl (OMP_CLAUSE_DECL (c), outer);
		    tree *pv = outer->lastprivate_conditional_map->get (po);
		    *v = *pv;
		  }
	    }
	  else
	    {
	      /* When not vectorized, treat lastprivate(conditional:) like
		 normal lastprivate, as there will be just one simd lane
		 writing the privatized variable.  */
	      delete ctx->lastprivate_conditional_map;
	      ctx->lastprivate_conditional_map = NULL;
	    }
	}
    }
  /* A non-constant if() clause on the simd: make sure lane/idx temps
     exist, and disable SIMT.  */
  if (nonconst_simd_if)
    {
      if (sctx.lane == NULL_TREE)
	{
	  sctx.idx = create_tmp_var (unsigned_type_node);
	  sctx.lane = create_tmp_var (unsigned_type_node);
	}
      /* FIXME: For now.  */
      sctx.is_simt = false;
    }
  /* Attach an _simduid_ clause carrying an artificial decl whose DECL_UID
     identifies this simd region to later passes.  */
  if (sctx.lane || sctx.is_simt)
    {
      uid = create_tmp_var (ptr_type_node, "simduid");
      /* Don't want uninit warnings on simduid, it is always uninitialized,
	 but we use it not for the value, but for the DECL_UID only.  */
      TREE_NO_WARNING (uid) = 1;
      c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SIMDUID_);
      OMP_CLAUSE__SIMDUID__DECL (c) = uid;
      OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
      gimple_omp_for_set_clauses (ctx->stmt, c);
    }
/* Emit calls denoting privatized variables and initializing a pointer to
structure that holds private variables as fields after ompdevlow pass. */
if (sctx.is_simt)
{
sctx.simt_eargs[0] = uid;
gimple *g
= gimple_build_call_internal_vec (IFN_GOMP_SIMT_ENTER, sctx.simt_eargs);
gimple_call_set_lhs (g, uid);
gimple_seq_add_stmt (ilist, g);
sctx.simt_eargs.release ();
simtrec = create_tmp_var (ptr_type_node, ".omp_simt");
g = gimple_build_call_internal (IFN_GOMP_SIMT_ENTER_ALLOC, 1, uid);
gimple_call_set_lhs (g, simtrec);
gimple_seq_add_stmt (ilist, g);
}
if (sctx.lane)
{
gimple *g = gimple_build_call_internal (IFN_GOMP_SIMD_LANE,
2 + (nonconst_simd_if != NULL),
uid, integer_zero_node,
nonconst_simd_if);
gimple_call_set_lhs (g, sctx.lane);
gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (ctx->stmt));
gsi_insert_before_without_update (&gsi, g, GSI_SAME_STMT);
g = gimple_build_assign (sctx.lane, INTEGER_CST,
build_int_cst (unsigned_type_node, 0));
gimple_seq_add_stmt (ilist, g);
if (sctx.lastlane)
{
g = gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
2, uid, sctx.lane);
gimple_call_set_lhs (g, sctx.lastlane);
gimple_seq_add_stmt (dlist, g);
gimple_seq_add_seq (dlist, llist[3]);
}
/* Emit reductions across SIMT lanes in log_2(simt_vf) steps. */
if (llist[2])
{
tree simt_vf = create_tmp_var (unsigned_type_node);
g = gimple_build_call_internal (IFN_GOMP_SIMT_VF, 0);
gimple_call_set_lhs (g, simt_vf);
gimple_seq_add_stmt (dlist, g);
tree t = build_int_cst (unsigned_type_node, 1);
g = gimple_build_assign (simt_lane, INTEGER_CST, t);
gimple_seq_add_stmt (dlist, g);
t = build_int_cst (unsigned_type_node, 0);
g = gimple_build_assign (sctx.idx, INTEGER_CST, t);
gimple_seq_add_stmt (dlist, g);
tree body = create_artificial_label (UNKNOWN_LOCATION);
tree header = create_artificial_label (UNKNOWN_LOCATION);
tree end = create_artificial_label (UNKNOWN_LOCATION);
gimple_seq_add_stmt (dlist, gimple_build_goto (header));
gimple_seq_add_stmt (dlist, gimple_build_label (body));
gimple_seq_add_seq (dlist, llist[2]);
g = gimple_build_assign (simt_lane, LSHIFT_EXPR, simt_lane, integer_one_node);
gimple_seq_add_stmt (dlist, g);
gimple_seq_add_stmt (dlist, gimple_build_label (header));
g = gimple_build_cond (LT_EXPR, simt_lane, simt_vf, body, end);
gimple_seq_add_stmt (dlist, g);
gimple_seq_add_stmt (dlist, gimple_build_label (end));
}
for (int i = 0; i < 2; i++)
if (llist[i])
{
tree vf = create_tmp_var (unsigned_type_node);
g = gimple_build_call_internal (IFN_GOMP_SIMD_VF, 1, uid);
gimple_call_set_lhs (g, vf);
gimple_seq *seq = i == 0 ? ilist : dlist;
gimple_seq_add_stmt (seq, g);
tree t = build_int_cst (unsigned_type_node, 0);
g = gimple_build_assign (sctx.idx, INTEGER_CST, t);
gimple_seq_add_stmt (seq, g);
tree body = create_artificial_label (UNKNOWN_LOCATION);
tree header = create_artificial_label (UNKNOWN_LOCATION);
tree end = create_artificial_label (UNKNOWN_LOCATION);
gimple_seq_add_stmt (seq, gimple_build_goto (header));
gimple_seq_add_stmt (seq, gimple_build_label (body));
gimple_seq_add_seq (seq, llist[i]);
t = build_int_cst (unsigned_type_node, 1);
g = gimple_build_assign (sctx.idx, PLUS_EXPR, sctx.idx, t);
gimple_seq_add_stmt (seq, g);
gimple_seq_add_stmt (seq, gimple_build_label (header));
g = gimple_build_cond (LT_EXPR, sctx.idx, vf, body, end);
gimple_seq_add_stmt (seq, g);
gimple_seq_add_stmt (seq, gimple_build_label (end));
}
}
if (sctx.is_simt)
{
gimple_seq_add_seq (dlist, sctx.simt_dlist);
gimple *g
= gimple_build_call_internal (IFN_GOMP_SIMT_EXIT, 1, simtrec);
gimple_seq_add_stmt (dlist, g);
}
/* The copyin sequence is not to be executed by the main thread, since
that would result in self-copies. Perhaps not visible to scalars,
but it certainly is to C++ operator=. */
if (copyin_seq)
{
x = build_call_expr (builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM),
0);
x = build2 (NE_EXPR, boolean_type_node, x,
build_int_cst (TREE_TYPE (x), 0));
x = build3 (COND_EXPR, void_type_node, x, copyin_seq, NULL);
gimplify_and_add (x, ilist);
}
/* If any copyin variable is passed by reference, we must ensure the
master thread doesn't modify it before it is copied over in all
threads. Similarly for variables in both firstprivate and
lastprivate clauses we need to ensure the lastprivate copying
happens after firstprivate copying in all threads. And similarly
for UDRs if initializer expression refers to omp_orig. */
if (copyin_by_ref || lastprivate_firstprivate
|| (reduction_omp_orig_ref
&& !ctx->scan_inclusive
&& !ctx->scan_exclusive))
{
/* Don't add any barrier for #pragma omp simd or
#pragma omp distribute. */
if (!is_task_ctx (ctx)
&& (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
|| gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_FOR))
gimple_seq_add_stmt (ilist, omp_build_barrier (NULL_TREE));
}
/* If max_vf is non-zero, then we can use only a vectorization factor
up to the max_vf we chose. So stick it into the safelen clause. */
if (maybe_ne (sctx.max_vf, 0U))
{
tree c = omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
OMP_CLAUSE_SAFELEN);
poly_uint64 safe_len;
if (c == NULL_TREE
|| (poly_int_tree_p (OMP_CLAUSE_SAFELEN_EXPR (c), &safe_len)
&& maybe_gt (safe_len, sctx.max_vf)))
{
c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_SAFELEN);
OMP_CLAUSE_SAFELEN_EXPR (c) = build_int_cst (integer_type_node,
sctx.max_vf);
OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (ctx->stmt);
gimple_omp_for_set_clauses (ctx->stmt, c);
}
}
}
/* Create temporary variables for lastprivate(conditional:) implementation
   in context CTX with CLAUSES.

   For each lastprivate(conditional:) clause this records, in
   ctx->lastprivate_conditional_map, a mapping from the privatized decl to a
   per-thread (or per-lane) "condition iteration" temporary which tracks the
   iteration at which the variable was last written.  On the SIMD path the
   _condtemp_ clauses have already been created earlier (one per conditional
   lastprivate, in matching order) and are only looked up here; on the
   non-SIMD path the _condtemp_ clauses and the iterator temporaries are
   created and spliced into *CLAUSES by this function.  */

static void
lower_lastprivate_conditional_clauses (tree *clauses, omp_context *ctx)
{
  tree iter_type = NULL_TREE;
  tree cond_ptr = NULL_TREE;
  tree iter_var = NULL_TREE;
  bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
		  && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD);
  /* NEXT walks the remaining clause chain so that repeated
     omp_find_clause calls below pick up successive _CONDTEMP_ clauses
     rather than finding the first one each time.  */
  tree next = *clauses;
  for (tree c = *clauses; c; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	&& OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c))
      {
	if (is_simd)
	  {
	    /* SIMD: a _CONDTEMP_ clause was created earlier for each
	       conditional lastprivate; pair this clause with the next one.  */
	    tree cc = omp_find_clause (next, OMP_CLAUSE__CONDTEMP_);
	    gcc_assert (cc);
	    if (iter_type == NULL_TREE)
	      {
		/* First conditional clause seen: create the shared iterator
		   temporary and prepend its _CONDTEMP_ clause.  */
		iter_type = TREE_TYPE (OMP_CLAUSE_DECL (cc));
		iter_var = create_tmp_var_raw (iter_type);
		DECL_CONTEXT (iter_var) = current_function_decl;
		DECL_SEEN_IN_BIND_EXPR_P (iter_var) = 1;
		DECL_CHAIN (iter_var) = ctx->block_vars;
		ctx->block_vars = iter_var;
		tree c3
		  = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__CONDTEMP_);
		OMP_CLAUSE__CONDTEMP__ITER (c3) = 1;
		OMP_CLAUSE_DECL (c3) = iter_var;
		OMP_CLAUSE_CHAIN (c3) = *clauses;
		*clauses = c3;
		ctx->lastprivate_conditional_map = new hash_map<tree, tree>;
	      }
	    next = OMP_CLAUSE_CHAIN (cc);
	    tree o = lookup_decl (OMP_CLAUSE_DECL (c), ctx);
	    tree v = lookup_decl (OMP_CLAUSE_DECL (cc), ctx);
	    ctx->lastprivate_conditional_map->put (o, v);
	    continue;
	  }
	if (iter_type == NULL)
	  {
	    /* Non-SIMD (worksharing loop or sections): derive the iterator
	       type from the loop iteration variable type, or use unsigned
	       for sections.  */
	    if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR)
	      {
		struct omp_for_data fd;
		omp_extract_for_data (as_a <gomp_for *> (ctx->stmt), &fd,
				      NULL);
		iter_type = unsigned_type_for (fd.iter_type);
	      }
	    else if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
	      iter_type = unsigned_type_node;
	    tree c2 = omp_find_clause (*clauses, OMP_CLAUSE__CONDTEMP_);
	    if (c2)
	      {
		/* A _CONDTEMP_ clause already exists (combined construct);
		   reuse it, redirecting its decl to the outer cond pointer.  */
		cond_ptr
		  = lookup_decl_in_outer_ctx (OMP_CLAUSE_DECL (c2), ctx);
		OMP_CLAUSE_DECL (c2) = cond_ptr;
	      }
	    else
	      {
		cond_ptr = create_tmp_var_raw (build_pointer_type (iter_type));
		DECL_CONTEXT (cond_ptr) = current_function_decl;
		DECL_SEEN_IN_BIND_EXPR_P (cond_ptr) = 1;
		DECL_CHAIN (cond_ptr) = ctx->block_vars;
		ctx->block_vars = cond_ptr;
		c2 = build_omp_clause (UNKNOWN_LOCATION,
				       OMP_CLAUSE__CONDTEMP_);
		OMP_CLAUSE_DECL (c2) = cond_ptr;
		OMP_CLAUSE_CHAIN (c2) = *clauses;
		*clauses = c2;
	      }
	    iter_var = create_tmp_var_raw (iter_type);
	    DECL_CONTEXT (iter_var) = current_function_decl;
	    DECL_SEEN_IN_BIND_EXPR_P (iter_var) = 1;
	    DECL_CHAIN (iter_var) = ctx->block_vars;
	    ctx->block_vars = iter_var;
	    tree c3
	      = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__CONDTEMP_);
	    /* Mark the second _CONDTEMP_ as the iterator, chained right
	       after the cond pointer clause.  */
	    OMP_CLAUSE__CONDTEMP__ITER (c3) = 1;
	    OMP_CLAUSE_DECL (c3) = iter_var;
	    OMP_CLAUSE_CHAIN (c3) = OMP_CLAUSE_CHAIN (c2);
	    OMP_CLAUSE_CHAIN (c2) = c3;
	    ctx->lastprivate_conditional_map = new hash_map<tree, tree>;
	  }
	/* Per-clause temporary holding the iteration count of the last
	   conditional store into the privatized variable.  */
	tree v = create_tmp_var_raw (iter_type);
	DECL_CONTEXT (v) = current_function_decl;
	DECL_SEEN_IN_BIND_EXPR_P (v) = 1;
	DECL_CHAIN (v) = ctx->block_vars;
	ctx->block_vars = v;
	tree o = lookup_decl (OMP_CLAUSE_DECL (c), ctx);
	ctx->lastprivate_conditional_map->put (o, v);
      }
}
/* Generate code to implement the LASTPRIVATE clauses.  This is used for
   both parallel and workshare constructs.  PREDICATE may be NULL if it's
   always true.  BODY_P is the sequence to insert early initialization
   if needed, STMT_LIST is where the non-conditional lastprivate handling
   goes into and CSTMT_LIST is a sequence that needs to be run in a critical
   section.

   Copies the final value of each privatized variable back into the
   original (outer) variable, guarded by PREDICATE ("is this the last
   iteration/section?").  Handles three flavors:
   - plain lastprivate / linear copy-out into STMT_LIST;
   - lastprivate(conditional:) via a per-variable iteration-count compare
     against a shared buffer, emitted into CSTMT_LIST (critical section);
   - SIMT lanes, where the predicate is voted across lanes and the value
     fetched from the last writing lane with IFN_GOMP_SIMT_XCHG_IDX.  */

static void
lower_lastprivate_clauses (tree clauses, tree predicate, gimple_seq *body_p,
			   gimple_seq *stmt_list, gimple_seq *cstmt_list,
			   omp_context *ctx)
{
  tree x, c, label = NULL, orig_clauses = clauses;
  bool par_clauses = false;
  tree simduid = NULL, lastlane = NULL, simtcond = NULL, simtlast = NULL;
  /* Running byte (or element) offset into the shared conditional buffer.  */
  unsigned HOST_WIDE_INT conditional_off = 0;
  gimple_seq post_stmt_list = NULL;

  /* Early exit if there are no lastprivate or linear clauses.  */
  for (; clauses ; clauses = OMP_CLAUSE_CHAIN (clauses))
    if (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LASTPRIVATE
	|| (OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_LINEAR
	    && !OMP_CLAUSE_LINEAR_NO_COPYOUT (clauses)))
      break;

  if (clauses == NULL)
    {
      /* If this was a workshare clause, see if it had been combined
	 with its parallel.  In that case, look for the clauses on the
	 parallel statement itself.  */
      if (is_parallel_ctx (ctx))
	return;

      ctx = ctx->outer;
      if (ctx == NULL || !is_parallel_ctx (ctx))
	return;

      clauses = omp_find_clause (gimple_omp_parallel_clauses (ctx->stmt),
				 OMP_CLAUSE_LASTPRIVATE);
      if (clauses == NULL)
	return;
      par_clauses = true;
    }

  bool maybe_simt = false;
  if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
      && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
    {
      maybe_simt = omp_find_clause (orig_clauses, OMP_CLAUSE__SIMT_);
      simduid = omp_find_clause (orig_clauses, OMP_CLAUSE__SIMDUID_);
      if (simduid)
	simduid = OMP_CLAUSE__SIMDUID__DECL (simduid);
    }

  if (predicate)
    {
      /* Emit "if (predicate) goto label_true; else goto label;" so that
	 the copy-out code below is only executed on the last iteration.  */
      gcond *stmt;
      tree label_true, arm1, arm2;
      enum tree_code pred_code = TREE_CODE (predicate);

      label = create_artificial_label (UNKNOWN_LOCATION);
      label_true = create_artificial_label (UNKNOWN_LOCATION);
      if (TREE_CODE_CLASS (pred_code) == tcc_comparison)
	{
	  arm1 = TREE_OPERAND (predicate, 0);
	  arm2 = TREE_OPERAND (predicate, 1);
	  gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
	  gimplify_expr (&arm2, stmt_list, NULL, is_gimple_val, fb_rvalue);
	}
      else
	{
	  arm1 = predicate;
	  gimplify_expr (&arm1, stmt_list, NULL, is_gimple_val, fb_rvalue);
	  arm2 = boolean_false_node;
	  pred_code = NE_EXPR;
	}
      if (maybe_simt)
	{
	  /* For SIMT, vote the predicate across all lanes so every lane
	     agrees on whether the copy-out happens.  */
	  c = build2 (pred_code, boolean_type_node, arm1, arm2);
	  c = fold_convert (integer_type_node, c);
	  simtcond = create_tmp_var (integer_type_node);
	  gimplify_assign (simtcond, c, stmt_list);
	  gcall *g = gimple_build_call_internal (IFN_GOMP_SIMT_VOTE_ANY,
						 1, simtcond);
	  c = create_tmp_var (integer_type_node);
	  gimple_call_set_lhs (g, c);
	  gimple_seq_add_stmt (stmt_list, g);
	  stmt = gimple_build_cond (NE_EXPR, c, integer_zero_node,
				    label_true, label);
	}
      else
	stmt = gimple_build_cond (pred_code, arm1, arm2, label_true, label);
      gimple_seq_add_stmt (stmt_list, stmt);
      gimple_seq_add_stmt (stmt_list, gimple_build_label (label_true));
    }

  tree cond_ptr = NULL_TREE;
  for (c = clauses; c ;)
    {
      tree var, new_var;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);
      gimple_seq *this_stmt_list = stmt_list;
      tree lab2 = NULL_TREE;

      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	  && OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
	  && ctx->lastprivate_conditional_map
	  && !ctx->combined_into_simd_safelen1)
	{
	  gcc_assert (body_p);
	  /* On the SIMD path conditional lastprivates are handled via the
	     "omp simd array" machinery instead.  */
	  if (simduid)
	    goto next;
	  if (cond_ptr == NULL_TREE)
	    {
	      cond_ptr = omp_find_clause (orig_clauses, OMP_CLAUSE__CONDTEMP_);
	      cond_ptr = OMP_CLAUSE_DECL (cond_ptr);
	    }
	  tree type = TREE_TYPE (TREE_TYPE (cond_ptr));
	  tree o = lookup_decl (OMP_CLAUSE_DECL (c), ctx);
	  tree v = *ctx->lastprivate_conditional_map->get (o);
	  /* Early-initialize the per-thread iteration counter to zero.  */
	  gimplify_assign (v, build_zero_cst (type), body_p);
	  /* The compare-and-maybe-store below must run inside the critical
	     section to serialize against other threads.  */
	  this_stmt_list = cstmt_list;
	  tree mem;
	  if (POINTER_TYPE_P (TREE_TYPE (cond_ptr)))
	    {
	      mem = build2 (MEM_REF, type, cond_ptr,
			    build_int_cst (TREE_TYPE (cond_ptr),
					   conditional_off));
	      conditional_off += tree_to_uhwi (TYPE_SIZE_UNIT (type));
	    }
	  else
	    mem = build4 (ARRAY_REF, type, cond_ptr,
			  size_int (conditional_off++), NULL_TREE, NULL_TREE);
	  tree mem2 = copy_node (mem);
	  gimple_seq seq = NULL;
	  mem = force_gimple_operand (mem, &seq, true, NULL_TREE);
	  gimple_seq_add_seq (this_stmt_list, seq);
	  tree lab1 = create_artificial_label (UNKNOWN_LOCATION);
	  lab2 = create_artificial_label (UNKNOWN_LOCATION);
	  /* if (v > *mem) { *mem = v; <copy-out>; }  — i.e. only the thread
	     with the highest writing iteration performs the copy-out.  */
	  gimple *g = gimple_build_cond (GT_EXPR, v, mem, lab1, lab2);
	  gimple_seq_add_stmt (this_stmt_list, g);
	  gimple_seq_add_stmt (this_stmt_list, gimple_build_label (lab1));
	  gimplify_assign (mem2, v, this_stmt_list);
	}
      else if (predicate
	       && ctx->combined_into_simd_safelen1
	       && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	       && OMP_CLAUSE_LASTPRIVATE_CONDITIONAL (c)
	       && ctx->lastprivate_conditional_map)
	this_stmt_list = &post_stmt_list;

      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	  || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	      && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c)))
	{
	  var = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	      && OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c)
	      && is_taskloop_ctx (ctx))
	    {
	      gcc_checking_assert (ctx->outer && is_task_ctx (ctx->outer));
	      new_var = lookup_decl (var, ctx->outer);
	    }
	  else
	    {
	      new_var = lookup_decl (var, ctx);
	      /* Avoid uninitialized warnings for lastprivate and
		 for linear iterators.  */
	      if (predicate
		  && (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
		      || OMP_CLAUSE_LINEAR_NO_COPYIN (c)))
		TREE_NO_WARNING (new_var) = 1;
	    }

	  if (!maybe_simt && simduid && DECL_HAS_VALUE_EXPR_P (new_var))
	    {
	      /* Vectorized SIMD: the privatized variable lives in an
		 "omp simd array"; index it by the last writing lane.  */
	      tree val = DECL_VALUE_EXPR (new_var);
	      if (TREE_CODE (val) == ARRAY_REF
		  && VAR_P (TREE_OPERAND (val, 0))
		  && lookup_attribute ("omp simd array",
				       DECL_ATTRIBUTES (TREE_OPERAND (val,
								      0))))
		{
		  if (lastlane == NULL)
		    {
		      lastlane = create_tmp_var (unsigned_type_node);
		      gcall *g
			= gimple_build_call_internal (IFN_GOMP_SIMD_LAST_LANE,
						      2, simduid,
						      TREE_OPERAND (val, 1));
		      gimple_call_set_lhs (g, lastlane);
		      gimple_seq_add_stmt (this_stmt_list, g);
		    }
		  new_var = build4 (ARRAY_REF, TREE_TYPE (val),
				    TREE_OPERAND (val, 0), lastlane,
				    NULL_TREE, NULL_TREE);
		  TREE_THIS_NOTRAP (new_var) = 1;
		}
	    }
	  else if (maybe_simt)
	    {
	      /* SIMT: fetch the value from the last lane that executed a
		 write, via a cross-lane exchange.  */
	      tree val = (DECL_HAS_VALUE_EXPR_P (new_var)
			  ? DECL_VALUE_EXPR (new_var)
			  : new_var);
	      if (simtlast == NULL)
		{
		  simtlast = create_tmp_var (unsigned_type_node);
		  gcall *g = gimple_build_call_internal
		    (IFN_GOMP_SIMT_LAST_LANE, 1, simtcond);
		  gimple_call_set_lhs (g, simtlast);
		  gimple_seq_add_stmt (this_stmt_list, g);
		}
	      x = build_call_expr_internal_loc
		(UNKNOWN_LOCATION, IFN_GOMP_SIMT_XCHG_IDX,
		 TREE_TYPE (val), 2, val, simtlast);
	      new_var = unshare_expr (new_var);
	      gimplify_assign (new_var, x, this_stmt_list);
	      new_var = unshare_expr (new_var);
	    }

	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	      && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    {
	      lower_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
	      gimple_seq_add_seq (this_stmt_list,
				  OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c));
	      OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c) = NULL;
	    }
	  else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
		   && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	    {
	      lower_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
	      gimple_seq_add_seq (this_stmt_list,
				  OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c));
	      OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c) = NULL;
	    }

	  x = NULL_TREE;
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
	      && OMP_CLAUSE_LASTPRIVATE_LOOP_IV (c)
	      && is_taskloop_ctx (ctx))
	    {
	      tree ovar = maybe_lookup_decl_in_outer_ctx (var,
							  ctx->outer->outer);
	      if (is_global_var (ovar))
		x = ovar;
	    }
	  if (!x)
	    x = build_outer_var_ref (var, ctx, OMP_CLAUSE_LASTPRIVATE);
	  if (omp_is_reference (var))
	    new_var = build_simple_mem_ref_loc (clause_loc, new_var);
	  x = lang_hooks.decls.omp_clause_assign_op (c, x, new_var);
	  gimplify_and_add (x, this_stmt_list);

	  if (lab2)
	    gimple_seq_add_stmt (this_stmt_list, gimple_build_label (lab2));
	}

     next:
      c = OMP_CLAUSE_CHAIN (c);
      if (c == NULL && !par_clauses)
	{
	  /* If this was a workshare clause, see if it had been combined
	     with its parallel.  In that case, continue looking for the
	     clauses also on the parallel statement itself.  */
	  if (is_parallel_ctx (ctx))
	    break;

	  ctx = ctx->outer;
	  if (ctx == NULL || !is_parallel_ctx (ctx))
	    break;

	  c = omp_find_clause (gimple_omp_parallel_clauses (ctx->stmt),
			       OMP_CLAUSE_LASTPRIVATE);
	  par_clauses = true;
	}
    }

  if (label)
    gimple_seq_add_stmt (stmt_list, gimple_build_label (label));
  gimple_seq_add_seq (stmt_list, post_stmt_list);
}
/* Lower the OpenACC reductions of CLAUSES for compute axis LEVEL
   (which might be a placeholder).  INNER is true if this is an inner
   axis of a multi-axis loop.  FORK and JOIN are (optional) fork and
   join markers.  Generate the before-loop forking sequence in
   FORK_SEQ and the after-loop joining sequence to JOIN_SEQ.  The
   general form of these sequences is

     GOACC_REDUCTION_SETUP
     GOACC_FORK
     GOACC_REDUCTION_INIT
     ...
     GOACC_REDUCTION_FINI
     GOACC_JOIN
     GOACC_REDUCTION_TEARDOWN.

   Each reduction becomes four IFN_GOACC_REDUCTION calls (setup, init,
   fini, teardown), distinguished by their first argument; the oacc
   device lowering pass later expands them per-target.  OFF assigns each
   reduction a distinct, suitably aligned slot in the reduction buffer.  */

static void
lower_oacc_reductions (location_t loc, tree clauses, tree level, bool inner,
		       gcall *fork, gcall *join, gimple_seq *fork_seq,
		       gimple_seq *join_seq, omp_context *ctx)
{
  gimple_seq before_fork = NULL;
  gimple_seq after_fork = NULL;
  gimple_seq before_join = NULL;
  gimple_seq after_join = NULL;
  tree init_code = NULL_TREE, fini_code = NULL_TREE,
    setup_code = NULL_TREE, teardown_code = NULL_TREE;
  unsigned offset = 0;

  for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION)
      {
	/* No 'reduction' clauses on OpenACC 'kernels'.  */
	gcc_checking_assert (!is_oacc_kernels (ctx));

	tree orig = OMP_CLAUSE_DECL (c);
	tree var = maybe_lookup_decl (orig, ctx);
	tree ref_to_res = NULL_TREE;
	tree incoming, outgoing, v1, v2, v3;
	bool is_private = false;

	/* Canonicalize the reduction operator: MINUS sums partials just
	   like PLUS, and the logical ops reduce single-bit truth values
	   so the bitwise forms are equivalent.  */
	enum tree_code rcode = OMP_CLAUSE_REDUCTION_CODE (c);
	if (rcode == MINUS_EXPR)
	  rcode = PLUS_EXPR;
	else if (rcode == TRUTH_ANDIF_EXPR)
	  rcode = BIT_AND_EXPR;
	else if (rcode == TRUTH_ORIF_EXPR)
	  rcode = BIT_IOR_EXPR;
	tree op = build_int_cst (unsigned_type_node, rcode);

	if (!var)
	  var = orig;

	incoming = outgoing = var;

	if (!inner)
	  {
	    /* See if an outer construct also reduces this variable.  */
	    omp_context *outer = ctx;

	    while (omp_context *probe = outer->outer)
	      {
		enum gimple_code type = gimple_code (probe->stmt);
		tree cls;

		switch (type)
		  {
		  case GIMPLE_OMP_FOR:
		    cls = gimple_omp_for_clauses (probe->stmt);
		    break;

		  case GIMPLE_OMP_TARGET:
		    /* No 'reduction' clauses inside OpenACC 'kernels'
		       regions.  */
		    gcc_checking_assert (!is_oacc_kernels (probe));

		    if (!is_gimple_omp_offloaded (probe->stmt))
		      goto do_lookup;

		    cls = gimple_omp_target_clauses (probe->stmt);
		    break;

		  default:
		    goto do_lookup;
		  }

		outer = probe;
		for (; cls;  cls = OMP_CLAUSE_CHAIN (cls))
		  if (OMP_CLAUSE_CODE (cls) == OMP_CLAUSE_REDUCTION
		      && orig == OMP_CLAUSE_DECL (cls))
		    {
		      incoming = outgoing = lookup_decl (orig, probe);
		      goto has_outer_reduction;
		    }
		  else if ((OMP_CLAUSE_CODE (cls) == OMP_CLAUSE_FIRSTPRIVATE
			    || OMP_CLAUSE_CODE (cls) == OMP_CLAUSE_PRIVATE)
			   && orig == OMP_CLAUSE_DECL (cls))
		    {
		      is_private = true;
		      goto do_lookup;
		    }
	      }

	  do_lookup:
	    /* This is the outermost construct with this reduction,
	       see if there's a mapping for it.  */
	    if (gimple_code (outer->stmt) == GIMPLE_OMP_TARGET
		&& maybe_lookup_field (orig, outer) && !is_private)
	      {
		ref_to_res = build_receiver_ref (orig, false, outer);
		if (omp_is_reference (orig))
		  ref_to_res = build_simple_mem_ref (ref_to_res);

		tree type = TREE_TYPE (var);
		if (POINTER_TYPE_P (type))
		  type = TREE_TYPE (type);

		outgoing = var;
		incoming = omp_reduction_init_op (loc, rcode, type);
	      }
	    else
	      {
		/* Try to look at enclosing contexts for reduction var,
		   use original if no mapping found.  */
		tree t = NULL_TREE;
		omp_context *c = ctx->outer;
		while (c && !t)
		  {
		    t = maybe_lookup_decl (orig, c);
		    c = c->outer;
		  }
		incoming = outgoing = (t ? t : orig);
	      }

	  has_outer_reduction:;
	  }

	if (!ref_to_res)
	  ref_to_res = integer_zero_node;

	if (omp_is_reference (orig))
	  {
	    /* Reference reductions: materialize three pointer temporaries
	       so setup/init/fini each get a dereferenceable operand.  */
	    tree type = TREE_TYPE (var);
	    const char *id = IDENTIFIER_POINTER (DECL_NAME (var));

	    if (!inner)
	      {
		tree x = create_tmp_var (TREE_TYPE (type), id);
		gimplify_assign (var, build_fold_addr_expr (x), fork_seq);
	      }

	    v1 = create_tmp_var (type, id);
	    v2 = create_tmp_var (type, id);
	    v3 = create_tmp_var (type, id);

	    gimplify_assign (v1, var, fork_seq);
	    gimplify_assign (v2, var, fork_seq);
	    gimplify_assign (v3, var, fork_seq);

	    var = build_simple_mem_ref (var);
	    v1 = build_simple_mem_ref (v1);
	    v2 = build_simple_mem_ref (v2);
	    v3 = build_simple_mem_ref (v3);
	    outgoing = build_simple_mem_ref (outgoing);

	    if (!TREE_CONSTANT (incoming))
	      incoming = build_simple_mem_ref (incoming);
	  }
	else
	  v1 = v2 = v3 = var;

	/* Determine position in reduction buffer, which may be used
	   by target.  The parser has ensured that this is not a
	   variable-sized type.  */
	fixed_size_mode mode
	  = as_a <fixed_size_mode> (TYPE_MODE (TREE_TYPE (var)));
	unsigned align = GET_MODE_ALIGNMENT (mode) /  BITS_PER_UNIT;
	offset = (offset + align - 1) & ~(align - 1);
	tree off = build_int_cst (sizetype, offset);
	offset += GET_MODE_SIZE (mode);

	if (!init_code)
	  {
	    /* Lazily build the four reduction-kind selector constants,
	       shared by every clause in this list.  */
	    init_code = build_int_cst (integer_type_node,
				       IFN_GOACC_REDUCTION_INIT);
	    fini_code = build_int_cst (integer_type_node,
				       IFN_GOACC_REDUCTION_FINI);
	    setup_code = build_int_cst (integer_type_node,
					IFN_GOACC_REDUCTION_SETUP);
	    teardown_code = build_int_cst (integer_type_node,
					   IFN_GOACC_REDUCTION_TEARDOWN);
	  }

	tree setup_call
	  = build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
					  TREE_TYPE (var), 6, setup_code,
					  unshare_expr (ref_to_res),
					  incoming, level, op, off);
	tree init_call
	  = build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
					  TREE_TYPE (var), 6, init_code,
					  unshare_expr (ref_to_res),
					  v1, level, op, off);
	tree fini_call
	  = build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
					  TREE_TYPE (var), 6, fini_code,
					  unshare_expr (ref_to_res),
					  v2, level, op, off);
	tree teardown_call
	  = build_call_expr_internal_loc (loc, IFN_GOACC_REDUCTION,
					  TREE_TYPE (var), 6, teardown_code,
					  ref_to_res, v3, level, op, off);

	gimplify_assign (v1, setup_call, &before_fork);
	gimplify_assign (v2, init_call, &after_fork);
	gimplify_assign (v3, fini_call, &before_join);
	gimplify_assign (outgoing, teardown_call, &after_join);
      }

  /* Now stitch things together.  */
  gimple_seq_add_seq (fork_seq, before_fork);
  if (fork)
    gimple_seq_add_stmt (fork_seq, fork);
  gimple_seq_add_seq (fork_seq, after_fork);
  gimple_seq_add_seq (join_seq, before_join);
  if (join)
    gimple_seq_add_stmt (join_seq, join);
  gimple_seq_add_seq (join_seq, after_join);
}
/* Generate code to implement the REDUCTION clauses, append it
   to STMT_SEQP.  CLIST if non-NULL is a pointer to a sequence
   that should be emitted also inside of the critical section,
   in that case clear *CLIST afterwards, otherwise leave it as is
   and let the caller emit it itself.

   Merges each thread's private reduction copy into the shared variable.
   With exactly one simple reduction clause an OMP_ATOMIC update is used;
   otherwise all merges are wrapped in a single
   GOMP_atomic_start/GOMP_atomic_end critical section.  Array sections
   (MEM_REF decls) are merged element-by-element in an emitted loop, and
   user-defined reductions run their GIMPLE merge sequence with the
   placeholders bound to the shared and private operands.  */

static void
lower_reduction_clauses (tree clauses, gimple_seq *stmt_seqp,
			 gimple_seq *clist, omp_context *ctx)
{
  gimple_seq sub_seq = NULL;
  gimple *stmt;
  tree x, c;
  int count = 0;

  /* OpenACC loop reductions are handled elsewhere.  */
  if (is_gimple_omp_oacc (ctx->stmt))
    return;

  /* SIMD reductions are handled in lower_rec_input_clauses.  */
  if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
      && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_SIMD)
    return;

  /* inscan reductions are handled elsewhere.  */
  if (ctx->scan_inclusive || ctx->scan_exclusive)
    return;

  /* First see if there is exactly one reduction clause.  Use OMP_ATOMIC
     update in that case, otherwise use a lock.  */
  for (c = clauses; c && count < 2; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	&& !OMP_CLAUSE_REDUCTION_TASK (c))
      {
	if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)
	    || TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF)
	  {
	    /* Never use OMP_ATOMIC for array reductions or UDRs.  */
	    count = -1;
	    break;
	  }
	count++;
      }

  if (count == 0)
    return;

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree var, ref, new_var, orig_var;
      enum tree_code code;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
	  || OMP_CLAUSE_REDUCTION_TASK (c))
	continue;

      enum omp_clause_code ccode = OMP_CLAUSE_REDUCTION;
      orig_var = var = OMP_CLAUSE_DECL (c);
      if (TREE_CODE (var) == MEM_REF)
	{
	  /* Array section: peel the MEM_REF down to the underlying decl.  */
	  var = TREE_OPERAND (var, 0);
	  if (TREE_CODE (var) == POINTER_PLUS_EXPR)
	    var = TREE_OPERAND (var, 0);
	  if (TREE_CODE (var) == ADDR_EXPR)
	    var = TREE_OPERAND (var, 0);
	  else
	    {
	      /* If this is a pointer or referenced based array
		 section, the var could be private in the outer
		 context e.g. on orphaned loop construct.  Pretend this
		 is private variable's outer reference.  */
	      ccode = OMP_CLAUSE_PRIVATE;
	      if (TREE_CODE (var) == INDIRECT_REF)
		var = TREE_OPERAND (var, 0);
	    }
	  orig_var = var;
	  if (is_variable_sized (var))
	    {
	      gcc_assert (DECL_HAS_VALUE_EXPR_P (var));
	      var = DECL_VALUE_EXPR (var);
	      gcc_assert (TREE_CODE (var) == INDIRECT_REF);
	      var = TREE_OPERAND (var, 0);
	      gcc_assert (DECL_P (var));
	    }
	}
      new_var = lookup_decl (var, ctx);
      if (var == OMP_CLAUSE_DECL (c) && omp_is_reference (var))
	new_var = build_simple_mem_ref_loc (clause_loc, new_var);
      ref = build_outer_var_ref (var, ctx, ccode);
      code = OMP_CLAUSE_REDUCTION_CODE (c);

      /* reduction(-:var) sums up the partial results, so it acts
	 identically to reduction(+:var).  */
      if (code == MINUS_EXPR)
	code = PLUS_EXPR;

      if (count == 1)
	{
	  /* Single simple reduction: emit "*addr = *addr OP new_var" as an
	     OMP_ATOMIC update and return immediately.  */
	  tree addr = build_fold_addr_expr_loc (clause_loc, ref);

	  addr = save_expr (addr);
	  ref = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (addr)), addr);
	  x = fold_build2_loc (clause_loc, code, TREE_TYPE (ref), ref, new_var);
	  x = build2 (OMP_ATOMIC, void_type_node, addr, x);
	  OMP_ATOMIC_MEMORY_ORDER (x) = OMP_MEMORY_ORDER_RELAXED;
	  gimplify_and_add (x, stmt_seqp);
	  return;
	}
      else if (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF)
	{
	  /* Array section: emit an element-wise merge loop into sub_seq,
	     walking private (NEW_VAR) and shared (REF) pointers together.  */
	  tree d = OMP_CLAUSE_DECL (c);
	  tree type = TREE_TYPE (d);
	  tree v = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
	  tree i = create_tmp_var (TREE_TYPE (v));
	  tree ptype = build_pointer_type (TREE_TYPE (type));
	  tree bias = TREE_OPERAND (d, 1);
	  d = TREE_OPERAND (d, 0);
	  if (TREE_CODE (d) == POINTER_PLUS_EXPR)
	    {
	      tree b = TREE_OPERAND (d, 1);
	      b = maybe_lookup_decl (b, ctx);
	      if (b == NULL)
		{
		  b = TREE_OPERAND (d, 1);
		  b = maybe_lookup_decl_in_outer_ctx (b, ctx);
		}
	      if (integer_zerop (bias))
		bias = b;
	      else
		{
		  bias = fold_convert_loc (clause_loc, TREE_TYPE (b), bias);
		  bias = fold_build2_loc (clause_loc, PLUS_EXPR,
					  TREE_TYPE (b), b, bias);
		}
	      d = TREE_OPERAND (d, 0);
	    }
	  /* For ref build_outer_var_ref already performs this, so
	     only new_var needs a dereference.  */
	  if (TREE_CODE (d) == INDIRECT_REF)
	    {
	      new_var = build_simple_mem_ref_loc (clause_loc, new_var);
	      gcc_assert (omp_is_reference (var) && var == orig_var);
	    }
	  else if (TREE_CODE (d) == ADDR_EXPR)
	    {
	      if (orig_var == var)
		{
		  new_var = build_fold_addr_expr (new_var);
		  ref = build_fold_addr_expr (ref);
		}
	    }
	  else
	    {
	      gcc_assert (orig_var == var);
	      if (omp_is_reference (var))
		ref = build_fold_addr_expr (ref);
	    }
	  if (DECL_P (v))
	    {
	      tree t = maybe_lookup_decl (v, ctx);
	      if (t)
		v = t;
	      else
		v = maybe_lookup_decl_in_outer_ctx (v, ctx);
	      gimplify_expr (&v, stmt_seqp, NULL, is_gimple_val, fb_rvalue);
	    }
	  if (!integer_zerop (bias))
	    {
	      bias = fold_convert_loc (clause_loc, sizetype, bias);
	      new_var = fold_build2_loc (clause_loc, POINTER_PLUS_EXPR,
					 TREE_TYPE (new_var), new_var,
					 unshare_expr (bias));
	      ref = fold_build2_loc (clause_loc, POINTER_PLUS_EXPR,
				     TREE_TYPE (ref), ref, bias);
	    }
	  new_var = fold_convert_loc (clause_loc, ptype, new_var);
	  ref = fold_convert_loc (clause_loc, ptype, ref);
	  tree m = create_tmp_var (ptype);
	  gimplify_assign (m, new_var, stmt_seqp);
	  new_var = m;
	  m = create_tmp_var (ptype);
	  gimplify_assign (m, ref, stmt_seqp);
	  ref = m;
	  gimplify_assign (i, build_int_cst (TREE_TYPE (v), 0), stmt_seqp);
	  tree body = create_artificial_label (UNKNOWN_LOCATION);
	  tree end = create_artificial_label (UNKNOWN_LOCATION);
	  gimple_seq_add_stmt (&sub_seq, gimple_build_label (body));
	  tree priv = build_simple_mem_ref_loc (clause_loc, new_var);
	  tree out = build_simple_mem_ref_loc (clause_loc, ref);
	  if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    {
	      /* UDR: run the user's merge sequence with the placeholders
		 bound to the current shared/private elements.  */
	      tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
	      tree decl_placeholder
		= OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c);
	      SET_DECL_VALUE_EXPR (placeholder, out);
	      DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
	      SET_DECL_VALUE_EXPR (decl_placeholder, priv);
	      DECL_HAS_VALUE_EXPR_P (decl_placeholder) = 1;
	      lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	      gimple_seq_add_seq (&sub_seq,
				  OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
	      OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
	      OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
	      OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = NULL;
	    }
	  else
	    {
	      x = build2 (code, TREE_TYPE (out), out, priv);
	      out = unshare_expr (out);
	      gimplify_assign (out, x, &sub_seq);
	    }
	  /* Advance both pointers by one element and loop while i <= v.  */
	  gimple *g = gimple_build_assign (new_var, POINTER_PLUS_EXPR, new_var,
					   TYPE_SIZE_UNIT (TREE_TYPE (type)));
	  gimple_seq_add_stmt (&sub_seq, g);
	  g = gimple_build_assign (ref, POINTER_PLUS_EXPR, ref,
				   TYPE_SIZE_UNIT (TREE_TYPE (type)));
	  gimple_seq_add_stmt (&sub_seq, g);
	  g = gimple_build_assign (i, PLUS_EXPR, i,
				   build_int_cst (TREE_TYPE (i), 1));
	  gimple_seq_add_stmt (&sub_seq, g);
	  g = gimple_build_cond (LE_EXPR, i, v, body, end);
	  gimple_seq_add_stmt (&sub_seq, g);
	  gimple_seq_add_stmt (&sub_seq, gimple_build_label (end));
	}
      else if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	{
	  /* Scalar UDR: bind the placeholder to the shared variable and
	     splice in the user's merge sequence.  */
	  tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);

	  if (omp_is_reference (var)
	      && !useless_type_conversion_p (TREE_TYPE (placeholder),
					     TREE_TYPE (ref)))
	    ref = build_fold_addr_expr_loc (clause_loc, ref);
	  SET_DECL_VALUE_EXPR (placeholder, ref);
	  DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
	  lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	  gimple_seq_add_seq (&sub_seq, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
	  OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
	  OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
	}
      else
	{
	  x = build2 (code, TREE_TYPE (ref), ref, new_var);
	  ref = build_outer_var_ref (var, ctx);
	  gimplify_assign (ref, x, &sub_seq);
	}
    }

  /* Wrap all accumulated merges (and the caller's CLIST, if any) in a
     single GOMP atomic critical section.  */
  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START),
			    0);
  gimple_seq_add_stmt (stmt_seqp, stmt);

  gimple_seq_add_seq (stmt_seqp, sub_seq);

  if (clist)
    {
      gimple_seq_add_seq (stmt_seqp, *clist);
      *clist = NULL;
    }

  stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END),
			    0);
  gimple_seq_add_stmt (stmt_seqp, stmt);
}
/* Generate code to implement the COPYPRIVATE clauses.

   For each copyprivate variable, append to SLIST the statements the
   broadcasting thread uses to publish its value (or its address, when
   the variable is passed by reference) into the sender record, and
   append to RLIST the statements every thread uses to copy that value
   back out of the receiver record into its own copy.  */

static void
lower_copyprivate_clauses (tree clauses, gimple_seq *slist, gimple_seq *rlist,
			   omp_context *ctx)
{
  for (tree clause = clauses; clause; clause = OMP_CLAUSE_CHAIN (clause))
    {
      if (OMP_CLAUSE_CODE (clause) != OMP_CLAUSE_COPYPRIVATE)
	continue;

      location_t loc = OMP_CLAUSE_LOCATION (clause);
      tree decl = OMP_CLAUSE_DECL (clause);
      bool pass_by_ref = use_pointer_for_field (decl, NULL);

      /* Sender side: store the value (or its address) into the field.  */
      tree send_field = build_sender_ref (decl, ctx);
      tree outer_decl = lookup_decl_in_outer_ctx (decl, ctx);
      tree src = outer_decl;
      if (pass_by_ref)
	{
	  src = build_fold_addr_expr_loc (loc, outer_decl);
	  src = fold_convert_loc (loc, TREE_TYPE (send_field), src);
	}
      gimplify_assign (send_field, src, slist);

      /* Receiver side: fetch the published value back.  */
      tree recv = build_receiver_ref (decl, false, ctx);
      if (pass_by_ref)
	{
	  recv = fold_convert_loc (loc,
				   build_pointer_type (TREE_TYPE (outer_decl)),
				   recv);
	  recv = build_fold_indirect_ref_loc (loc, recv);
	}
      if (omp_is_reference (decl))
	{
	  recv = fold_convert_loc (loc, TREE_TYPE (outer_decl), recv);
	  recv = build_simple_mem_ref_loc (loc, recv);
	  outer_decl = build_simple_mem_ref_loc (loc, outer_decl);
	}
      tree assign
	= lang_hooks.decls.omp_clause_assign_op (clause, outer_decl, recv);
      gimplify_and_add (assign, rlist);
    }
}
/* Generate code to implement the clauses, FIRSTPRIVATE, COPYIN, LASTPRIVATE,
   and REDUCTION from the sender (aka parent) side.  Stores of the outer
   value (or its address) into the sender record are appended to *ILIST
   (run before the region); copies back out of the record are appended
   to *OLIST (run after the region).  */
static void
lower_send_clauses (tree clauses, gimple_seq *ilist, gimple_seq *olist,
		    omp_context *ctx)
{
  tree c, t;
  int ignored_looptemp = 0;
  bool is_taskloop = false;

  /* For taskloop, ignore first two _looptemp_ clauses, those are initialized
     by GOMP_taskloop.  */
  if (is_task_ctx (ctx) && gimple_omp_task_taskloop_p (ctx->stmt))
    {
      ignored_looptemp = 2;
      is_taskloop = true;
    }

  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    {
      tree val, ref, x, var;
      bool by_ref, do_in = false, do_out = false;
      location_t clause_loc = OMP_CLAUSE_LOCATION (c);

      /* Filter to the clause kinds that need sender-side code.  */
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    break;
	  continue;
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_LASTPRIVATE:
	case OMP_CLAUSE_IN_REDUCTION:
	case OMP_CLAUSE__REDUCTEMP_:
	  break;
	case OMP_CLAUSE_REDUCTION:
	  if (is_task_ctx (ctx) || OMP_CLAUSE_REDUCTION_TASK (c))
	    continue;
	  break;
	case OMP_CLAUSE_SHARED:
	  if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
	    break;
	  continue;
	case OMP_CLAUSE__LOOPTEMP_:
	  if (ignored_looptemp)
	    {
	      ignored_looptemp--;
	      continue;
	    }
	  break;
	default:
	  continue;
	}

      val = OMP_CLAUSE_DECL (c);
      /* For reductions whose decl is a MEM_REF (array sections), peel
	 back to the underlying base declaration.  */
      if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	   || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION)
	  && TREE_CODE (val) == MEM_REF)
	{
	  val = TREE_OPERAND (val, 0);
	  if (TREE_CODE (val) == POINTER_PLUS_EXPR)
	    val = TREE_OPERAND (val, 0);
	  if (TREE_CODE (val) == INDIRECT_REF
	      || TREE_CODE (val) == ADDR_EXPR)
	    val = TREE_OPERAND (val, 0);
	  if (is_variable_sized (val))
	    continue;
	}

      /* For OMP_CLAUSE_SHARED_FIRSTPRIVATE, look beyond the
	 outer taskloop region.  */
      omp_context *ctx_for_o = ctx;
      if (is_taskloop
	  && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
	  && OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
	ctx_for_o = ctx->outer;

      var = lookup_decl_in_outer_ctx (val, ctx_for_o);

      /* Global variables are visible in the child without being sent,
	 except for copyin and certain task cases.  */
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_COPYIN
	  && is_global_var (var)
	  && (val == OMP_CLAUSE_DECL (c)
	      || !is_task_ctx (ctx)
	      || (TREE_CODE (TREE_TYPE (val)) != POINTER_TYPE
		  && (TREE_CODE (TREE_TYPE (val)) != REFERENCE_TYPE
		      || (TREE_CODE (TREE_TYPE (TREE_TYPE (val)))
			  != POINTER_TYPE)))))
	continue;

      /* Member accesses are routed through a dummy var whose
	 DECL_VALUE_EXPR names the real object; remap it to the outer
	 context's copy.  */
      t = omp_member_access_dummy_var (var);
      if (t)
	{
	  var = DECL_VALUE_EXPR (var);
	  tree o = maybe_lookup_decl_in_outer_ctx (t, ctx_for_o);
	  if (o != t)
	    var = unshare_and_remap (var, t, o);
	  else
	    var = unshare_expr (var);
	}

      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED)
	{
	  /* Handle taskloop firstprivate/lastprivate, where the
	     lastprivate on GIMPLE_OMP_TASK is represented as
	     OMP_CLAUSE_SHARED_FIRSTPRIVATE.  */
	  tree f = lookup_sfield ((splay_tree_key) &DECL_UID (val), ctx);
	  x = omp_build_component_ref (ctx->sender_decl, f);
	  if (use_pointer_for_field (val, ctx))
	    var = build_fold_addr_expr (var);
	  gimplify_assign (x, var, ilist);
	  DECL_ABSTRACT_ORIGIN (f) = NULL;
	  continue;
	}

      if (((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION
	    && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_IN_REDUCTION)
	   || val == OMP_CLAUSE_DECL (c))
	  && is_variable_sized (val))
	continue;
      by_ref = use_pointer_for_field (val, NULL);

      /* Decide whether the value flows into the region (DO_IN), out of
	 it (DO_OUT), or both.  */
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_FIRSTPRIVATE:
	  if (OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (c)
	      && !by_ref
	      && is_task_ctx (ctx))
	    TREE_NO_WARNING (var) = 1;
	  do_in = true;
	  break;

	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE__LOOPTEMP_:
	case OMP_CLAUSE__REDUCTEMP_:
	  do_in = true;
	  break;

	case OMP_CLAUSE_LASTPRIVATE:
	  if (by_ref || omp_is_reference (val))
	    {
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		continue;
	      do_in = true;
	    }
	  else
	    {
	      do_out = true;
	      if (lang_hooks.decls.omp_private_outer_ref (val))
		do_in = true;
	    }
	  break;

	case OMP_CLAUSE_REDUCTION:
	case OMP_CLAUSE_IN_REDUCTION:
	  do_in = true;
	  if (val == OMP_CLAUSE_DECL (c))
	    {
	      if (is_task_ctx (ctx))
		by_ref = use_pointer_for_field (val, ctx);
	      else
		do_out = !(by_ref || omp_is_reference (val));
	    }
	  else
	    by_ref = TREE_CODE (TREE_TYPE (val)) == ARRAY_TYPE;
	  break;

	default:
	  gcc_unreachable ();
	}

      if (do_in)
	{
	  ref = build_sender_ref (val, ctx);
	  x = by_ref ? build_fold_addr_expr_loc (clause_loc, var) : var;
	  gimplify_assign (ref, x, ilist);
	  if (is_task_ctx (ctx))
	    DECL_ABSTRACT_ORIGIN (TREE_OPERAND (ref, 1)) = NULL;
	}

      if (do_out)
	{
	  ref = build_sender_ref (val, ctx);
	  gimplify_assign (var, ref, olist);
	}
    }
}
/* Generate code to implement SHARED from the sender (aka parent)
   side.  This is trickier, since GIMPLE_OMP_PARALLEL_CLAUSES doesn't
   list things that got automatically shared; so walk the fields of the
   sender record instead and emit the in/out copies for each.  */
static void
lower_send_shared_vars (gimple_seq *ilist, gimple_seq *olist, omp_context *ctx)
{
  tree var, ovar, nvar, t, f, x, record_type;

  /* No record means nothing was shared.  */
  if (ctx->record_type == NULL)
    return;

  record_type = ctx->srecord_type ? ctx->srecord_type : ctx->record_type;
  for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
    {
      /* DECL_ABSTRACT_ORIGIN of a field names the original shared
	 variable; skip fields without one.  */
      ovar = DECL_ABSTRACT_ORIGIN (f);
      if (!ovar || TREE_CODE (ovar) == FIELD_DECL)
	continue;

      nvar = maybe_lookup_decl (ovar, ctx);
      if (!nvar || !DECL_HAS_VALUE_EXPR_P (nvar))
	continue;

      /* If CTX is a nested parallel directive.  Find the immediately
	 enclosing parallel or workshare construct that contains a
	 mapping for OVAR.  */
      var = lookup_decl_in_outer_ctx (ovar, ctx);

      t = omp_member_access_dummy_var (var);
      if (t)
	{
	  var = DECL_VALUE_EXPR (var);
	  tree o = maybe_lookup_decl_in_outer_ctx (t, ctx);
	  if (o != t)
	    var = unshare_and_remap (var, t, o);
	  else
	    var = unshare_expr (var);
	}

      if (use_pointer_for_field (ovar, ctx))
	{
	  /* Send the address of the variable.  */
	  x = build_sender_ref (ovar, ctx);
	  if (TREE_CODE (TREE_TYPE (f)) == ARRAY_TYPE
	      && TREE_TYPE (f) == TREE_TYPE (ovar))
	    {
	      gcc_assert (is_parallel_ctx (ctx)
			  && DECL_ARTIFICIAL (ovar));
	      /* _condtemp_ clause.  */
	      var = build_constructor (TREE_TYPE (x), NULL);
	    }
	  else
	    var = build_fold_addr_expr (var);
	  gimplify_assign (x, var, ilist);
	}
      else
	{
	  /* Send by value, and copy the (possibly modified) value back
	     out afterwards.  */
	  x = build_sender_ref (ovar, ctx);
	  gimplify_assign (x, var, ilist);

	  if (!TREE_READONLY (var)
	      /* We don't need to receive a new reference to a result
	         or parm decl.  In fact we may not store to it as we will
		 invalidate any pending RSO and generate wrong gimple
		 during inlining.  */
	      && !((TREE_CODE (var) == RESULT_DECL
		    || TREE_CODE (var) == PARM_DECL)
		   && DECL_BY_REFERENCE (var)))
	    {
	      x = build_sender_ref (ovar, ctx);
	      gimplify_assign (var, x, olist);
	    }
	}
    }
}
/* Emit an OpenACC head marker call, encapsulating the partitioning and
   other information that must be processed by the target compiler.
   Return the maximum number of dimensions the associated loop might
   be partitioned over.  */
static unsigned
lower_oacc_head_mark (location_t loc, tree ddvar, tree clauses,
		      gimple_seq *seq, omp_context *ctx)
{
  unsigned levels = 0;
  unsigned tag = 0;
  tree gang_static = NULL_TREE;
  auto_vec<tree, 5> args;

  args.quick_push (build_int_cst
		   (integer_type_node, IFN_UNIQUE_OACC_HEAD_MARK));
  args.quick_push (ddvar);

  /* Accumulate the loop's partitioning flags into TAG and count the
     explicitly partitioned dimensions in LEVELS.  */
  for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_GANG:
	  tag |= OLF_DIM_GANG;
	  gang_static = OMP_CLAUSE_GANG_STATIC_EXPR (c);
	  /* static:* is represented by -1, and we can ignore it, as
	     scheduling is always static.  */
	  if (gang_static && integer_minus_onep (gang_static))
	    gang_static = NULL_TREE;
	  levels++;
	  break;

	case OMP_CLAUSE_WORKER:
	  tag |= OLF_DIM_WORKER;
	  levels++;
	  break;

	case OMP_CLAUSE_VECTOR:
	  tag |= OLF_DIM_VECTOR;
	  levels++;
	  break;

	case OMP_CLAUSE_SEQ:
	  tag |= OLF_SEQ;
	  break;

	case OMP_CLAUSE_AUTO:
	  tag |= OLF_AUTO;
	  break;

	case OMP_CLAUSE_INDEPENDENT:
	  tag |= OLF_INDEPENDENT;
	  break;

	case OMP_CLAUSE_TILE:
	  tag |= OLF_TILE;
	  break;

	default:
	  continue;
	}
    }

  if (gang_static)
    {
      if (DECL_P (gang_static))
	gang_static = build_outer_var_ref (gang_static, ctx);
      tag |= OLF_GANG_STATIC;
    }

  omp_context *tgt = enclosing_target_ctx (ctx);
  if (!tgt || is_oacc_parallel_or_serial (tgt))
    ;
  else if (is_oacc_kernels (tgt))
    /* Not using this loops handling inside OpenACC 'kernels' regions.  */
    gcc_unreachable ();
  else
    gcc_unreachable ();

  /* In a parallel region, loops are implicitly INDEPENDENT.  */
  if (!tgt || is_oacc_parallel_or_serial (tgt))
    tag |= OLF_INDEPENDENT;

  if (tag & OLF_TILE)
    /* Tiling could use all 3 levels.  */
    levels = 3;
  else
    {
      /* A loop lacking SEQ, GANG, WORKER and/or VECTOR could be AUTO.
	 Ensure at least one level, or 2 for possible auto
	 partitioning */
      bool maybe_auto = !(tag & (((GOMP_DIM_MASK (GOMP_DIM_MAX) - 1)
				  << OLF_DIM_BASE) | OLF_SEQ));

      if (levels < 1u + maybe_auto)
	levels = 1u + maybe_auto;
    }

  args.quick_push (build_int_cst (integer_type_node, levels));
  args.quick_push (build_int_cst (integer_type_node, tag));
  if (gang_static)
    args.quick_push (gang_static);

  gcall *call = gimple_build_call_internal_vec (IFN_UNIQUE, args);
  gimple_set_location (call, loc);
  gimple_set_lhs (call, ddvar);
  gimple_seq_add_stmt (seq, call);

  return levels;
}
/* Emit an OpenACC loop head or tail marker to SEQ.  TOFOLLOW, when
   non-NULL, is passed as an extra argument of the marker call.  */
static void
lower_oacc_loop_marker (location_t loc, tree ddvar, bool head,
			tree tofollow, gimple_seq *seq)
{
  /* Head and tail markers differ only in the IFN_UNIQUE sub-code
     passed as the first argument.  */
  int kind;
  if (head)
    kind = IFN_UNIQUE_OACC_HEAD_MARK;
  else
    kind = IFN_UNIQUE_OACC_TAIL_MARK;
  tree marker_arg = build_int_cst (integer_type_node, kind);
  /* TOFOLLOW is optional; only pass it when present.  */
  int num_args = tofollow == NULL_TREE ? 2 : 3;
  gcall *marker_call
    = gimple_build_call_internal (IFN_UNIQUE, num_args,
				  marker_arg, ddvar, tofollow);
  gimple_set_location (marker_call, loc);
  gimple_set_lhs (marker_call, ddvar);
  gimple_seq_add_stmt (seq, marker_call);
}
/* Generate the before and after OpenACC loop sequences.  CLAUSES are
   the loop clauses, from which we extract reductions.  Initialize
   HEAD and TAIL.  For each partitioning level one fork is appended to
   HEAD and a matching join prepended to TAIL, with reduction
   setup/teardown emitted around them.  */
static void
lower_oacc_head_tail (location_t loc, tree clauses,
		      gimple_seq *head, gimple_seq *tail, omp_context *ctx)
{
  bool inner = false;
  tree ddvar = create_tmp_var (integer_type_node, ".data_dep");
  gimple_seq_add_stmt (head, gimple_build_assign (ddvar, integer_zero_node));

  unsigned count = lower_oacc_head_mark (loc, ddvar, clauses, head, ctx);
  tree fork_kind = build_int_cst (unsigned_type_node, IFN_UNIQUE_OACC_FORK);
  tree join_kind = build_int_cst (unsigned_type_node, IFN_UNIQUE_OACC_JOIN);

  gcc_assert (count);
  /* COUNT counts down while DONE counts up, so the tail markers mirror
     the head markers.  */
  for (unsigned done = 1; count; count--, done++)
    {
      gimple_seq fork_seq = NULL;
      gimple_seq join_seq = NULL;

      tree place = build_int_cst (integer_type_node, -1);
      gcall *fork = gimple_build_call_internal (IFN_UNIQUE, 3,
						fork_kind, ddvar, place);
      gimple_set_location (fork, loc);
      gimple_set_lhs (fork, ddvar);

      gcall *join = gimple_build_call_internal (IFN_UNIQUE, 3,
						join_kind, ddvar, place);
      gimple_set_location (join, loc);
      gimple_set_lhs (join, ddvar);

      /* Mark the beginning of this level sequence.  */
      if (inner)
	lower_oacc_loop_marker (loc, ddvar, true,
				build_int_cst (integer_type_node, count),
				&fork_seq);
      lower_oacc_loop_marker (loc, ddvar, false,
			      build_int_cst (integer_type_node, done),
			      &join_seq);

      lower_oacc_reductions (loc, clauses, place, inner,
			     fork, join, &fork_seq, &join_seq, ctx);

      /* Append this level to head.  */
      gimple_seq_add_seq (head, fork_seq);
      /* Prepend it to tail.  */
      gimple_seq_add_seq (&join_seq, *tail);
      *tail = join_seq;

      inner = true;
    }

  /* Mark the end of the sequence.  */
  lower_oacc_loop_marker (loc, ddvar, true, NULL_TREE, head);
  lower_oacc_loop_marker (loc, ddvar, false, NULL_TREE, tail);
}
/* If exceptions are enabled, wrap the statements in BODY in a
   MUST_NOT_THROW catch handler and return it.  This prevents programs
   from violating the structured block semantics with throws.  */
static gimple_seq
maybe_catch_exception (gimple_seq body)
{
  if (!flag_exceptions)
    return body;

  /* Ask the frontend what to do when a throw escapes the region; fall
     back to trapping.  */
  tree fn = (lang_hooks.eh_protect_cleanup_actions != NULL
	     ? lang_hooks.eh_protect_cleanup_actions ()
	     : builtin_decl_explicit (BUILT_IN_TRAP));

  gimple *mnt = gimple_build_eh_must_not_throw (fn);
  gimple *try_stmt = gimple_build_try (body, gimple_seq_alloc_with_stmt (mnt),
				       GIMPLE_TRY_CATCH);
  return gimple_seq_alloc_with_stmt (try_stmt);
}
/* Routines to lower OMP directives into OMP-GIMPLE. */
/* If ctx is a worksharing context inside of a cancellable parallel
   region and it isn't nowait, add lhs to its GIMPLE_OMP_RETURN
   and conditional branch to parallel's cancel_label to handle
   cancellation in the implicit barrier.  */
static void
maybe_add_implicit_barrier_cancel (omp_context *ctx, gimple *omp_return,
				   gimple_seq *body)
{
  gcc_assert (gimple_code (omp_return) == GIMPLE_OMP_RETURN);
  /* A nowait construct has no implicit barrier, so there is nothing
     to cancel.  */
  if (gimple_omp_return_nowait_p (omp_return))
    return;
  /* Search outward for the innermost cancellable parallel; stop at
     anything other than a taskgroup in between.  */
  for (omp_context *outer = ctx->outer; outer; outer = outer->outer)
    if (gimple_code (outer->stmt) == GIMPLE_OMP_PARALLEL
	&& outer->cancellable)
      {
	/* Reuse GOMP_cancel's return type as the boolean type of the
	   barrier's result.  */
	tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL);
	tree c_bool_type = TREE_TYPE (TREE_TYPE (fndecl));
	tree lhs = create_tmp_var (c_bool_type);
	gimple_omp_return_set_lhs (omp_return, lhs);
	tree fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
	/* Branch to the parallel's cancel label when the barrier
	   reported cancellation (lhs != false).  */
	gimple *g = gimple_build_cond (NE_EXPR, lhs,
				       fold_convert (c_bool_type,
						     boolean_false_node),
				       outer->cancel_label, fallthru_label);
	gimple_seq_add_stmt (body, g);
	gimple_seq_add_stmt (body, gimple_build_label (fallthru_label));
      }
    else if (gimple_code (outer->stmt) != GIMPLE_OMP_TASKGROUP)
      return;
}
/* Find the first task_reduction or reduction clause or return NULL
   if there are none.  */
static inline tree
omp_task_reductions_find_first (tree clauses, enum tree_code code,
				enum omp_clause_code ccode)
{
  for (tree c = omp_find_clause (clauses, ccode); c != NULL_TREE;
       c = omp_find_clause (OMP_CLAUSE_CHAIN (c), ccode))
    {
      /* Plain reductions on constructs other than taskloop only count
	 when they carry the task modifier.  */
      if (ccode == OMP_CLAUSE_REDUCTION
	  && code != OMP_TASKLOOP
	  && !OMP_CLAUSE_REDUCTION_TASK (c))
	continue;
      return c;
    }
  return NULL_TREE;
}
static void lower_omp_task_reductions (omp_context *, enum tree_code, tree,
gimple_seq *, gimple_seq *);
/* Lower the OpenMP sections directive in the current statement in GSI_P.
   CTX is the enclosing OMP context for the current statement.  */
static void
lower_omp_sections (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, control;
  gimple_stmt_iterator tgsi;
  gomp_sections *stmt;
  gimple *t;
  gbind *new_stmt, *bind;
  gimple_seq ilist, dlist, olist, tred_dlist = NULL, clist = NULL, new_body;

  stmt = as_a <gomp_sections *> (gsi_stmt (*gsi_p));

  push_gimplify_context ();

  dlist = NULL;
  ilist = NULL;

  /* Task reductions: add a _reductemp_ clause carrying the registration
     data and emit register/deregister sequences.  */
  tree rclauses
    = omp_task_reductions_find_first (gimple_omp_sections_clauses (stmt),
				      OMP_SECTIONS, OMP_CLAUSE_REDUCTION);
  tree rtmp = NULL_TREE;
  if (rclauses)
    {
      tree type = build_pointer_type (pointer_sized_int_node);
      tree temp = create_tmp_var (type);
      tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__REDUCTEMP_);
      OMP_CLAUSE_DECL (c) = temp;
      OMP_CLAUSE_CHAIN (c) = gimple_omp_sections_clauses (stmt);
      gimple_omp_sections_set_clauses (stmt, c);
      lower_omp_task_reductions (ctx, OMP_SECTIONS,
				 gimple_omp_sections_clauses (stmt),
				 &ilist, &tred_dlist);
      rclauses = c;
      rtmp = make_ssa_name (type);
      gimple_seq_add_stmt (&ilist, gimple_build_assign (rtmp, temp));
    }

  tree *clauses_ptr = gimple_omp_sections_clauses_ptr (stmt);
  lower_lastprivate_conditional_clauses (clauses_ptr, ctx);

  lower_rec_input_clauses (gimple_omp_sections_clauses (stmt),
			   &ilist, &dlist, ctx, NULL);

  /* Control variable driving the sections switch below.  */
  control = create_tmp_var (unsigned_type_node, ".section");
  gimple_omp_sections_set_control (stmt, control);

  new_body = gimple_omp_body (stmt);
  gimple_omp_set_body (stmt, NULL);
  tgsi = gsi_start (new_body);
  /* Lower each GIMPLE_OMP_SECTION body in turn.  */
  for (; !gsi_end_p (tgsi); gsi_next (&tgsi))
    {
      omp_context *sctx;
      gimple *sec_start;

      sec_start = gsi_stmt (tgsi);
      sctx = maybe_lookup_ctx (sec_start);
      gcc_assert (sctx);

      lower_omp (gimple_omp_body_ptr (sec_start), sctx);
      gsi_insert_seq_after (&tgsi, gimple_omp_body (sec_start),
			    GSI_CONTINUE_LINKING);
      gimple_omp_set_body (sec_start, NULL);

      if (gsi_one_before_end_p (tgsi))
	{
	  /* Lastprivate copy-out goes into the last section.  */
	  gimple_seq l = NULL;
	  lower_lastprivate_clauses (gimple_omp_sections_clauses (stmt), NULL,
				     &ilist, &l, &clist, ctx);
	  gsi_insert_seq_after (&tgsi, l, GSI_CONTINUE_LINKING);
	  gimple_omp_section_set_last (sec_start);
	}

      gsi_insert_after (&tgsi, gimple_build_omp_return (false),
			GSI_CONTINUE_LINKING);
    }

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, new_body, block);

  olist = NULL;
  lower_reduction_clauses (gimple_omp_sections_clauses (stmt), &olist,
			   &clist, ctx);
  if (clist)
    {
      /* Stores collected in CLIST must run inside an atomic region.  */
      tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
      gcall *g = gimple_build_call (fndecl, 0);
      gimple_seq_add_stmt (&olist, g);
      gimple_seq_add_seq (&olist, clist);
      fndecl = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
      g = gimple_build_call (fndecl, 0);
      gimple_seq_add_stmt (&olist, g);
    }

  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, new_stmt, true);

  pop_gimplify_context (new_stmt);
  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  BLOCK_VARS (block) = gimple_bind_vars (bind);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;

  /* Assemble the lowered body: inputs, the sections statement, the
     switch, the lowered section bodies, continue, reductions,
     destructors and the closing return.  */
  new_body = NULL;
  gimple_seq_add_seq (&new_body, ilist);
  gimple_seq_add_stmt (&new_body, stmt);
  gimple_seq_add_stmt (&new_body, gimple_build_omp_sections_switch ());
  gimple_seq_add_stmt (&new_body, bind);

  t = gimple_build_omp_continue (control, control);
  gimple_seq_add_stmt (&new_body, t);

  gimple_seq_add_seq (&new_body, olist);
  if (ctx->cancellable)
    gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
  gimple_seq_add_seq (&new_body, dlist);

  new_body = maybe_catch_exception (new_body);

  bool nowait = omp_find_clause (gimple_omp_sections_clauses (stmt),
				 OMP_CLAUSE_NOWAIT) != NULL_TREE;
  t = gimple_build_omp_return (nowait);
  gimple_seq_add_stmt (&new_body, t);
  gimple_seq_add_seq (&new_body, tred_dlist);
  maybe_add_implicit_barrier_cancel (ctx, t, &new_body);

  if (rclauses)
    OMP_CLAUSE_DECL (rclauses) = rtmp;

  gimple_bind_set_body (new_stmt, new_body);
}
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, without a copyprivate clause:

	if (GOMP_single_start ())
	  BODY;
	[ GOMP_barrier (); ]	-> unless 'nowait' is present.

   FIXME.  It may be better to delay expanding the logic of this until
   pass_expand_omp.  The expanded logic may make the job more difficult
   to a synchronization analysis pass.  */
static void
lower_omp_single_simple (gomp_single *single_stmt, gimple_seq *pre_p)
{
  location_t loc = gimple_location (single_stmt);
  tree body_label = create_artificial_label (loc);
  tree end_label = create_artificial_label (loc);

  /* res = GOMP_single_start ();  */
  tree start_fn = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_START);
  tree res = create_tmp_var (TREE_TYPE (TREE_TYPE (start_fn)));
  gimple *start_call = gimple_build_call (start_fn, 0);
  gimple_call_set_lhs (start_call, res);
  gimple_seq_add_stmt (pre_p, start_call);

  /* if (res == true) goto body_label; else goto end_label;  */
  tree true_cst = fold_convert_loc (loc, TREE_TYPE (res), boolean_true_node);
  gimple *branch = gimple_build_cond (EQ_EXPR, res, true_cst,
				      body_label, end_label);
  gimple_seq_add_stmt (pre_p, branch);

  gimple_seq_add_stmt (pre_p, gimple_build_label (body_label));
  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));
  gimple_seq_add_stmt (pre_p, gimple_build_label (end_label));
}
/* A subroutine of lower_omp_single.  Expand the simple form of
   a GIMPLE_OMP_SINGLE, with a copyprivate clause:

	#pragma omp single copyprivate (a, b, c)

   Create a new structure to hold copies of 'a', 'b' and 'c' and emit:

	{
	  if ((copyout_p = GOMP_single_copy_start ()) == NULL)
	    {
	      BODY;
	      copyout.a = a;
	      copyout.b = b;
	      copyout.c = c;
	      GOMP_single_copy_end (&copyout);
	    }
	  else
	    {
	      a = copyout_p->a;
	      b = copyout_p->b;
	      c = copyout_p->c;
	    }
	  GOMP_barrier ();
	}

  FIXME.  It may be better to delay expanding the logic of this until
  pass_expand_omp.  The expanded logic may make the job more difficult
  to a synchronization analysis pass.  */
static void
lower_omp_single_copy (gomp_single *single_stmt, gimple_seq *pre_p,
		       omp_context *ctx)
{
  tree ptr_type, t, l0, l1, l2, bfn_decl;
  gimple_seq copyin_seq;
  location_t loc = gimple_location (single_stmt);

  /* The record holding the values to broadcast (sender), and a pointer
     to the broadcast record (receiver).  */
  ctx->sender_decl = create_tmp_var (ctx->record_type, ".omp_copy_o");

  ptr_type = build_pointer_type (ctx->record_type);
  ctx->receiver_decl = create_tmp_var (ptr_type, ".omp_copy_i");

  l0 = create_artificial_label (loc);
  l1 = create_artificial_label (loc);
  l2 = create_artificial_label (loc);

  /* copyout_p = GOMP_single_copy_start ();  A NULL result means this
     thread executes the body and must broadcast.  */
  bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_START);
  t = build_call_expr_loc (loc, bfn_decl, 0);
  t = fold_convert_loc (loc, ptr_type, t);
  gimplify_assign (ctx->receiver_decl, t, pre_p);

  t = build2 (EQ_EXPR, boolean_type_node, ctx->receiver_decl,
	      build_int_cst (ptr_type, 0));
  t = build3 (COND_EXPR, void_type_node, t,
	      build_and_jump (&l0), build_and_jump (&l1));
  gimplify_and_add (t, pre_p);

  /* Sender side: run the body, fill in the record, then publish it.  */
  gimple_seq_add_stmt (pre_p, gimple_build_label (l0));

  gimple_seq_add_seq (pre_p, gimple_omp_body (single_stmt));

  copyin_seq = NULL;
  /* Fixed: the third argument had been mojibake-corrupted to
     "©in_seq" (an HTML entity for "&copy"); restore "&copyin_seq".  */
  lower_copyprivate_clauses (gimple_omp_single_clauses (single_stmt), pre_p,
			     &copyin_seq, ctx);

  t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
  bfn_decl = builtin_decl_explicit (BUILT_IN_GOMP_SINGLE_COPY_END);
  t = build_call_expr_loc (loc, bfn_decl, 1, t);
  gimplify_and_add (t, pre_p);

  t = build_and_jump (&l2);
  gimplify_and_add (t, pre_p);

  /* Receiver side: copy the values out of the broadcast record.  */
  gimple_seq_add_stmt (pre_p, gimple_build_label (l1));

  gimple_seq_add_seq (pre_p, copyin_seq);

  gimple_seq_add_stmt (pre_p, gimple_build_label (l2));
}
/* Expand code for an OpenMP single directive.  */
static void
lower_omp_single (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block;
  gomp_single *single_stmt = as_a <gomp_single *> (gsi_stmt (*gsi_p));
  gbind *bind;
  gimple_seq bind_body, bind_body_tail = NULL, dlist;

  push_gimplify_context ();

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  bind_body = NULL;
  dlist = NULL;
  lower_rec_input_clauses (gimple_omp_single_clauses (single_stmt),
			   &bind_body, &dlist, ctx, NULL);
  lower_omp (gimple_omp_body_ptr (single_stmt), ctx);

  gimple_seq_add_stmt (&bind_body, single_stmt);

  /* A record type is only created for copyprivate; it requires the
     broadcast form, otherwise the simple GOMP_single_start form
     suffices.  */
  if (ctx->record_type)
    lower_omp_single_copy (single_stmt, &bind_body, ctx);
  else
    lower_omp_single_simple (single_stmt, &bind_body);

  gimple_omp_set_body (single_stmt, NULL);

  gimple_seq_add_seq (&bind_body, dlist);

  bind_body = maybe_catch_exception (bind_body);

  bool nowait = omp_find_clause (gimple_omp_single_clauses (single_stmt),
				 OMP_CLAUSE_NOWAIT) != NULL_TREE;
  gimple *g = gimple_build_omp_return (nowait);
  gimple_seq_add_stmt (&bind_body_tail, g);
  maybe_add_implicit_barrier_cancel (ctx, g, &bind_body_tail);
  if (ctx->record_type)
    {
      /* The copy-out record is dead after the return; clobber it so
	 its stack slot can be reused.  */
      gimple_stmt_iterator gsi = gsi_start (bind_body_tail);
      tree clobber = build_clobber (ctx->record_type);
      gsi_insert_after (&gsi, gimple_build_assign (ctx->sender_decl,
						   clobber), GSI_SAME_STMT);
    }
  gimple_seq_add_seq (&bind_body, bind_body_tail);
  gimple_bind_set_body (bind, bind_body);

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;
}
/* Expand code for an OpenMP master directive.  */
static void
lower_omp_master (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree block, lab = NULL, x, bfn_decl;
  gimple *stmt = gsi_stmt (*gsi_p);
  gbind *bind;
  location_t loc = gimple_location (stmt);
  gimple_seq tseq;

  push_gimplify_context ();

  block = make_node (BLOCK);
  bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  gimple_bind_add_stmt (bind, stmt);

  /* if (omp_get_thread_num () != 0) jump past the body.  */
  bfn_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
  x = build_call_expr_loc (loc, bfn_decl, 0);
  x = build2 (EQ_EXPR, boolean_type_node, x, integer_zero_node);
  x = build3 (COND_EXPR, void_type_node, x, NULL, build_and_jump (&lab));
  tseq = NULL;
  gimplify_and_add (x, &tseq);
  gimple_bind_add_seq (bind, tseq);

  lower_omp (gimple_omp_body_ptr (stmt), ctx);
  gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
  gimple_bind_add_seq (bind, gimple_omp_body (stmt));
  gimple_omp_set_body (stmt, NULL);

  gimple_bind_add_stmt (bind, gimple_build_label (lab));

  /* Master has no implied barrier: the return is nowait (true).  */
  gimple_bind_add_stmt (bind, gimple_build_omp_return (true));

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
}
/* Helper function for lower_omp_task_reductions.  For a specific PASS
   find out the current clause it should be processed, or return false
   if all have been processed already.  On success, *C is the clause,
   *DECL its variable, *TYPE the (possibly dereferenced) type and
   *NEXT the following candidate clause.  Pass 0 handles decls of
   constant-sized type; pass 1 handles MEM_REFs and variable-sized
   types.  */
static inline bool
omp_task_reduction_iterate (int pass, enum tree_code code,
			    enum omp_clause_code ccode, tree *c, tree *decl,
			    tree *type, tree *next)
{
  for (; *c; *c = omp_find_clause (OMP_CLAUSE_CHAIN (*c), ccode))
    {
      /* Plain (non-task) reductions are only handled here on taskloop
	 constructs.  */
      if (ccode == OMP_CLAUSE_REDUCTION
	  && code != OMP_TASKLOOP
	  && !OMP_CLAUSE_REDUCTION_TASK (*c))
	continue;
      *decl = OMP_CLAUSE_DECL (*c);
      *type = TREE_TYPE (*decl);
      if (TREE_CODE (*decl) == MEM_REF)
	{
	  /* MEM_REF decls belong to pass 1 only.  */
	  if (pass != 1)
	    continue;
	}
      else
	{
	  if (omp_is_reference (*decl))
	    *type = TREE_TYPE (*type);
	  /* Constant-sized types in pass 0, variable-sized in pass 1.  */
	  if (pass != (!TREE_CONSTANT (TYPE_SIZE_UNIT (*type))))
	    continue;
	}
      *next = omp_find_clause (OMP_CLAUSE_CHAIN (*c), ccode);
      return true;
    }
  *decl = NULL_TREE;
  *type = NULL_TREE;
  *next = NULL_TREE;
  return false;
}
/* Lower task_reduction and reduction clauses (the latter unless CODE is
OMP_TASKGROUP only with task modifier). Register mapping of those in
START sequence and reducing them and unregister them in the END sequence. */
static void
lower_omp_task_reductions (omp_context *ctx, enum tree_code code, tree clauses,
gimple_seq *start, gimple_seq *end)
{
enum omp_clause_code ccode
= (code == OMP_TASKGROUP
? OMP_CLAUSE_TASK_REDUCTION : OMP_CLAUSE_REDUCTION);
tree cancellable = NULL_TREE;
clauses = omp_task_reductions_find_first (clauses, code, ccode);
if (clauses == NULL_TREE)
return;
if (code == OMP_FOR || code == OMP_SECTIONS)
{
for (omp_context *outer = ctx->outer; outer; outer = outer->outer)
if (gimple_code (outer->stmt) == GIMPLE_OMP_PARALLEL
&& outer->cancellable)
{
cancellable = error_mark_node;
break;
}
else if (gimple_code (outer->stmt) != GIMPLE_OMP_TASKGROUP)
break;
}
tree record_type = lang_hooks.types.make_type (RECORD_TYPE);
tree *last = &TYPE_FIELDS (record_type);
unsigned cnt = 0;
if (cancellable)
{
tree field = build_decl (UNKNOWN_LOCATION, FIELD_DECL, NULL_TREE,
ptr_type_node);
tree ifield = build_decl (UNKNOWN_LOCATION, FIELD_DECL, NULL_TREE,
integer_type_node);
*last = field;
DECL_CHAIN (field) = ifield;
last = &DECL_CHAIN (ifield);
DECL_CONTEXT (field) = record_type;
if (TYPE_ALIGN (record_type) < DECL_ALIGN (field))
SET_TYPE_ALIGN (record_type, DECL_ALIGN (field));
DECL_CONTEXT (ifield) = record_type;
if (TYPE_ALIGN (record_type) < DECL_ALIGN (ifield))
SET_TYPE_ALIGN (record_type, DECL_ALIGN (ifield));
}
for (int pass = 0; pass < 2; pass++)
{
tree decl, type, next;
for (tree c = clauses;
omp_task_reduction_iterate (pass, code, ccode,
&c, &decl, &type, &next); c = next)
{
++cnt;
tree new_type = type;
if (ctx->outer)
new_type = remap_type (type, &ctx->outer->cb);
tree field
= build_decl (OMP_CLAUSE_LOCATION (c), FIELD_DECL,
DECL_P (decl) ? DECL_NAME (decl) : NULL_TREE,
new_type);
if (DECL_P (decl) && type == TREE_TYPE (decl))
{
SET_DECL_ALIGN (field, DECL_ALIGN (decl));
DECL_USER_ALIGN (field) = DECL_USER_ALIGN (decl);
TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (decl);
}
else
SET_DECL_ALIGN (field, TYPE_ALIGN (type));
DECL_CONTEXT (field) = record_type;
if (TYPE_ALIGN (record_type) < DECL_ALIGN (field))
SET_TYPE_ALIGN (record_type, DECL_ALIGN (field));
*last = field;
last = &DECL_CHAIN (field);
tree bfield
= build_decl (OMP_CLAUSE_LOCATION (c), FIELD_DECL, NULL_TREE,
boolean_type_node);
DECL_CONTEXT (bfield) = record_type;
if (TYPE_ALIGN (record_type) < DECL_ALIGN (bfield))
SET_TYPE_ALIGN (record_type, DECL_ALIGN (bfield));
*last = bfield;
last = &DECL_CHAIN (bfield);
}
}
*last = NULL_TREE;
layout_type (record_type);
/* Build up an array which registers with the runtime all the reductions
and deregisters them at the end. Format documented in libgomp/task.c. */
tree atype = build_array_type_nelts (pointer_sized_int_node, 7 + cnt * 3);
tree avar = create_tmp_var_raw (atype);
gimple_add_tmp_var (avar);
TREE_ADDRESSABLE (avar) = 1;
tree r = build4 (ARRAY_REF, pointer_sized_int_node, avar, size_zero_node,
NULL_TREE, NULL_TREE);
tree t = build_int_cst (pointer_sized_int_node, cnt);
gimple_seq_add_stmt (start, gimple_build_assign (r, t));
gimple_seq seq = NULL;
tree sz = fold_convert (pointer_sized_int_node,
TYPE_SIZE_UNIT (record_type));
int cachesz = 64;
sz = fold_build2 (PLUS_EXPR, pointer_sized_int_node, sz,
build_int_cst (pointer_sized_int_node, cachesz - 1));
sz = fold_build2 (BIT_AND_EXPR, pointer_sized_int_node, sz,
build_int_cst (pointer_sized_int_node, ~(cachesz - 1)));
ctx->task_reductions.create (1 + cnt);
ctx->task_reduction_map = new hash_map<tree, unsigned>;
ctx->task_reductions.quick_push (TREE_CODE (sz) == INTEGER_CST
? sz : NULL_TREE);
sz = force_gimple_operand (sz, &seq, true, NULL_TREE);
gimple_seq_add_seq (start, seq);
r = build4 (ARRAY_REF, pointer_sized_int_node, avar, size_one_node,
NULL_TREE, NULL_TREE);
gimple_seq_add_stmt (start, gimple_build_assign (r, sz));
r = build4 (ARRAY_REF, pointer_sized_int_node, avar, size_int (2),
NULL_TREE, NULL_TREE);
t = build_int_cst (pointer_sized_int_node,
MAX (TYPE_ALIGN_UNIT (record_type), (unsigned) cachesz));
gimple_seq_add_stmt (start, gimple_build_assign (r, t));
r = build4 (ARRAY_REF, pointer_sized_int_node, avar, size_int (3),
NULL_TREE, NULL_TREE);
t = build_int_cst (pointer_sized_int_node, -1);
gimple_seq_add_stmt (start, gimple_build_assign (r, t));
r = build4 (ARRAY_REF, pointer_sized_int_node, avar, size_int (4),
NULL_TREE, NULL_TREE);
t = build_int_cst (pointer_sized_int_node, 0);
gimple_seq_add_stmt (start, gimple_build_assign (r, t));
/* In end, build a loop that iterates from 0 to < omp_get_num_threads ()
and for each task reduction checks a bool right after the private variable
within that thread's chunk; if the bool is clear, it hasn't been
initialized and thus isn't going to be reduced nor destructed, otherwise
reduce and destruct it. */
tree idx = create_tmp_var (size_type_node);
gimple_seq_add_stmt (end, gimple_build_assign (idx, size_zero_node));
tree num_thr_sz = create_tmp_var (size_type_node);
tree lab1 = create_artificial_label (UNKNOWN_LOCATION);
tree lab2 = create_artificial_label (UNKNOWN_LOCATION);
tree lab3 = NULL_TREE;
gimple *g;
if (code == OMP_FOR || code == OMP_SECTIONS)
{
/* For worksharing constructs, only perform it in the master thread,
with the exception of cancelled implicit barriers - then only handle
the current thread. */
tree lab4 = create_artificial_label (UNKNOWN_LOCATION);
t = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
tree thr_num = create_tmp_var (integer_type_node);
g = gimple_build_call (t, 0);
gimple_call_set_lhs (g, thr_num);
gimple_seq_add_stmt (end, g);
if (cancellable)
{
tree c;
tree lab5 = create_artificial_label (UNKNOWN_LOCATION);
tree lab6 = create_artificial_label (UNKNOWN_LOCATION);
lab3 = create_artificial_label (UNKNOWN_LOCATION);
if (code == OMP_FOR)
c = gimple_omp_for_clauses (ctx->stmt);
else /* if (code == OMP_SECTIONS) */
c = gimple_omp_sections_clauses (ctx->stmt);
c = OMP_CLAUSE_DECL (omp_find_clause (c, OMP_CLAUSE__REDUCTEMP_));
cancellable = c;
g = gimple_build_cond (NE_EXPR, c, build_zero_cst (TREE_TYPE (c)),
lab5, lab6);
gimple_seq_add_stmt (end, g);
gimple_seq_add_stmt (end, gimple_build_label (lab5));
g = gimple_build_assign (idx, NOP_EXPR, thr_num);
gimple_seq_add_stmt (end, g);
g = gimple_build_assign (num_thr_sz, PLUS_EXPR, idx,
build_one_cst (TREE_TYPE (idx)));
gimple_seq_add_stmt (end, g);
gimple_seq_add_stmt (end, gimple_build_goto (lab3));
gimple_seq_add_stmt (end, gimple_build_label (lab6));
}
g = gimple_build_cond (NE_EXPR, thr_num, integer_zero_node, lab2, lab4);
gimple_seq_add_stmt (end, g);
gimple_seq_add_stmt (end, gimple_build_label (lab4));
}
if (code != OMP_PARALLEL)
{
t = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
tree num_thr = create_tmp_var (integer_type_node);
g = gimple_build_call (t, 0);
gimple_call_set_lhs (g, num_thr);
gimple_seq_add_stmt (end, g);
g = gimple_build_assign (num_thr_sz, NOP_EXPR, num_thr);
gimple_seq_add_stmt (end, g);
if (cancellable)
gimple_seq_add_stmt (end, gimple_build_label (lab3));
}
else
{
tree c = omp_find_clause (gimple_omp_parallel_clauses (ctx->stmt),
OMP_CLAUSE__REDUCTEMP_);
t = fold_convert (pointer_sized_int_node, OMP_CLAUSE_DECL (c));
t = fold_convert (size_type_node, t);
gimplify_assign (num_thr_sz, t, end);
}
t = build4 (ARRAY_REF, pointer_sized_int_node, avar, size_int (2),
NULL_TREE, NULL_TREE);
tree data = create_tmp_var (pointer_sized_int_node);
gimple_seq_add_stmt (end, gimple_build_assign (data, t));
gimple_seq_add_stmt (end, gimple_build_label (lab1));
tree ptr;
if (TREE_CODE (TYPE_SIZE_UNIT (record_type)) == INTEGER_CST)
ptr = create_tmp_var (build_pointer_type (record_type));
else
ptr = create_tmp_var (ptr_type_node);
gimple_seq_add_stmt (end, gimple_build_assign (ptr, NOP_EXPR, data));
tree field = TYPE_FIELDS (record_type);
cnt = 0;
if (cancellable)
field = DECL_CHAIN (DECL_CHAIN (field));
for (int pass = 0; pass < 2; pass++)
{
tree decl, type, next;
for (tree c = clauses;
omp_task_reduction_iterate (pass, code, ccode,
&c, &decl, &type, &next); c = next)
{
tree var = decl, ref;
if (TREE_CODE (decl) == MEM_REF)
{
var = TREE_OPERAND (var, 0);
if (TREE_CODE (var) == POINTER_PLUS_EXPR)
var = TREE_OPERAND (var, 0);
tree v = var;
if (TREE_CODE (var) == ADDR_EXPR)
var = TREE_OPERAND (var, 0);
else if (TREE_CODE (var) == INDIRECT_REF)
var = TREE_OPERAND (var, 0);
tree orig_var = var;
if (is_variable_sized (var))
{
gcc_assert (DECL_HAS_VALUE_EXPR_P (var));
var = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (var) == INDIRECT_REF);
var = TREE_OPERAND (var, 0);
gcc_assert (DECL_P (var));
}
t = ref = maybe_lookup_decl_in_outer_ctx (var, ctx);
if (orig_var != var)
gcc_assert (TREE_CODE (v) == ADDR_EXPR);
else if (TREE_CODE (v) == ADDR_EXPR)
t = build_fold_addr_expr (t);
else if (TREE_CODE (v) == INDIRECT_REF)
t = build_fold_indirect_ref (t);
if (TREE_CODE (TREE_OPERAND (decl, 0)) == POINTER_PLUS_EXPR)
{
tree b = TREE_OPERAND (TREE_OPERAND (decl, 0), 1);
b = maybe_lookup_decl_in_outer_ctx (b, ctx);
t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, b);
}
if (!integer_zerop (TREE_OPERAND (decl, 1)))
t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t,
fold_convert (size_type_node,
TREE_OPERAND (decl, 1)));
}
else
{
t = ref = maybe_lookup_decl_in_outer_ctx (var, ctx);
if (!omp_is_reference (decl))
t = build_fold_addr_expr (t);
}
t = fold_convert (pointer_sized_int_node, t);
seq = NULL;
t = force_gimple_operand (t, &seq, true, NULL_TREE);
gimple_seq_add_seq (start, seq);
r = build4 (ARRAY_REF, pointer_sized_int_node, avar,
size_int (7 + cnt * 3), NULL_TREE, NULL_TREE);
gimple_seq_add_stmt (start, gimple_build_assign (r, t));
t = unshare_expr (byte_position (field));
t = fold_convert (pointer_sized_int_node, t);
ctx->task_reduction_map->put (c, cnt);
ctx->task_reductions.quick_push (TREE_CODE (t) == INTEGER_CST
? t : NULL_TREE);
seq = NULL;
t = force_gimple_operand (t, &seq, true, NULL_TREE);
gimple_seq_add_seq (start, seq);
r = build4 (ARRAY_REF, pointer_sized_int_node, avar,
size_int (7 + cnt * 3 + 1), NULL_TREE, NULL_TREE);
gimple_seq_add_stmt (start, gimple_build_assign (r, t));
tree bfield = DECL_CHAIN (field);
tree cond;
if (code == OMP_PARALLEL || code == OMP_FOR || code == OMP_SECTIONS)
/* In parallel or worksharing all threads unconditionally
initialize all their task reduction private variables. */
cond = boolean_true_node;
else if (TREE_TYPE (ptr) == ptr_type_node)
{
cond = build2 (POINTER_PLUS_EXPR, ptr_type_node, ptr,
unshare_expr (byte_position (bfield)));
seq = NULL;
cond = force_gimple_operand (cond, &seq, true, NULL_TREE);
gimple_seq_add_seq (end, seq);
tree pbool = build_pointer_type (TREE_TYPE (bfield));
cond = build2 (MEM_REF, TREE_TYPE (bfield), cond,
build_int_cst (pbool, 0));
}
else
cond = build3 (COMPONENT_REF, TREE_TYPE (bfield),
build_simple_mem_ref (ptr), bfield, NULL_TREE);
tree lab3 = create_artificial_label (UNKNOWN_LOCATION);
tree lab4 = create_artificial_label (UNKNOWN_LOCATION);
tree condv = create_tmp_var (boolean_type_node);
gimple_seq_add_stmt (end, gimple_build_assign (condv, cond));
g = gimple_build_cond (NE_EXPR, condv, boolean_false_node,
lab3, lab4);
gimple_seq_add_stmt (end, g);
gimple_seq_add_stmt (end, gimple_build_label (lab3));
if (cancellable && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) == NULL_TREE)
{
/* If this reduction doesn't need destruction and parallel
has been cancelled, there is nothing to do for this
reduction, so jump around the merge operation. */
tree lab5 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (NE_EXPR, cancellable,
build_zero_cst (TREE_TYPE (cancellable)),
lab4, lab5);
gimple_seq_add_stmt (end, g);
gimple_seq_add_stmt (end, gimple_build_label (lab5));
}
tree new_var;
if (TREE_TYPE (ptr) == ptr_type_node)
{
new_var = build2 (POINTER_PLUS_EXPR, ptr_type_node, ptr,
unshare_expr (byte_position (field)));
seq = NULL;
new_var = force_gimple_operand (new_var, &seq, true, NULL_TREE);
gimple_seq_add_seq (end, seq);
tree pbool = build_pointer_type (TREE_TYPE (field));
new_var = build2 (MEM_REF, TREE_TYPE (field), new_var,
build_int_cst (pbool, 0));
}
else
new_var = build3 (COMPONENT_REF, TREE_TYPE (field),
build_simple_mem_ref (ptr), field, NULL_TREE);
enum tree_code rcode = OMP_CLAUSE_REDUCTION_CODE (c);
if (TREE_CODE (decl) != MEM_REF && omp_is_reference (decl))
ref = build_simple_mem_ref (ref);
/* reduction(-:var) sums up the partial results, so it acts
identically to reduction(+:var). */
if (rcode == MINUS_EXPR)
rcode = PLUS_EXPR;
if (TREE_CODE (decl) == MEM_REF)
{
tree type = TREE_TYPE (new_var);
tree v = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
tree i = create_tmp_var (TREE_TYPE (v));
tree ptype = build_pointer_type (TREE_TYPE (type));
if (DECL_P (v))
{
v = maybe_lookup_decl_in_outer_ctx (v, ctx);
tree vv = create_tmp_var (TREE_TYPE (v));
gimplify_assign (vv, v, start);
v = vv;
}
ref = build4 (ARRAY_REF, pointer_sized_int_node, avar,
size_int (7 + cnt * 3), NULL_TREE, NULL_TREE);
new_var = build_fold_addr_expr (new_var);
new_var = fold_convert (ptype, new_var);
ref = fold_convert (ptype, ref);
tree m = create_tmp_var (ptype);
gimplify_assign (m, new_var, end);
new_var = m;
m = create_tmp_var (ptype);
gimplify_assign (m, ref, end);
ref = m;
gimplify_assign (i, build_int_cst (TREE_TYPE (v), 0), end);
tree body = create_artificial_label (UNKNOWN_LOCATION);
tree endl = create_artificial_label (UNKNOWN_LOCATION);
gimple_seq_add_stmt (end, gimple_build_label (body));
tree priv = build_simple_mem_ref (new_var);
tree out = build_simple_mem_ref (ref);
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
tree decl_placeholder
= OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c);
tree lab6 = NULL_TREE;
if (cancellable)
{
/* If this reduction needs destruction and parallel
has been cancelled, jump around the merge operation
to the destruction. */
tree lab5 = create_artificial_label (UNKNOWN_LOCATION);
lab6 = create_artificial_label (UNKNOWN_LOCATION);
tree zero = build_zero_cst (TREE_TYPE (cancellable));
g = gimple_build_cond (NE_EXPR, cancellable, zero,
lab6, lab5);
gimple_seq_add_stmt (end, g);
gimple_seq_add_stmt (end, gimple_build_label (lab5));
}
SET_DECL_VALUE_EXPR (placeholder, out);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
SET_DECL_VALUE_EXPR (decl_placeholder, priv);
DECL_HAS_VALUE_EXPR_P (decl_placeholder) = 1;
lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
gimple_seq_add_seq (end,
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
{
OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = NULL;
}
if (cancellable)
gimple_seq_add_stmt (end, gimple_build_label (lab6));
tree x = lang_hooks.decls.omp_clause_dtor (c, priv);
if (x)
{
gimple_seq tseq = NULL;
gimplify_stmt (&x, &tseq);
gimple_seq_add_seq (end, tseq);
}
}
else
{
tree x = build2 (rcode, TREE_TYPE (out), out, priv);
out = unshare_expr (out);
gimplify_assign (out, x, end);
}
gimple *g
= gimple_build_assign (new_var, POINTER_PLUS_EXPR, new_var,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (end, g);
g = gimple_build_assign (ref, POINTER_PLUS_EXPR, ref,
TYPE_SIZE_UNIT (TREE_TYPE (type)));
gimple_seq_add_stmt (end, g);
g = gimple_build_assign (i, PLUS_EXPR, i,
build_int_cst (TREE_TYPE (i), 1));
gimple_seq_add_stmt (end, g);
g = gimple_build_cond (LE_EXPR, i, v, body, endl);
gimple_seq_add_stmt (end, g);
gimple_seq_add_stmt (end, gimple_build_label (endl));
}
else if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
tree oldv = NULL_TREE;
tree lab6 = NULL_TREE;
if (cancellable)
{
/* If this reduction needs destruction and parallel
has been cancelled, jump around the merge operation
to the destruction. */
tree lab5 = create_artificial_label (UNKNOWN_LOCATION);
lab6 = create_artificial_label (UNKNOWN_LOCATION);
tree zero = build_zero_cst (TREE_TYPE (cancellable));
g = gimple_build_cond (NE_EXPR, cancellable, zero,
lab6, lab5);
gimple_seq_add_stmt (end, g);
gimple_seq_add_stmt (end, gimple_build_label (lab5));
}
if (omp_is_reference (decl)
&& !useless_type_conversion_p (TREE_TYPE (placeholder),
TREE_TYPE (ref)))
ref = build_fold_addr_expr_loc (OMP_CLAUSE_LOCATION (c), ref);
ref = build_fold_addr_expr_loc (OMP_CLAUSE_LOCATION (c), ref);
tree refv = create_tmp_var (TREE_TYPE (ref));
gimplify_assign (refv, ref, end);
ref = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), refv);
SET_DECL_VALUE_EXPR (placeholder, ref);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
tree d = maybe_lookup_decl (decl, ctx);
gcc_assert (d);
if (DECL_HAS_VALUE_EXPR_P (d))
oldv = DECL_VALUE_EXPR (d);
if (omp_is_reference (var))
{
tree v = fold_convert (TREE_TYPE (d),
build_fold_addr_expr (new_var));
SET_DECL_VALUE_EXPR (d, v);
}
else
SET_DECL_VALUE_EXPR (d, new_var);
DECL_HAS_VALUE_EXPR_P (d) = 1;
lower_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
if (oldv)
SET_DECL_VALUE_EXPR (d, oldv);
else
{
SET_DECL_VALUE_EXPR (d, NULL_TREE);
DECL_HAS_VALUE_EXPR_P (d) = 0;
}
gimple_seq_add_seq (end, OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c));
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)
OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL;
if (cancellable)
gimple_seq_add_stmt (end, gimple_build_label (lab6));
tree x = lang_hooks.decls.omp_clause_dtor (c, new_var);
if (x)
{
gimple_seq tseq = NULL;
gimplify_stmt (&x, &tseq);
gimple_seq_add_seq (end, tseq);
}
}
else
{
tree x = build2 (rcode, TREE_TYPE (ref), ref, new_var);
ref = unshare_expr (ref);
gimplify_assign (ref, x, end);
}
gimple_seq_add_stmt (end, gimple_build_label (lab4));
++cnt;
field = DECL_CHAIN (bfield);
}
}
if (code == OMP_TASKGROUP)
{
t = builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_REDUCTION_REGISTER);
g = gimple_build_call (t, 1, build_fold_addr_expr (avar));
gimple_seq_add_stmt (start, g);
}
else
{
tree c;
if (code == OMP_FOR)
c = gimple_omp_for_clauses (ctx->stmt);
else if (code == OMP_SECTIONS)
c = gimple_omp_sections_clauses (ctx->stmt);
else
c = gimple_omp_taskreg_clauses (ctx->stmt);
c = omp_find_clause (c, OMP_CLAUSE__REDUCTEMP_);
t = fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (c)),
build_fold_addr_expr (avar));
gimplify_assign (OMP_CLAUSE_DECL (c), t, start);
}
gimple_seq_add_stmt (end, gimple_build_assign (data, PLUS_EXPR, data, sz));
gimple_seq_add_stmt (end, gimple_build_assign (idx, PLUS_EXPR, idx,
size_one_node));
g = gimple_build_cond (NE_EXPR, idx, num_thr_sz, lab1, lab2);
gimple_seq_add_stmt (end, g);
gimple_seq_add_stmt (end, gimple_build_label (lab2));
if (code == OMP_FOR || code == OMP_SECTIONS)
{
enum built_in_function bfn
= BUILT_IN_GOMP_WORKSHARE_TASK_REDUCTION_UNREGISTER;
t = builtin_decl_explicit (bfn);
tree c_bool_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (t)));
tree arg;
if (cancellable)
{
arg = create_tmp_var (c_bool_type);
gimple_seq_add_stmt (end, gimple_build_assign (arg, NOP_EXPR,
cancellable));
}
else
arg = build_int_cst (c_bool_type, 0);
g = gimple_build_call (t, 1, arg);
}
else
{
t = builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_REDUCTION_UNREGISTER);
g = gimple_build_call (t, 1, build_fold_addr_expr (avar));
}
gimple_seq_add_stmt (end, g);
t = build_constructor (atype, NULL);
TREE_THIS_VOLATILE (t) = 1;
gimple_seq_add_stmt (end, gimple_build_assign (avar, t));
}
/* Expand code for an OpenMP taskgroup directive.  Wrap the taskgroup body
   in a GIMPLE_BIND that first calls GOMP_taskgroup_start, then runs any
   task-reduction registration code, the lowered body, a GIMPLE_OMP_RETURN,
   and finally the task-reduction teardown sequence.  GSI_P points at the
   GIMPLE_OMP_TASKGROUP statement; CTX is its lowering context.  */
static void
lower_omp_taskgroup (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
gimple *stmt = gsi_stmt (*gsi_p);
gcall *x;
gbind *bind;
/* Teardown (unregister/destruct) sequence filled in by
   lower_omp_task_reductions; appended after the OMP return below.  */
gimple_seq dseq = NULL;
tree block = make_node (BLOCK);
bind = gimple_build_bind (NULL, NULL, block);
/* Replace the taskgroup statement with the bind, then re-add the
   statement inside the bind so its body is emitted within the bind.  */
gsi_replace (gsi_p, bind, true);
gimple_bind_add_stmt (bind, stmt);
push_gimplify_context ();
x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_TASKGROUP_START),
0);
gimple_bind_add_stmt (bind, x);
/* Emit task_reduction clause handling: setup code goes into the bind
   body (before the taskgroup body), teardown into DSEQ.  */
lower_omp_task_reductions (ctx, OMP_TASKGROUP,
gimple_omp_taskgroup_clauses (stmt),
gimple_bind_body_ptr (bind), &dseq);
lower_omp (gimple_omp_body_ptr (stmt), ctx);
gimple_bind_add_seq (bind, gimple_omp_body (stmt));
/* The body has been moved into the bind; clear it on the original
   statement to avoid double emission.  */
gimple_omp_set_body (stmt, NULL);
gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
gimple_bind_add_seq (bind, dseq);
pop_gimplify_context (bind);
gimple_bind_append_vars (bind, ctx->block_vars);
BLOCK_VARS (block) = ctx->block_vars;
}
/* Fold the OMP_ORDERED_CLAUSES for the OMP_ORDERED in STMT if possible.
   Merges depend(sink:...) clauses from adjacent ordered constructs and
   then folds all sink dependence vectors into a single canonical one
   (GCD of the first elements, minimum of the rest — see the large
   comment below).  GSI_P points at ORD_STMT; CTX is the ordered
   construct's context, whose outer context must be the enclosing
   GIMPLE_OMP_FOR.  */
static void
lower_omp_ordered_clauses (gimple_stmt_iterator *gsi_p, gomp_ordered *ord_stmt,
omp_context *ctx)
{
struct omp_for_data fd;
/* Sink folding only makes sense directly inside an OMP for loop.  */
if (!ctx->outer || gimple_code (ctx->outer->stmt) != GIMPLE_OMP_FOR)
return;
unsigned int len = gimple_omp_for_collapse (ctx->outer->stmt);
struct omp_for_data_loop *loops = XALLOCAVEC (struct omp_for_data_loop, len);
omp_extract_for_data (as_a <gomp_for *> (ctx->outer->stmt), &fd, loops);
/* Nothing to do unless the loop has an ordered(n) clause.  */
if (!fd.ordered)
return;
tree *list_p = gimple_omp_ordered_clauses_ptr (ord_stmt);
tree c = gimple_omp_ordered_clauses (ord_stmt);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
&& OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK)
{
/* Merge depend clauses from multiple adjacent
#pragma omp ordered depend(sink:...) constructs
into one #pragma omp ordered depend(sink:...), so that
we can optimize them together.  */
gimple_stmt_iterator gsi = *gsi_p;
gsi_next (&gsi);
while (!gsi_end_p (gsi))
{
gimple *stmt = gsi_stmt (gsi);
/* Debug stmts and nops between the ordered constructs are
   harmless; skip over them.  */
if (is_gimple_debug (stmt)
|| gimple_code (stmt) == GIMPLE_NOP)
{
gsi_next (&gsi);
continue;
}
if (gimple_code (stmt) != GIMPLE_OMP_ORDERED)
break;
gomp_ordered *ord_stmt2 = as_a <gomp_ordered *> (stmt);
c = gimple_omp_ordered_clauses (ord_stmt2);
if (c == NULL_TREE
|| OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
|| OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_SINK)
break;
/* Splice the second construct's clause chain onto the end of
   the first's, then delete the second construct.  */
while (*list_p)
list_p = &OMP_CLAUSE_CHAIN (*list_p);
*list_p = c;
gsi_remove (&gsi, true);
}
}
/* Canonicalize sink dependence clauses into one folded clause if
possible.
The basic algorithm is to create a sink vector whose first
element is the GCD of all the first elements, and whose remaining
elements are the minimum of the subsequent columns.
We ignore dependence vectors whose first element is zero because
such dependencies are known to be executed by the same thread.
We take into account the direction of the loop, so a minimum
becomes a maximum if the loop is iterating forwards.  We also
ignore sink clauses where the loop direction is unknown, or where
the offsets are clearly invalid because they are not a multiple
of the loop increment.
For example:
#pragma omp for ordered(2)
for (i=0; i < N; ++i)
for (j=0; j < M; ++j)
{
#pragma omp ordered \
depend(sink:i-8,j-2) \
depend(sink:i,j-1) \	// Completely ignored because i+0.
depend(sink:i-4,j-3) \
depend(sink:i-6,j-4)
#pragma omp ordered depend(source)
}
Folded clause is:
depend(sink:-gcd(8,4,6),-min(2,3,4))
-or-
depend(sink:-2,-2)
*/
/* FIXME: Computing GCD's where the first element is zero is
non-trivial in the presence of collapsed loops.  Do this later.  */
if (fd.collapse > 1)
return;
/* FOLDED_DEPS[0 .. len-1] holds the folded vector being built;
   FOLDED_DEPS[len .. 2*len-2] temporarily holds the current clause's
   offsets for dimensions 1 .. len-1.  */
wide_int *folded_deps = XALLOCAVEC (wide_int, 2 * len - 1);
/* wide_int is not a POD so it must be default-constructed.  */
for (unsigned i = 0; i != 2 * len - 1; ++i)
new (static_cast<void*>(folded_deps + i)) wide_int ();
/* The clause the folded vector will be stored into, once known.  */
tree folded_dep = NULL_TREE;
/* TRUE if the first dimension's offset is negative.  */
bool neg_offset_p = false;
list_p = gimple_omp_ordered_clauses_ptr (ord_stmt);
unsigned int i;
while ((c = *list_p) != NULL)
{
bool remove = false;
gcc_assert (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND);
if (OMP_CLAUSE_DEPEND_KIND (c) != OMP_CLAUSE_DEPEND_SINK)
goto next_ordered_clause;
/* Walk the TREE_LIST sink vector; I indexes the loop dimension.  */
tree vec;
for (vec = OMP_CLAUSE_DECL (c), i = 0;
vec && TREE_CODE (vec) == TREE_LIST;
vec = TREE_CHAIN (vec), ++i)
{
gcc_assert (i < len);
/* omp_extract_for_data has canonicalized the condition.  */
gcc_assert (fd.loops[i].cond_code == LT_EXPR
|| fd.loops[i].cond_code == GT_EXPR);
bool forward = fd.loops[i].cond_code == LT_EXPR;
bool maybe_lexically_later = true;
/* While the committee makes up its mind, bail if we have any
non-constant steps.  */
if (TREE_CODE (fd.loops[i].step) != INTEGER_CST)
goto lower_omp_ordered_ret;
tree itype = TREE_TYPE (TREE_VALUE (vec));
if (POINTER_TYPE_P (itype))
itype = sizetype;
wide_int offset = wide_int::from (wi::to_wide (TREE_PURPOSE (vec)),
TYPE_PRECISION (itype),
TYPE_SIGN (itype));
/* Ignore invalid offsets that are not multiples of the step.  */
if (!wi::multiple_of_p (wi::abs (offset),
wi::abs (wi::to_wide (fd.loops[i].step)),
UNSIGNED))
{
warning_at (OMP_CLAUSE_LOCATION (c), 0,
"ignoring sink clause with offset that is not "
"a multiple of the loop step");
remove = true;
goto next_ordered_clause;
}
/* Calculate the first dimension.  The first dimension of
the folded dependency vector is the GCD of the first
elements, while ignoring any first elements whose offset
is 0.  */
if (i == 0)
{
/* Ignore dependence vectors whose first dimension is 0.  */
if (offset == 0)
{
remove = true;
goto next_ordered_clause;
}
else
{
/* A sink must point backwards in the iteration space;
   otherwise the dependence is on a later iteration,
   which is an error.  */
if (!TYPE_UNSIGNED (itype) && (forward ^ wi::neg_p (offset)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"first offset must be in opposite direction "
"of loop iterations");
goto lower_omp_ordered_ret;
}
/* Fold in unsigned magnitude; remember the sign to
   restore at the end (see NEG_OFFSET_P).  */
if (forward)
offset = -offset;
neg_offset_p = forward;
/* Initialize the first time around.  */
if (folded_dep == NULL_TREE)
{
folded_dep = c;
folded_deps[0] = offset;
}
else
folded_deps[0] = wi::gcd (folded_deps[0],
offset, UNSIGNED);
}
}
/* Calculate minimum for the remaining dimensions.  */
else
{
folded_deps[len + i - 1] = offset;
if (folded_dep == c)
folded_deps[i] = offset;
else if (maybe_lexically_later
&& !wi::eq_p (folded_deps[i], offset))
{
/* This clause's vector is lexically later than the
   folded one for this dimension: adopt its offsets
   for dimensions 1..i.  */
if (forward ^ wi::gts_p (folded_deps[i], offset))
{
unsigned int j;
folded_dep = c;
for (j = 1; j <= i; j++)
folded_deps[j] = folded_deps[len + j - 1];
}
else
maybe_lexically_later = false;
}
}
}
gcc_assert (i == len);
/* Every fully-processed clause is removed; the folded result is
   written back into FOLDED_DEP below.  */
remove = true;
next_ordered_clause:
if (remove)
*list_p = OMP_CLAUSE_CHAIN (c);
else
list_p = &OMP_CLAUSE_CHAIN (c);
}
if (folded_dep)
{
/* Restore the sign of the first dimension and store the folded
   offset back into the surviving clause, then make that clause
   head the ordered statement's clause chain.  */
if (neg_offset_p)
folded_deps[0] = -folded_deps[0];
tree itype = TREE_TYPE (TREE_VALUE (OMP_CLAUSE_DECL (folded_dep)));
if (POINTER_TYPE_P (itype))
itype = sizetype;
TREE_PURPOSE (OMP_CLAUSE_DECL (folded_dep))
= wide_int_to_tree (itype, folded_deps[0]);
OMP_CLAUSE_CHAIN (folded_dep) = gimple_omp_ordered_clauses (ord_stmt);
*gimple_omp_ordered_clauses_ptr (ord_stmt) = folded_dep;
}
lower_omp_ordered_ret:
/* Ordered without clauses is #pragma omp threads, while we want
a nop instead if we remove all clauses.  */
if (gimple_omp_ordered_clauses (ord_stmt) == NULL_TREE)
gsi_replace (gsi_p, gimple_build_nop (), true);
}
/* Expand code for an OpenMP ordered directive.  Replaces the statement at
   GSI_P with a GIMPLE_BIND wrapping the body in GOMP_ordered_start/end
   calls (or the SIMD-ordered internal functions when inside a simd loop).
   For possibly-SIMT regions, additionally emits a per-lane serialization
   loop driven by GOMP_SIMT_ORDERED_PRED / GOMP_SIMT_VOTE_ANY.
   depend() forms are handled elsewhere and left untouched here.  */
static void
lower_omp_ordered (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree block;
gimple *stmt = gsi_stmt (*gsi_p), *g;
gomp_ordered *ord_stmt = as_a <gomp_ordered *> (stmt);
gcall *x;
gbind *bind;
/* Inside a simd region the ordered construct is lowered to the
   IFN_GOMP_SIMD_ORDERED_{START,END} internal calls instead of the
   libgomp entry points.  */
bool simd = omp_find_clause (gimple_omp_ordered_clauses (ord_stmt),
OMP_CLAUSE_SIMD);
/* FIXME: this should check presence of OMP_CLAUSE__SIMT_ on the enclosing
loop.  */
bool maybe_simt
= simd && omp_maybe_offloaded_ctx (ctx) && omp_max_simt_vf () > 1;
bool threads = omp_find_clause (gimple_omp_ordered_clauses (ord_stmt),
OMP_CLAUSE_THREADS);
if (omp_find_clause (gimple_omp_ordered_clauses (ord_stmt),
OMP_CLAUSE_DEPEND))
{
/* FIXME: This is needs to be moved to the expansion to verify various
conditions only testable on cfg with dominators computed, and also
all the depend clauses to be merged still might need to be available
for the runtime checks.  */
if (0)
lower_omp_ordered_clauses (gsi_p, ord_stmt, ctx);
return;
}
push_gimplify_context ();
block = make_node (BLOCK);
bind = gimple_build_bind (NULL, NULL, block);
gsi_replace (gsi_p, bind, true);
gimple_bind_add_stmt (bind, stmt);
if (simd)
{
/* THREADS is passed through so the vectorizer knows whether this is
   ordered threads or ordered simd.  */
x = gimple_build_call_internal (IFN_GOMP_SIMD_ORDERED_START, 1,
build_int_cst (NULL_TREE, threads));
cfun->has_simduid_loops = true;
}
else
x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_START),
0);
gimple_bind_add_stmt (bind, x);
tree counter = NULL_TREE, test = NULL_TREE, body = NULL_TREE;
if (maybe_simt)
{
/* For SIMT, run the body once per lane, in lane order: loop from the
   current lane number down, executing the body only on the lane whose
   turn it is (GOMP_SIMT_ORDERED_PRED), until all lanes are done
   (GOMP_SIMT_VOTE_ANY below).  */
counter = create_tmp_var (integer_type_node);
g = gimple_build_call_internal (IFN_GOMP_SIMT_LANE, 0);
gimple_call_set_lhs (g, counter);
gimple_bind_add_stmt (bind, g);
body = create_artificial_label (UNKNOWN_LOCATION);
test = create_artificial_label (UNKNOWN_LOCATION);
gimple_bind_add_stmt (bind, gimple_build_label (body));
tree simt_pred = create_tmp_var (integer_type_node);
g = gimple_build_call_internal (IFN_GOMP_SIMT_ORDERED_PRED, 1, counter);
gimple_call_set_lhs (g, simt_pred);
gimple_bind_add_stmt (bind, g);
tree t = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (EQ_EXPR, simt_pred, integer_zero_node, t, test);
gimple_bind_add_stmt (bind, g);
gimple_bind_add_stmt (bind, gimple_build_label (t));
}
lower_omp (gimple_omp_body_ptr (stmt), ctx);
gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
gimple_bind_add_seq (bind, gimple_omp_body (stmt));
gimple_omp_set_body (stmt, NULL);
if (maybe_simt)
{
/* Loop latch: decrement the counter and iterate while any lane still
   has a nonnegative counter.  */
gimple_bind_add_stmt (bind, gimple_build_label (test));
g = gimple_build_assign (counter, MINUS_EXPR, counter, integer_one_node);
gimple_bind_add_stmt (bind, g);
tree c = build2 (GE_EXPR, boolean_type_node, counter, integer_zero_node);
tree nonneg = create_tmp_var (integer_type_node);
gimple_seq tseq = NULL;
gimplify_assign (nonneg, fold_convert (integer_type_node, c), &tseq);
gimple_bind_add_seq (bind, tseq);
g = gimple_build_call_internal (IFN_GOMP_SIMT_VOTE_ANY, 1, nonneg);
gimple_call_set_lhs (g, nonneg);
gimple_bind_add_stmt (bind, g);
tree end = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (NE_EXPR, nonneg, integer_zero_node, body, end);
gimple_bind_add_stmt (bind, g);
gimple_bind_add_stmt (bind, gimple_build_label (end));
}
if (simd)
x = gimple_build_call_internal (IFN_GOMP_SIMD_ORDERED_END, 1,
build_int_cst (NULL_TREE, threads));
else
x = gimple_build_call (builtin_decl_explicit (BUILT_IN_GOMP_ORDERED_END),
0);
gimple_bind_add_stmt (bind, x);
gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
pop_gimplify_context (bind);
gimple_bind_append_vars (bind, ctx->block_vars);
BLOCK_VARS (block) = gimple_bind_vars (bind);
}
/* Expand code for an OpenMP scan directive and the structured block
   before the scan directive.  GSI_P points at the GIMPLE_OMP_SCAN; CTX
   is its context and CTX->outer is the enclosing worksharing/simd loop
   whose reduction(inscan) clauses are processed here.  For simd loops,
   per-lane "omp simd array" copies are addressed via an
   IFN_GOMP_SIMD_LANE lane index and the scan/input phase code is
   emitted inline; for non-simd 'for' loops only the input-phase
   initialization is handled here.  */
static void
lower_omp_scan (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
gimple *stmt = gsi_stmt (*gsi_p);
/* The scan directive with clauses marks one phase boundary; the
   clause-less one marks the other.  */
bool has_clauses
= gimple_omp_scan_clauses (as_a <gomp_scan *> (stmt)) != NULL;
tree lane = NULL_TREE;
gimple_seq before = NULL;
omp_context *octx = ctx->outer;
gcc_assert (octx);
if (octx->scan_exclusive && !has_clauses)
{
gimple_stmt_iterator gsi2 = *gsi_p;
gsi_next (&gsi2);
gimple *stmt2 = gsi_stmt (gsi2);
/* For exclusive scan, swap GIMPLE_OMP_SCAN without clauses
with following GIMPLE_OMP_SCAN with clauses, so that input_phase,
the one with exclusive clause(s), comes first.  */
if (stmt2
&& gimple_code (stmt2) == GIMPLE_OMP_SCAN
&& gimple_omp_scan_clauses (as_a <gomp_scan *> (stmt2)) != NULL)
{
gsi_remove (gsi_p, false);
gsi_insert_after (gsi_p, stmt, GSI_SAME_STMT);
ctx = maybe_lookup_ctx (stmt2);
gcc_assert (ctx);
lower_omp_scan (gsi_p, ctx);
return;
}
}
/* INPUT_PHASE is true when lowering the structured block that computes
   each iteration's contribution (as opposed to the scan phase that
   combines them).  */
bool input_phase = has_clauses ^ octx->scan_inclusive;
bool is_simd = (gimple_code (octx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (octx->stmt) == GF_OMP_FOR_KIND_SIMD);
bool is_for = (gimple_code (octx->stmt) == GIMPLE_OMP_FOR
&& gimple_omp_for_kind (octx->stmt) == GF_OMP_FOR_KIND_FOR
&& !gimple_omp_for_combined_p (octx->stmt));
bool is_for_simd = is_simd && gimple_omp_for_combined_into_p (octx->stmt);
if (is_for_simd && octx->for_simd_scan_phase)
is_simd = false;
if (is_simd)
if (tree c = omp_find_clause (gimple_omp_for_clauses (octx->stmt),
OMP_CLAUSE__SIMDUID_))
{
/* Obtain the per-iteration lane index used to address the
   "omp simd array" copies.  The last argument encodes which
   phase this lane request is for (1 = input, 2 = inclusive
   scan, 3 = exclusive scan).  */
tree uid = OMP_CLAUSE__SIMDUID__DECL (c);
lane = create_tmp_var (unsigned_type_node);
tree t = build_int_cst (integer_type_node,
input_phase ? 1
: octx->scan_inclusive ? 2 : 3);
gimple *g
= gimple_build_call_internal (IFN_GOMP_SIMD_LANE, 2, uid, t);
gimple_call_set_lhs (g, lane);
gimple_seq_add_stmt (&before, g);
}
if (is_simd || is_for)
{
for (tree c = gimple_omp_for_clauses (octx->stmt);
c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_INSCAN (c))
{
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
tree var = OMP_CLAUSE_DECL (c);
tree new_var = lookup_decl (var, octx);
/* VAL is the privatized per-iteration value; VAR2/VAR4 are the
   scan accumulator arrays or temporaries, VAR3 a separate
   identity-element variable when one exists, LANE0 the original
   lane index of an "omp simd array" access.  */
tree val = new_var;
tree var2 = NULL_TREE;
tree var3 = NULL_TREE;
tree var4 = NULL_TREE;
tree lane0 = NULL_TREE;
tree new_vard = new_var;
if (omp_is_reference (var))
{
new_var = build_simple_mem_ref_loc (clause_loc, new_var);
val = new_var;
}
if (DECL_HAS_VALUE_EXPR_P (new_vard))
{
val = DECL_VALUE_EXPR (new_vard);
if (new_vard != new_var)
{
gcc_assert (TREE_CODE (val) == ADDR_EXPR);
val = TREE_OPERAND (val, 0);
}
if (TREE_CODE (val) == ARRAY_REF
&& VAR_P (TREE_OPERAND (val, 0)))
{
tree v = TREE_OPERAND (val, 0);
if (lookup_attribute ("omp simd array",
DECL_ATTRIBUTES (v)))
{
/* Redirect the per-lane array access to this
   phase's lane index; remember the original index
   in LANE0 for exclusive-scan fixup below.  */
val = unshare_expr (val);
lane0 = TREE_OPERAND (val, 1);
TREE_OPERAND (val, 1) = lane;
var2 = lookup_decl (v, octx);
if (octx->scan_exclusive)
var4 = lookup_decl (var2, octx);
if (input_phase
&& OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
var3 = maybe_lookup_decl (var4 ? var4 : var2, octx);
if (!input_phase)
{
var2 = build4 (ARRAY_REF, TREE_TYPE (val),
var2, lane, NULL_TREE, NULL_TREE);
TREE_THIS_NOTRAP (var2) = 1;
if (octx->scan_exclusive)
{
var4 = build4 (ARRAY_REF, TREE_TYPE (val),
var4, lane, NULL_TREE,
NULL_TREE);
TREE_THIS_NOTRAP (var4) = 1;
}
}
else
var2 = val;
}
}
gcc_assert (var2);
}
else
{
var2 = build_outer_var_ref (var, octx);
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
var3 = maybe_lookup_decl (new_vard, octx);
if (var3 == new_vard || var3 == NULL_TREE)
var3 = NULL_TREE;
else if (is_simd && octx->scan_exclusive && !input_phase)
{
var4 = maybe_lookup_decl (var3, octx);
if (var4 == var3 || var4 == NULL_TREE)
{
if (TREE_ADDRESSABLE (TREE_TYPE (new_var)))
{
var4 = var3;
var3 = NULL_TREE;
}
else
var4 = NULL_TREE;
}
}
}
if (is_simd
&& octx->scan_exclusive
&& !input_phase
&& var4 == NULL_TREE)
var4 = create_tmp_var (TREE_TYPE (val));
}
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
/* User-defined reduction: splice in the user's initializer
   and combiner via the placeholder value-exprs.  */
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
if (input_phase)
{
if (var3)
{
/* If we've added a separate identity element
variable, copy it over into val.  */
tree x = lang_hooks.decls.omp_clause_assign_op (c, val,
var3);
gimplify_and_add (x, &before);
}
else if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
{
/* Otherwise, assign to it the identity element.  */
gimple_seq tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
if (is_for)
tseq = copy_gimple_seq_and_replace_locals (tseq);
tree ref = build_outer_var_ref (var, octx);
tree x = (DECL_HAS_VALUE_EXPR_P (new_vard)
? DECL_VALUE_EXPR (new_vard) : NULL_TREE);
if (x)
{
if (new_vard != new_var)
val = build_fold_addr_expr_loc (clause_loc, val);
SET_DECL_VALUE_EXPR (new_vard, val);
}
SET_DECL_VALUE_EXPR (placeholder, ref);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
lower_omp (&tseq, octx);
/* Restore the value-exprs overridden above.  */
if (x)
SET_DECL_VALUE_EXPR (new_vard, x);
SET_DECL_VALUE_EXPR (placeholder, NULL_TREE);
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
gimple_seq_add_seq (&before, tseq);
if (is_simd)
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
}
}
else if (is_simd)
{
tree x;
if (octx->scan_exclusive)
{
/* Exclusive scan: save the previous accumulator value
   before combining, so VAL can be set to the
   pre-combination value afterwards.  */
tree v4 = unshare_expr (var4);
tree v2 = unshare_expr (var2);
x = lang_hooks.decls.omp_clause_assign_op (c, v4, v2);
gimplify_and_add (x, &before);
}
gimple_seq tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
x = (DECL_HAS_VALUE_EXPR_P (new_vard)
? DECL_VALUE_EXPR (new_vard) : NULL_TREE);
tree vexpr = val;
if (x && new_vard != new_var)
vexpr = build_fold_addr_expr_loc (clause_loc, val);
if (x)
SET_DECL_VALUE_EXPR (new_vard, vexpr);
SET_DECL_VALUE_EXPR (placeholder, var2);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
lower_omp (&tseq, octx);
gimple_seq_add_seq (&before, tseq);
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
if (x)
SET_DECL_VALUE_EXPR (new_vard, x);
SET_DECL_VALUE_EXPR (placeholder, NULL_TREE);
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
if (octx->scan_inclusive)
{
x = lang_hooks.decls.omp_clause_assign_op (c, val,
var2);
gimplify_and_add (x, &before);
}
else if (lane0 == NULL_TREE)
{
x = lang_hooks.decls.omp_clause_assign_op (c, val,
var4);
gimplify_and_add (x, &before);
}
}
}
else
{
/* Built-in reduction operator (+, *, etc.).  */
if (input_phase)
{
/* input phase.  Set val to initializer before
the body.  */
tree x = omp_reduction_init (c, TREE_TYPE (new_var));
gimplify_assign (val, x, &before);
}
else if (is_simd)
{
/* scan phase.  */
enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
/* reduction(-:var) sums the partial results, so it is
   treated like reduction(+:var).  */
if (code == MINUS_EXPR)
code = PLUS_EXPR;
tree x = build2 (code, TREE_TYPE (var2),
unshare_expr (var2), unshare_expr (val));
if (octx->scan_inclusive)
{
gimplify_assign (unshare_expr (var2), x, &before);
gimplify_assign (val, var2, &before);
}
else
{
gimplify_assign (unshare_expr (var4),
unshare_expr (var2), &before);
gimplify_assign (var2, x, &before);
if (lane0 == NULL_TREE)
gimplify_assign (val, var4, &before);
}
}
}
if (octx->scan_exclusive && !input_phase && lane0)
{
/* For exclusive scan over a simd array, the body should read
   the saved pre-combination value at the original lane.  */
tree vexpr = unshare_expr (var4);
TREE_OPERAND (vexpr, 1) = lane0;
if (new_vard != new_var)
vexpr = build_fold_addr_expr_loc (clause_loc, vexpr);
SET_DECL_VALUE_EXPR (new_vard, vexpr);
}
}
}
if (is_simd && !is_for_simd)
{
/* Emit the prep sequence and body in place of the scan stmt.  */
gsi_insert_seq_after (gsi_p, gimple_omp_body (stmt), GSI_SAME_STMT);
gsi_insert_seq_after (gsi_p, before, GSI_SAME_STMT);
gsi_replace (gsi_p, gimple_build_nop (), true);
return;
}
lower_omp (gimple_omp_body_ptr (stmt), octx);
if (before)
{
gimple_stmt_iterator gsi = gsi_start_1 (gimple_omp_body_ptr (stmt));
gsi_insert_seq_before (&gsi, before, GSI_SAME_STMT);
}
}
/* Gimplify a GIMPLE_OMP_CRITICAL statement.  This is a relatively simple
substitution of a couple of function calls.  But in the NAMED case,
requires that languages coordinate a symbol name.  It is therefore
best put here in common code.  */
/* Map from critical-section name to the public, common mutex symbol
   shared by all translation units that use the same name.  */
static GTY(()) hash_map<tree, tree> *critical_name_mutexes;
/* Lower a GIMPLE_OMP_CRITICAL at GSI_P: wrap its body in a GIMPLE_BIND
   bracketed by GOMP_critical_start/end calls — the _name_ variants with
   the address of a per-name mutex for named critical sections.  */
static void
lower_omp_critical (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree block;
tree name, lock, unlock;
gomp_critical *stmt = as_a <gomp_critical *> (gsi_stmt (*gsi_p));
gbind *bind;
location_t loc = gimple_location (stmt);
gimple_seq tbody;
name = gimple_omp_critical_name (stmt);
if (name)
{
tree decl;
if (!critical_name_mutexes)
critical_name_mutexes = hash_map<tree, tree>::create_ggc (10);
tree *n = critical_name_mutexes->get (name);
if (n == NULL)
{
char *new_str;
/* Create the mutex variable for this name.  It is TREE_PUBLIC
   and DECL_COMMON so that uses of the same critical name in
   other translation units share one mutex at link time.  */
decl = create_tmp_var_raw (ptr_type_node);
new_str = ACONCAT ((".gomp_critical_user_",
IDENTIFIER_POINTER (name), NULL));
DECL_NAME (decl) = get_identifier (new_str);
TREE_PUBLIC (decl) = 1;
TREE_STATIC (decl) = 1;
DECL_COMMON (decl) = 1;
DECL_ARTIFICIAL (decl) = 1;
DECL_IGNORED_P (decl) = 1;
varpool_node::finalize_decl (decl);
critical_name_mutexes->put (name, decl);
}
else
decl = *n;
/* If '#pragma omp critical' is inside offloaded region or
inside function marked as offloadable, the symbol must be
marked as offloadable too.  */
omp_context *octx;
if (cgraph_node::get (current_function_decl)->offloadable)
varpool_node::get_create (decl)->offloadable = 1;
else
for (octx = ctx->outer; octx; octx = octx->outer)
if (is_gimple_omp_offloaded (octx->stmt))
{
varpool_node::get_create (decl)->offloadable = 1;
break;
}
lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_START);
lock = build_call_expr_loc (loc, lock, 1,
build_fold_addr_expr_loc (loc, decl));
unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_NAME_END);
unlock = build_call_expr_loc (loc, unlock, 1,
build_fold_addr_expr_loc (loc, decl));
}
else
{
/* Unnamed critical sections all share one runtime-internal mutex.  */
lock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_START);
lock = build_call_expr_loc (loc, lock, 0);
unlock = builtin_decl_explicit (BUILT_IN_GOMP_CRITICAL_END);
unlock = build_call_expr_loc (loc, unlock, 0);
}
push_gimplify_context ();
block = make_node (BLOCK);
bind = gimple_build_bind (NULL, NULL, block);
gsi_replace (gsi_p, bind, true);
gimple_bind_add_stmt (bind, stmt);
/* Gimplify the lock call into the bind body before the critical body.  */
tbody = gimple_bind_body (bind);
gimplify_and_add (lock, &tbody);
gimple_bind_set_body (bind, tbody);
lower_omp (gimple_omp_body_ptr (stmt), ctx);
/* Ensure the unlock call runs even if the body throws.  */
gimple_omp_set_body (stmt, maybe_catch_exception (gimple_omp_body (stmt)));
gimple_bind_add_seq (bind, gimple_omp_body (stmt));
gimple_omp_set_body (stmt, NULL);
tbody = gimple_bind_body (bind);
gimplify_and_add (unlock, &tbody);
gimple_bind_set_body (bind, tbody);
gimple_bind_add_stmt (bind, gimple_build_omp_return (true));
pop_gimplify_context (bind);
gimple_bind_append_vars (bind, ctx->block_vars);
BLOCK_VARS (block) = gimple_bind_vars (bind);
}
/* A subroutine of lower_omp_for. Generate code to emit the predicate
for a lastprivate clause. Given a loop control predicate of (V
cond N2), we gate the clause on (!(V cond N2)). The lowered form
is appended to *DLIST, iterator initialization is appended to
*BODY_P. *CLIST is for lastprivate(conditional:) code that needs
to be emitted in a critical section. */
static void
lower_omp_for_lastprivate (struct omp_for_data *fd, gimple_seq *body_p,
gimple_seq *dlist, gimple_seq *clist,
struct omp_context *ctx)
{
tree clauses, cond, vinit;
enum tree_code cond_code;
gimple_seq stmts;
/* The lastprivate copy-out must run only after the loop is done, i.e.
   when the iteration condition (V cond N2) no longer holds — so invert
   the loop's comparison code.  */
cond_code = fd->loop.cond_code;
cond_code = cond_code == LT_EXPR ? GE_EXPR : LE_EXPR;
/* When possible, use a strict equality expression.  This can let VRP
   type optimizations deduce the value and remove a copy.  */
if (tree_fits_shwi_p (fd->loop.step))
{
HOST_WIDE_INT step = tree_to_shwi (fd->loop.step);
if (step == 1 || step == -1)
cond_code = EQ_EXPR;
}
if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_GRID_LOOP
|| gimple_omp_for_grid_phony (fd->for_stmt))
cond = omp_grid_lastprivate_predicate (fd);
else
{
tree n2 = fd->loop.n2;
/* For collapsed loops combined into an outer construct with a
   non-constant total iteration count, the real upper bound lives in
   a _looptemp_ clause on the enclosing taskreg construct; dig it out
   so the predicate compares V against the correct value.  */
if (fd->collapse > 1
&& TREE_CODE (n2) != INTEGER_CST
&& gimple_omp_for_combined_into_p (fd->for_stmt))
{
struct omp_context *taskreg_ctx = NULL;
if (gimple_code (ctx->outer->stmt) == GIMPLE_OMP_FOR)
{
gomp_for *gfor = as_a <gomp_for *> (ctx->outer->stmt);
if (gimple_omp_for_kind (gfor) == GF_OMP_FOR_KIND_FOR
|| gimple_omp_for_kind (gfor) == GF_OMP_FOR_KIND_DISTRIBUTE)
{
if (gimple_omp_for_combined_into_p (gfor))
{
/* Both this loop and the outer worksharing loop are
   combined; the clauses hang off the surrounding
   parallel.  */
gcc_assert (ctx->outer->outer
&& is_parallel_ctx (ctx->outer->outer));
taskreg_ctx = ctx->outer->outer;
}
else
{
/* Outer loop is standalone: recompute its bound and
   convert it to this loop's type.  */
struct omp_for_data outer_fd;
omp_extract_for_data (gfor, &outer_fd, NULL);
n2 = fold_convert (TREE_TYPE (n2), outer_fd.loop.n2);
}
}
else if (gimple_omp_for_kind (gfor) == GF_OMP_FOR_KIND_TASKLOOP)
taskreg_ctx = ctx->outer->outer;
}
else if (is_taskreg_ctx (ctx->outer))
taskreg_ctx = ctx->outer;
if (taskreg_ctx)
{
int i;
tree taskreg_clauses
= gimple_omp_taskreg_clauses (taskreg_ctx->stmt);
tree innerc = omp_find_clause (taskreg_clauses,
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
/* Skip the first fd->collapse + 1 _looptemp_ clauses; the next
   one (if present) carries the combined upper bound.  */
for (i = 0; i < fd->collapse; i++)
{
innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
OMP_CLAUSE__LOOPTEMP_);
gcc_assert (innerc);
}
innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
OMP_CLAUSE__LOOPTEMP_);
if (innerc)
n2 = fold_convert (TREE_TYPE (n2),
lookup_decl (OMP_CLAUSE_DECL (innerc),
taskreg_ctx));
}
}
cond = build2 (cond_code, boolean_type_node, fd->loop.v, n2);
}
clauses = gimple_omp_for_clauses (fd->for_stmt);
stmts = NULL;
lower_lastprivate_clauses (clauses, cond, body_p, &stmts, clist, ctx);
if (!gimple_seq_empty_p (stmts))
{
/* Run the gated lastprivate copy-out before whatever is already
   queued in *DLIST.  */
gimple_seq_add_seq (&stmts, *dlist);
*dlist = stmts;
/* Optimize: v = 0; is usually cheaper than v = some_other_constant.  */
vinit = fd->loop.n1;
if (cond_code == EQ_EXPR
&& tree_fits_shwi_p (fd->loop.n2)
&& ! integer_zerop (fd->loop.n2))
vinit = build_int_cst (TREE_TYPE (fd->loop.v), 0);
else
vinit = unshare_expr (vinit);
/* Initialize the iterator variable, so that threads that don't execute
   any iterations don't execute the lastprivate clauses by accident.  */
gimplify_assign (fd->loop.v, vinit, body_p);
}
}
/* Callback for walk_gimple_seq. Find #pragma omp scan statement. */
static tree
omp_find_scan (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
struct walk_stmt_info *wi)
{
gimple *stmt = gsi_stmt (*gsi_p);
/* By default do not descend into operands; the cases below opt back in
   where a scan directive could legitimately be nested.  */
*handled_ops_p = true;
switch (gimple_code (stmt))
{
WALK_SUBSTMTS;
case GIMPLE_OMP_FOR:
/* Walk into a combined inner simd loop — for a "for simd" the scan
   directive lives inside the simd body.  */
if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_SIMD
&& gimple_omp_for_combined_into_p (stmt))
*handled_ops_p = false;
break;
case GIMPLE_OMP_SCAN:
/* Found it: record its iterator through WI->INFO and return a
   non-NULL tree to terminate the walk.  */
*(gimple_stmt_iterator *) (wi->info) = *gsi_p;
return integer_zero_node;
default:
break;
}
return NULL;
}
/* Helper function for lower_omp_for, add transformations for a worksharing
loop with scan directives inside of it.
For worksharing loop not combined with simd, transform:
#pragma omp for reduction(inscan,+:r) private(i)
for (i = 0; i < n; i = i + 1)
{
{
update (r);
}
#pragma omp scan inclusive(r)
{
use (r);
}
}
into two worksharing loops + code to merge results:
num_threads = omp_get_num_threads ();
thread_num = omp_get_thread_num ();
if (thread_num == 0) goto <D.2099>; else goto <D.2100>;
<D.2099>:
var2 = r;
goto <D.2101>;
<D.2100>:
// For UDRs this is UDR init, or if ctors are needed, copy from
// var3 that has been constructed to contain the neutral element.
var2 = 0;
<D.2101>:
ivar = 0;
// The _scantemp_ clauses will arrange for rpriva to be initialized to
// a shared array with num_threads elements and rprivb to a local array
// number of elements equal to the number of (contiguous) iterations the
// current thread will perform. controlb and controlp variables are
// temporaries to handle deallocation of rprivb at the end of second
// GOMP_FOR.
#pragma omp for _scantemp_(rpriva) _scantemp_(rprivb) _scantemp_(controlb) \
_scantemp_(controlp) reduction(inscan,+:r) private(i) nowait
for (i = 0; i < n; i = i + 1)
{
{
// For UDRs this is UDR init or copy from var3.
r = 0;
// This is the input phase from user code.
update (r);
}
{
// For UDRs this is UDR merge.
var2 = var2 + r;
// Rather than handing it over to the user, save to local thread's
// array.
rprivb[ivar] = var2;
// For exclusive scan, the above two statements are swapped.
ivar = ivar + 1;
}
}
// And remember the final value from this thread's into the shared
// rpriva array.
rpriva[(sizetype) thread_num] = var2;
// If more than one thread, compute using Work-Efficient prefix sum
// the inclusive parallel scan of the rpriva array.
if (num_threads > 1) goto <D.2102>; else goto <D.2103>;
<D.2102>:
GOMP_barrier ();
down = 0;
k = 1;
num_threadsu = (unsigned int) num_threads;
thread_nump1 = (unsigned int) thread_num + 1;
<D.2108>:
twok = k << 1;
if (twok > num_threadsu) goto <D.2110>; else goto <D.2111>;
<D.2110>:
down = 4294967295;
k = k >> 1;
if (k == num_threadsu) goto <D.2112>; else goto <D.2111>;
<D.2112>:
k = k >> 1;
<D.2111>:
twok = k << 1;
cplx = .MUL_OVERFLOW (thread_nump1, twok);
mul = REALPART_EXPR <cplx>;
ovf = IMAGPART_EXPR <cplx>;
if (ovf == 0) goto <D.2116>; else goto <D.2117>;
<D.2116>:
andv = k & down;
andvm1 = andv + 4294967295;
l = mul + andvm1;
if (l < num_threadsu) goto <D.2120>; else goto <D.2117>;
<D.2120>:
// For UDRs this is UDR merge, performed using var2 variable as temporary,
// i.e. var2 = rpriva[l - k]; UDR merge (var2, rpriva[l]); rpriva[l] = var2;
rpriva[l] = rpriva[l - k] + rpriva[l];
<D.2117>:
if (down == 0) goto <D.2121>; else goto <D.2122>;
<D.2121>:
k = k << 1;
goto <D.2123>;
<D.2122>:
k = k >> 1;
<D.2123>:
GOMP_barrier ();
if (k != 0) goto <D.2108>; else goto <D.2103>;
<D.2103>:
if (thread_num == 0) goto <D.2124>; else goto <D.2125>;
<D.2124>:
// For UDRs this is UDR init or copy from var3.
var2 = 0;
goto <D.2126>;
<D.2125>:
var2 = rpriva[thread_num - 1];
<D.2126>:
ivar = 0;
#pragma omp for _scantemp_(controlb) _scantemp_(controlp) \
reduction(inscan,+:r) private(i)
for (i = 0; i < n; i = i + 1)
{
{
// For UDRs, this is r = var2; UDR merge (r, rprivb[ivar]);
r = var2 + rprivb[ivar];
}
{
// This is the scan phase from user code.
use (r);
// Plus a bump of the iterator.
ivar = ivar + 1;
}
} */
static void
lower_omp_for_scan (gimple_seq *body_p, gimple_seq *dlist, gomp_for *stmt,
struct omp_for_data *fd, omp_context *ctx)
{
bool is_for_simd = gimple_omp_for_combined_p (stmt);
gcc_assert (ctx->scan_inclusive || ctx->scan_exclusive);
/* Locate the #pragma omp scan separating the input phase from the scan
   phase in the original loop body.  */
gimple_seq body = gimple_omp_body (stmt);
gimple_stmt_iterator input1_gsi = gsi_none ();
struct walk_stmt_info wi;
memset (&wi, 0, sizeof (wi));
wi.val_only = true;
wi.info = (void *) &input1_gsi;
walk_gimple_seq_mod (&body, omp_find_scan, NULL, &wi);
gcc_assert (!gsi_end_p (input1_gsi));
/* INPUT_STMT1 holds the input phase, SCAN_STMT1 is the GIMPLE_OMP_SCAN
   immediately following it.  */
gimple *input_stmt1 = gsi_stmt (input1_gsi);
gimple_stmt_iterator gsi = input1_gsi;
gsi_next (&gsi);
gimple_stmt_iterator scan1_gsi = gsi;
gimple *scan_stmt1 = gsi_stmt (gsi);
gcc_assert (scan_stmt1 && gimple_code (scan_stmt1) == GIMPLE_OMP_SCAN);
gimple_seq input_body = gimple_omp_body (input_stmt1);
gimple_seq scan_body = gimple_omp_body (scan_stmt1);
/* Temporarily detach the phase bodies so copying below does not
   duplicate them; NEW_STMT/NEW_BODY become the second worksharing
   loop of the transformation.  */
gimple_omp_set_body (input_stmt1, NULL);
gimple_omp_set_body (scan_stmt1, NULL);
gimple_omp_set_body (stmt, NULL);
gomp_for *new_stmt = as_a <gomp_for *> (gimple_copy (stmt));
gimple_seq new_body = copy_gimple_seq_and_replace_locals (body);
gimple_omp_set_body (stmt, body);
gimple_omp_set_body (input_stmt1, input_body)_
/* Find the copied scan separator in the duplicated body.  */;
gimple_stmt_iterator input2_gsi = gsi_none ();
memset (&wi, 0, sizeof (wi));
wi.val_only = true;
wi.info = (void *) &input2_gsi;
walk_gimple_seq_mod (&new_body, omp_find_scan, NULL, &wi);
gcc_assert (!gsi_end_p (input2_gsi));
gimple *input_stmt2 = gsi_stmt (input2_gsi);
gsi = input2_gsi;
gsi_next (&gsi);
gimple_stmt_iterator scan2_gsi = gsi;
gimple *scan_stmt2 = gsi_stmt (gsi);
gcc_assert (scan_stmt2 && gimple_code (scan_stmt2) == GIMPLE_OMP_SCAN);
/* The copy of the loop keeps the user's scan-phase body.  */
gimple_omp_set_body (scan_stmt2, scan_body);
/* For a combined "for simd", each phase body contains an inner simd
   loop with its own scan directive; locate those too, plus the simd
   contexts used to look up the privatized reduction vars.  */
gimple_stmt_iterator input3_gsi = gsi_none ();
gimple_stmt_iterator scan3_gsi = gsi_none ();
gimple_stmt_iterator input4_gsi = gsi_none ();
gimple_stmt_iterator scan4_gsi = gsi_none ();
gimple *input_stmt3 = NULL, *scan_stmt3 = NULL;
gimple *input_stmt4 = NULL, *scan_stmt4 = NULL;
omp_context *input_simd_ctx = NULL, *scan_simd_ctx = NULL;
if (is_for_simd)
{
memset (&wi, 0, sizeof (wi));
wi.val_only = true;
wi.info = (void *) &input3_gsi;
walk_gimple_seq_mod (&input_body, omp_find_scan, NULL, &wi);
gcc_assert (!gsi_end_p (input3_gsi));
input_stmt3 = gsi_stmt (input3_gsi);
gsi = input3_gsi;
gsi_next (&gsi);
scan3_gsi = gsi;
scan_stmt3 = gsi_stmt (gsi);
gcc_assert (scan_stmt3 && gimple_code (scan_stmt3) == GIMPLE_OMP_SCAN);
memset (&wi, 0, sizeof (wi));
wi.val_only = true;
wi.info = (void *) &input4_gsi;
walk_gimple_seq_mod (&scan_body, omp_find_scan, NULL, &wi);
gcc_assert (!gsi_end_p (input4_gsi));
input_stmt4 = gsi_stmt (input4_gsi);
gsi = input4_gsi;
gsi_next (&gsi);
scan4_gsi = gsi;
scan_stmt4 = gsi_stmt (gsi);
gcc_assert (scan_stmt4 && gimple_code (scan_stmt4) == GIMPLE_OMP_SCAN);
input_simd_ctx = maybe_lookup_ctx (input_stmt3)->outer;
scan_simd_ctx = maybe_lookup_ctx (input_stmt4)->outer;
}
/* Emit num_threads = omp_get_num_threads ();
   thread_num = omp_get_thread_num ();  */
tree num_threads = create_tmp_var (integer_type_node);
tree thread_num = create_tmp_var (integer_type_node);
tree nthreads_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
tree threadnum_decl = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
gimple *g = gimple_build_call (nthreads_decl, 0);
gimple_call_set_lhs (g, num_threads);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_call (threadnum_decl, 0);
gimple_call_set_lhs (g, thread_num);
gimple_seq_add_stmt (body_p, g);
/* IVAR indexes the per-thread rprivb array; K and L drive the
   work-efficient prefix-sum loop described in the function comment.  */
tree ivar = create_tmp_var (sizetype);
tree new_clauses1 = NULL_TREE, new_clauses2 = NULL_TREE;
tree *cp1 = &new_clauses1, *cp2 = &new_clauses2;
tree k = create_tmp_var (unsigned_type_node);
tree l = create_tmp_var (unsigned_type_node);
gimple_seq clist = NULL, mdlist = NULL;
gimple_seq thr01_list = NULL, thrn1_list = NULL;
gimple_seq thr02_list = NULL, thrn2_list = NULL;
gimple_seq scan1_list = NULL, input2_list = NULL;
gimple_seq last_list = NULL, reduc_list = NULL;
/* For each inscan reduction clause build the statement lists sketched
   in the function comment: thread-0 vs other-thread prologues
   (thr01/thrn1 before the first loop, thr02/thrn2 before the second),
   the first loop's scan phase (scan1), the second loop's input phase
   (input2), the per-thread store to rpriva (mdlist), the final
   copy-out (last), and the prefix-sum combiner (reduc).  */
for (tree c = gimple_omp_for_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
&& OMP_CLAUSE_REDUCTION_INSCAN (c))
{
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
tree var = OMP_CLAUSE_DECL (c);
tree new_var = lookup_decl (var, ctx);
tree var3 = NULL_TREE;
tree new_vard = new_var;
if (omp_is_reference (var))
new_var = build_simple_mem_ref_loc (clause_loc, new_var);
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
var3 = maybe_lookup_decl (new_vard, ctx);
if (var3 == new_vard)
var3 = NULL_TREE;
}
/* RPRIVA: shared array with one slot per thread; RPRIVB: per-thread
   array with one slot per iteration of this thread.  Both are
   materialized later via the _scantemp_ clauses added here.  */
tree ptype = build_pointer_type (TREE_TYPE (new_var));
tree rpriva = create_tmp_var (ptype);
tree nc = build_omp_clause (clause_loc, OMP_CLAUSE__SCANTEMP_);
OMP_CLAUSE_DECL (nc) = rpriva;
*cp1 = nc;
cp1 = &OMP_CLAUSE_CHAIN (nc);
tree rprivb = create_tmp_var (ptype);
nc = build_omp_clause (clause_loc, OMP_CLAUSE__SCANTEMP_);
OMP_CLAUSE_DECL (nc) = rprivb;
OMP_CLAUSE__SCANTEMP__ALLOC (nc) = 1;
*cp1 = nc;
cp1 = &OMP_CLAUSE_CHAIN (nc);
/* VAR2 is this thread's running accumulator.  */
tree var2 = create_tmp_var_raw (TREE_TYPE (new_var));
if (new_vard != new_var)
TREE_ADDRESSABLE (var2) = 1;
gimple_add_tmp_var (var2);
/* rpriva[thread_num]  */
tree x = fold_convert_loc (clause_loc, sizetype, thread_num);
x = fold_build2_loc (clause_loc, MULT_EXPR, sizetype, x,
TYPE_SIZE_UNIT (TREE_TYPE (ptype)));
x = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (rpriva), rpriva, x);
tree rpriva_ref = build_simple_mem_ref_loc (clause_loc, x);
/* rpriva[thread_num - 1]  */
x = fold_build2_loc (clause_loc, PLUS_EXPR, integer_type_node,
thread_num, integer_minus_one_node);
x = fold_convert_loc (clause_loc, sizetype, x);
x = fold_build2_loc (clause_loc, MULT_EXPR, sizetype, x,
TYPE_SIZE_UNIT (TREE_TYPE (ptype)));
x = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (rpriva), rpriva, x);
tree rprivam1_ref = build_simple_mem_ref_loc (clause_loc, x);
/* rpriva[l]  */
x = fold_convert_loc (clause_loc, sizetype, l);
x = fold_build2_loc (clause_loc, MULT_EXPR, sizetype, x,
TYPE_SIZE_UNIT (TREE_TYPE (ptype)));
x = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (rpriva), rpriva, x);
tree rprival_ref = build_simple_mem_ref_loc (clause_loc, x);
/* rpriva[l - k]  */
x = fold_build2_loc (clause_loc, MINUS_EXPR, unsigned_type_node, l, k);
x = fold_convert_loc (clause_loc, sizetype, x);
x = fold_build2_loc (clause_loc, MULT_EXPR, sizetype, x,
TYPE_SIZE_UNIT (TREE_TYPE (ptype)));
x = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (rpriva), rpriva, x);
tree rprivalmk_ref = build_simple_mem_ref_loc (clause_loc, x);
/* rprivb[ivar]  */
x = fold_build2_loc (clause_loc, MULT_EXPR, sizetype, ivar,
TYPE_SIZE_UNIT (TREE_TYPE (ptype)));
x = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (rprivb), rprivb, x);
tree rprivb_ref = build_simple_mem_ref_loc (clause_loc, x);
tree var4 = is_for_simd ? new_var : var2;
tree var5 = NULL_TREE, var6 = NULL_TREE;
if (is_for_simd)
{
var5 = lookup_decl (var, input_simd_ctx);
var6 = lookup_decl (var, scan_simd_ctx);
if (new_vard != new_var)
{
var5 = build_simple_mem_ref_loc (clause_loc, var5);
var6 = build_simple_mem_ref_loc (clause_loc, var6);
}
}
if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
{
/* User-defined reduction: go through the language hooks for
   ctor/assign/dtor and the recorded GIMPLE init/merge seqs.  */
tree placeholder = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c);
tree val = var2;
x = lang_hooks.decls.omp_clause_default_ctor
(c, var2, build_outer_var_ref (var, ctx));
if (x)
gimplify_and_add (x, &clist);
/* Thread 0 seeds its accumulator from the original list item.  */
x = build_outer_var_ref (var, ctx);
x = lang_hooks.decls.omp_clause_assign_op (c, unshare_expr (var4),
x);
gimplify_and_add (x, &thr01_list);
tree y = (DECL_HAS_VALUE_EXPR_P (new_vard)
? DECL_VALUE_EXPR (new_vard) : NULL_TREE);
if (var3)
{
/* Other threads copy the pre-constructed neutral element.  */
x = unshare_expr (var4);
x = lang_hooks.decls.omp_clause_assign_op (c, x, var3);
gimplify_and_add (x, &thrn1_list);
x = unshare_expr (var4);
x = lang_hooks.decls.omp_clause_assign_op (c, x, var3);
gimplify_and_add (x, &thr02_list);
}
else if (OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c))
{
/* Otherwise, assign to it the identity element.  */
gimple_seq tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
tseq = copy_gimple_seq_and_replace_locals (tseq);
if (!is_for_simd)
{
if (new_vard != new_var)
val = build_fold_addr_expr_loc (clause_loc, val);
SET_DECL_VALUE_EXPR (new_vard, val);
DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
}
SET_DECL_VALUE_EXPR (placeholder, error_mark_node);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
lower_omp (&tseq, ctx);
gimple_seq_add_seq (&thrn1_list, tseq);
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c);
lower_omp (&tseq, ctx);
gimple_seq_add_seq (&thr02_list, tseq);
SET_DECL_VALUE_EXPR (placeholder, NULL_TREE);
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c) = NULL;
if (y)
SET_DECL_VALUE_EXPR (new_vard, y);
else
{
DECL_HAS_VALUE_EXPR_P (new_vard) = 0;
SET_DECL_VALUE_EXPR (new_vard, NULL_TREE);
}
}
/* Before the second loop, non-zero threads load the exclusive
   prefix from rpriva[thread_num - 1].  */
x = unshare_expr (var4);
x = lang_hooks.decls.omp_clause_assign_op (c, x, rprivam1_ref);
gimplify_and_add (x, &thrn2_list);
if (is_for_simd)
{
/* First loop's scan phase: stash the simd-private value.  */
x = unshare_expr (rprivb_ref);
x = lang_hooks.decls.omp_clause_assign_op (c, x, var5);
gimplify_and_add (x, &scan1_list);
}
else
{
/* Exclusive scan stores rprivb[ivar] before the merge,
   inclusive scan after it.  */
if (ctx->scan_exclusive)
{
x = unshare_expr (rprivb_ref);
x = lang_hooks.decls.omp_clause_assign_op (c, x, var2);
gimplify_and_add (x, &scan1_list);
}
gimple_seq tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
tseq = copy_gimple_seq_and_replace_locals (tseq);
SET_DECL_VALUE_EXPR (placeholder, var2);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
lower_omp (&tseq, ctx);
gimple_seq_add_seq (&scan1_list, tseq);
if (ctx->scan_inclusive)
{
x = unshare_expr (rprivb_ref);
x = lang_hooks.decls.omp_clause_assign_op (c, x, var2);
gimplify_and_add (x, &scan1_list);
}
}
/* rpriva[thread_num] = <this thread's final accumulator>  */
x = unshare_expr (rpriva_ref);
x = lang_hooks.decls.omp_clause_assign_op (c, x,
unshare_expr (var4));
gimplify_and_add (x, &mdlist);
/* Second loop's input phase: r = var4 merged with rprivb[ivar].  */
x = unshare_expr (is_for_simd ? var6 : new_var);
x = lang_hooks.decls.omp_clause_assign_op (c, x, var4);
gimplify_and_add (x, &input2_list);
val = rprivb_ref;
if (new_vard != new_var)
val = build_fold_addr_expr_loc (clause_loc, val);
gimple_seq tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
tseq = copy_gimple_seq_and_replace_locals (tseq);
SET_DECL_VALUE_EXPR (new_vard, val);
DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
if (is_for_simd)
{
SET_DECL_VALUE_EXPR (placeholder, var6);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
}
else
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
lower_omp (&tseq, ctx);
if (y)
SET_DECL_VALUE_EXPR (new_vard, y);
else
{
DECL_HAS_VALUE_EXPR_P (new_vard) = 0;
SET_DECL_VALUE_EXPR (new_vard, NULL_TREE);
}
if (!is_for_simd)
{
SET_DECL_VALUE_EXPR (placeholder, new_var);
DECL_HAS_VALUE_EXPR_P (placeholder) = 1;
lower_omp (&tseq, ctx);
}
gimple_seq_add_seq (&input2_list, tseq);
/* Final copy-out of the total reduction into the list item.  */
x = build_outer_var_ref (var, ctx);
x = lang_hooks.decls.omp_clause_assign_op (c, x, rpriva_ref);
gimplify_and_add (x, &last_list);
/* Prefix-sum combiner: var2 = rpriva[l - k]; merge (var2,
   rpriva[l]); rpriva[l] = var2;  */
x = lang_hooks.decls.omp_clause_assign_op (c, var2, rprivalmk_ref);
gimplify_and_add (x, &reduc_list);
tseq = OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c);
tseq = copy_gimple_seq_and_replace_locals (tseq);
val = rprival_ref;
if (new_vard != new_var)
val = build_fold_addr_expr_loc (clause_loc, val);
SET_DECL_VALUE_EXPR (new_vard, val);
DECL_HAS_VALUE_EXPR_P (new_vard) = 1;
SET_DECL_VALUE_EXPR (placeholder, var2);
lower_omp (&tseq, ctx);
OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c) = NULL;
SET_DECL_VALUE_EXPR (placeholder, NULL_TREE);
DECL_HAS_VALUE_EXPR_P (placeholder) = 0;
if (y)
SET_DECL_VALUE_EXPR (new_vard, y);
else
{
DECL_HAS_VALUE_EXPR_P (new_vard) = 0;
SET_DECL_VALUE_EXPR (new_vard, NULL_TREE);
}
gimple_seq_add_seq (&reduc_list, tseq);
x = lang_hooks.decls.omp_clause_assign_op (c, rprival_ref, var2);
gimplify_and_add (x, &reduc_list);
x = lang_hooks.decls.omp_clause_dtor (c, var2);
if (x)
gimplify_and_add (x, dlist);
}
else
{
/* Plain operator reduction: open-code the combiner.  */
x = build_outer_var_ref (var, ctx);
gimplify_assign (unshare_expr (var4), x, &thr01_list);
x = omp_reduction_init (c, TREE_TYPE (new_var));
gimplify_assign (unshare_expr (var4), unshare_expr (x),
&thrn1_list);
gimplify_assign (unshare_expr (var4), x, &thr02_list);
gimplify_assign (unshare_expr (var4), rprivam1_ref, &thrn2_list);
enum tree_code code = OMP_CLAUSE_REDUCTION_CODE (c);
if (code == MINUS_EXPR)
code = PLUS_EXPR;
if (is_for_simd)
gimplify_assign (unshare_expr (rprivb_ref), var5, &scan1_list);
else
{
if (ctx->scan_exclusive)
gimplify_assign (unshare_expr (rprivb_ref), var2,
&scan1_list);
x = build2 (code, TREE_TYPE (new_var), var2, new_var);
gimplify_assign (var2, x, &scan1_list);
if (ctx->scan_inclusive)
gimplify_assign (unshare_expr (rprivb_ref), var2,
&scan1_list);
}
gimplify_assign (unshare_expr (rpriva_ref), unshare_expr (var4),
&mdlist);
x = build2 (code, TREE_TYPE (new_var), var4, rprivb_ref);
gimplify_assign (is_for_simd ? var6 : new_var, x, &input2_list);
gimplify_assign (build_outer_var_ref (var, ctx), rpriva_ref,
&last_list);
x = build2 (code, TREE_TYPE (new_var), rprivalmk_ref,
unshare_expr (rprival_ref));
gimplify_assign (rprival_ref, x, &reduc_list);
}
}
/* Bump IVAR once per iteration: in the first loop's scan phase and in
   the second loop's scan phase.  */
g = gimple_build_assign (ivar, PLUS_EXPR, ivar, size_one_node);
gimple_seq_add_stmt (&scan1_list, g);
g = gimple_build_assign (ivar, PLUS_EXPR, ivar, size_one_node);
gimple_seq_add_stmt (gimple_omp_body_ptr (is_for_simd
? scan_stmt4 : scan_stmt2), g);
/* CONTROLB/CONTROLP handle deallocation of rprivb after the second
   GOMP_FOR; register them as _scantemp_ control clauses on both
   loops.  */
tree controlb = create_tmp_var (boolean_type_node);
tree controlp = create_tmp_var (ptr_type_node);
tree nc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SCANTEMP_);
OMP_CLAUSE_DECL (nc) = controlb;
OMP_CLAUSE__SCANTEMP__CONTROL (nc) = 1;
*cp1 = nc;
cp1 = &OMP_CLAUSE_CHAIN (nc);
nc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SCANTEMP_);
OMP_CLAUSE_DECL (nc) = controlp;
OMP_CLAUSE__SCANTEMP__CONTROL (nc) = 1;
*cp1 = nc;
cp1 = &OMP_CLAUSE_CHAIN (nc);
nc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SCANTEMP_);
OMP_CLAUSE_DECL (nc) = controlb;
OMP_CLAUSE__SCANTEMP__CONTROL (nc) = 1;
*cp2 = nc;
cp2 = &OMP_CLAUSE_CHAIN (nc);
nc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__SCANTEMP_);
OMP_CLAUSE_DECL (nc) = controlp;
OMP_CLAUSE__SCANTEMP__CONTROL (nc) = 1;
*cp2 = nc;
cp2 = &OMP_CLAUSE_CHAIN (nc);
/* Prepend the new _scantemp_ clauses to each loop's clause chain.  */
*cp1 = gimple_omp_for_clauses (stmt);
gimple_omp_for_set_clauses (stmt, new_clauses1);
*cp2 = gimple_omp_for_clauses (new_stmt);
gimple_omp_for_set_clauses (new_stmt, new_clauses2);
/* Splice the generated lists into the loop bodies, replacing the
   input/scan wrapper statements with their (augmented) bodies.  */
if (is_for_simd)
{
gimple_seq_add_seq (gimple_omp_body_ptr (scan_stmt3), scan1_list);
gimple_seq_add_seq (gimple_omp_body_ptr (input_stmt4), input2_list);
gsi_insert_seq_after (&input3_gsi, gimple_omp_body (input_stmt3),
GSI_SAME_STMT);
gsi_remove (&input3_gsi, true);
gsi_insert_seq_after (&scan3_gsi, gimple_omp_body (scan_stmt3),
GSI_SAME_STMT);
gsi_remove (&scan3_gsi, true);
gsi_insert_seq_after (&input4_gsi, gimple_omp_body (input_stmt4),
GSI_SAME_STMT);
gsi_remove (&input4_gsi, true);
gsi_insert_seq_after (&scan4_gsi, gimple_omp_body (scan_stmt4),
GSI_SAME_STMT);
gsi_remove (&scan4_gsi, true);
}
else
{
gimple_omp_set_body (scan_stmt1, scan1_list);
gimple_omp_set_body (input_stmt2, input2_list);
}
gsi_insert_seq_after (&input1_gsi, gimple_omp_body (input_stmt1),
GSI_SAME_STMT);
gsi_remove (&input1_gsi, true);
gsi_insert_seq_after (&scan1_gsi, gimple_omp_body (scan_stmt1),
GSI_SAME_STMT);
gsi_remove (&scan1_gsi, true);
gsi_insert_seq_after (&input2_gsi, gimple_omp_body (input_stmt2),
GSI_SAME_STMT);
gsi_remove (&input2_gsi, true);
gsi_insert_seq_after (&scan2_gsi, gimple_omp_body (scan_stmt2),
GSI_SAME_STMT);
gsi_remove (&scan2_gsi, true);
/* Emit the UDR constructors, then the prologue of the first loop:
   thread 0 seeds its accumulator from the list item, others use the
   identity element (see <D.2099>/<D.2100> in the function comment).  */
gimple_seq_add_seq (body_p, clist);
tree lab1 = create_artificial_label (UNKNOWN_LOCATION);
tree lab2 = create_artificial_label (UNKNOWN_LOCATION);
tree lab3 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (EQ_EXPR, thread_num, integer_zero_node, lab1, lab2);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab1);
gimple_seq_add_stmt (body_p, g);
gimple_seq_add_seq (body_p, thr01_list);
g = gimple_build_goto (lab3);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab2);
gimple_seq_add_stmt (body_p, g);
gimple_seq_add_seq (body_p, thrn1_list);
g = gimple_build_label (lab3);
gimple_seq_add_stmt (body_p, g);
/* ivar = 0; then emit the first (input-phase) worksharing loop.  */
g = gimple_build_assign (ivar, size_zero_node);
gimple_seq_add_stmt (body_p, g);
gimple_seq_add_stmt (body_p, stmt);
gimple_seq_add_seq (body_p, body);
gimple_seq_add_stmt (body_p, gimple_build_omp_continue (fd->loop.v,
fd->loop.v));
g = gimple_build_omp_return (true);
gimple_seq_add_stmt (body_p, g);
/* rpriva[thread_num] = <accumulator>;  */
gimple_seq_add_seq (body_p, mdlist);
/* If more than one thread, run the work-efficient parallel prefix sum
   over rpriva, one barrier per round (see the function comment).  */
lab1 = create_artificial_label (UNKNOWN_LOCATION);
lab2 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (GT_EXPR, num_threads, integer_one_node, lab1, lab2);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab1);
gimple_seq_add_stmt (body_p, g);
g = omp_build_barrier (NULL);
gimple_seq_add_stmt (body_p, g);
/* DOWN is 0 during the up-sweep, all-ones during the down-sweep.  */
tree down = create_tmp_var (unsigned_type_node);
g = gimple_build_assign (down, build_zero_cst (unsigned_type_node));
gimple_seq_add_stmt (body_p, g);
g = gimple_build_assign (k, build_one_cst (unsigned_type_node));
gimple_seq_add_stmt (body_p, g);
tree num_threadsu = create_tmp_var (unsigned_type_node);
g = gimple_build_assign (num_threadsu, NOP_EXPR, num_threads);
gimple_seq_add_stmt (body_p, g);
tree thread_numu = create_tmp_var (unsigned_type_node);
g = gimple_build_assign (thread_numu, NOP_EXPR, thread_num);
gimple_seq_add_stmt (body_p, g);
tree thread_nump1 = create_tmp_var (unsigned_type_node);
g = gimple_build_assign (thread_nump1, PLUS_EXPR, thread_numu,
build_int_cst (unsigned_type_node, 1));
gimple_seq_add_stmt (body_p, g);
lab3 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_label (lab3);
gimple_seq_add_stmt (body_p, g);
tree twok = create_tmp_var (unsigned_type_node);
g = gimple_build_assign (twok, LSHIFT_EXPR, k, integer_one_node);
gimple_seq_add_stmt (body_p, g);
/* Once 2*k exceeds num_threads, switch to the down-sweep phase.  */
tree lab4 = create_artificial_label (UNKNOWN_LOCATION);
tree lab5 = create_artificial_label (UNKNOWN_LOCATION);
tree lab6 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (GT_EXPR, twok, num_threadsu, lab4, lab5);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab4);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_assign (down, build_all_ones_cst (unsigned_type_node));
gimple_seq_add_stmt (body_p, g);
g = gimple_build_assign (k, RSHIFT_EXPR, k, integer_one_node);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_cond (EQ_EXPR, k, num_threadsu, lab6, lab5);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab6);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_assign (k, RSHIFT_EXPR, k, integer_one_node);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab5);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_assign (twok, LSHIFT_EXPR, k, integer_one_node);
gimple_seq_add_stmt (body_p, g);
/* l = (thread_num + 1) * (2 * k) + ((k & down) - 1), with the multiply
   guarded against unsigned overflow via .MUL_OVERFLOW.  */
tree cplx = create_tmp_var (build_complex_type (unsigned_type_node, false));
DECL_GIMPLE_REG_P (cplx) = 1;
g = gimple_build_call_internal (IFN_MUL_OVERFLOW, 2, thread_nump1, twok);
gimple_call_set_lhs (g, cplx);
gimple_seq_add_stmt (body_p, g);
tree mul = create_tmp_var (unsigned_type_node);
g = gimple_build_assign (mul, REALPART_EXPR,
build1 (REALPART_EXPR, unsigned_type_node, cplx));
gimple_seq_add_stmt (body_p, g);
tree ovf = create_tmp_var (unsigned_type_node);
g = gimple_build_assign (ovf, IMAGPART_EXPR,
build1 (IMAGPART_EXPR, unsigned_type_node, cplx));
gimple_seq_add_stmt (body_p, g);
tree lab7 = create_artificial_label (UNKNOWN_LOCATION);
tree lab8 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (EQ_EXPR, ovf, build_zero_cst (unsigned_type_node),
lab7, lab8);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab7);
gimple_seq_add_stmt (body_p, g);
tree andv = create_tmp_var (unsigned_type_node);
g = gimple_build_assign (andv, BIT_AND_EXPR, k, down);
gimple_seq_add_stmt (body_p, g);
tree andvm1 = create_tmp_var (unsigned_type_node);
g = gimple_build_assign (andvm1, PLUS_EXPR, andv,
build_minus_one_cst (unsigned_type_node));
gimple_seq_add_stmt (body_p, g);
g = gimple_build_assign (l, PLUS_EXPR, mul, andvm1);
gimple_seq_add_stmt (body_p, g);
tree lab9 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (LT_EXPR, l, num_threadsu, lab9, lab8);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab9);
gimple_seq_add_stmt (body_p, g);
/* rpriva[l] = rpriva[l - k] <op> rpriva[l];  */
gimple_seq_add_seq (body_p, reduc_list);
g = gimple_build_label (lab8);
gimple_seq_add_stmt (body_p, g);
/* Double K while sweeping up, halve it while sweeping down; iterate
   until K reaches zero, with a barrier each round.  */
tree lab10 = create_artificial_label (UNKNOWN_LOCATION);
tree lab11 = create_artificial_label (UNKNOWN_LOCATION);
tree lab12 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (EQ_EXPR, down, build_zero_cst (unsigned_type_node),
lab10, lab11);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab10);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_assign (k, LSHIFT_EXPR, k, integer_one_node);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_goto (lab12);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab11);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_assign (k, RSHIFT_EXPR, k, integer_one_node);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab12);
gimple_seq_add_stmt (body_p, g);
g = omp_build_barrier (NULL);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_cond (NE_EXPR, k, build_zero_cst (unsigned_type_node),
lab3, lab2);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab2);
gimple_seq_add_stmt (body_p, g);
/* Prologue of the second loop: thread 0 resets its accumulator to the
   identity, others load the exclusive prefix rpriva[thread_num - 1].  */
lab1 = create_artificial_label (UNKNOWN_LOCATION);
lab2 = create_artificial_label (UNKNOWN_LOCATION);
lab3 = create_artificial_label (UNKNOWN_LOCATION);
g = gimple_build_cond (EQ_EXPR, thread_num, integer_zero_node, lab1, lab2);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab1);
gimple_seq_add_stmt (body_p, g);
gimple_seq_add_seq (body_p, thr02_list);
g = gimple_build_goto (lab3);
gimple_seq_add_stmt (body_p, g);
g = gimple_build_label (lab2);
gimple_seq_add_stmt (body_p, g);
gimple_seq_add_seq (body_p, thrn2_list);
g = gimple_build_label (lab3);
gimple_seq_add_stmt (body_p, g);
/* ivar = 0; then emit the second (scan-phase) worksharing loop.  */
g = gimple_build_assign (ivar, size_zero_node);
gimple_seq_add_stmt (body_p, g);
gimple_seq_add_stmt (body_p, new_stmt);
gimple_seq_add_seq (body_p, new_body);
/* Only the last thread copies the final reduction result back to the
   original list item; prepend that guarded copy-out to *DLIST.  */
gimple_seq new_dlist = NULL;
lab1 = create_artificial_label (UNKNOWN_LOCATION);
lab2 = create_artificial_label (UNKNOWN_LOCATION);
tree num_threadsm1 = create_tmp_var (integer_type_node);
g = gimple_build_assign (num_threadsm1, PLUS_EXPR, num_threads,
integer_minus_one_node);
gimple_seq_add_stmt (&new_dlist, g);
g = gimple_build_cond (EQ_EXPR, thread_num, num_threadsm1, lab1, lab2);
gimple_seq_add_stmt (&new_dlist, g);
g = gimple_build_label (lab1);
gimple_seq_add_stmt (&new_dlist, g);
gimple_seq_add_seq (&new_dlist, last_list);
g = gimple_build_label (lab2);
gimple_seq_add_stmt (&new_dlist, g);
gimple_seq_add_seq (&new_dlist, *dlist);
*dlist = new_dlist;
}
/* Lower code for an OMP loop directive (GIMPLE_OMP_FOR).  Replaces the
   statement at GSI_P with a GIMPLE_BIND that contains, in order: the
   data-environment setup (input clauses, pre-body, lowered header
   expressions), the loop statement itself with its lowered body, the
   continue marker, and the exit sequences (reductions, lastprivate,
   destructors, region return).  CTX holds context information for the
   directive.  */
static void
lower_omp_for (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree *rhs_p, block;
  struct omp_for_data fd, *fdp = NULL;
  gomp_for *stmt = as_a <gomp_for *> (gsi_stmt (*gsi_p));
  gbind *new_stmt;
  gimple_seq omp_for_body, body, dlist, tred_ilist = NULL, tred_dlist = NULL;
  gimple_seq cnt_list = NULL, clist = NULL;
  gimple_seq oacc_head = NULL, oacc_tail = NULL;
  size_t i;
  push_gimplify_context ();
  lower_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
  block = make_node (BLOCK);
  new_stmt = gimple_build_bind (NULL, NULL, block);
  /* Replace at gsi right away, so that 'stmt' is no member
     of a sequence anymore as we're going to add to a different
     one below.  */
  gsi_replace (gsi_p, new_stmt, true);
  /* Move declaration of temporaries in the loop body before we make
     it go away.  */
  omp_for_body = gimple_omp_body (stmt);
  if (!gimple_seq_empty_p (omp_for_body)
      && gimple_code (gimple_seq_first_stmt (omp_for_body)) == GIMPLE_BIND)
    {
      gbind *inner_bind
	= as_a <gbind *> (gimple_seq_first_stmt (omp_for_body));
      tree vars = gimple_bind_vars (inner_bind);
      gimple_bind_append_vars (new_stmt, vars);
      /* bind_vars/BLOCK_VARS are being moved to new_stmt/block, don't
	 keep them on the inner_bind and it's block.  */
      gimple_bind_set_vars (inner_bind, NULL_TREE);
      if (gimple_bind_block (inner_bind))
	BLOCK_VARS (gimple_bind_block (inner_bind)) = NULL_TREE;
    }
  if (gimple_omp_for_combined_into_p (stmt))
    {
      omp_extract_for_data (stmt, &fd, NULL);
      fdp = &fd;
      /* We need two temporaries with fd.loop.v type (istart/iend)
	 and then (fd.collapse - 1) temporaries with the same
	 type for count2 ... countN-1 vars if not constant.  */
      size_t count = 2;
      tree type = fd.iter_type;
      if (fd.collapse > 1
	  && TREE_CODE (fd.loop.n2) != INTEGER_CST)
	count += fd.collapse - 1;
      bool taskreg_for
	= (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR
	   || gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_TASKLOOP);
      tree outerc = NULL, *pc = gimple_omp_for_clauses_ptr (stmt);
      tree simtc = NULL;
      tree clauses = *pc;
      /* For worksharing/taskloop loops combined into a taskreg
	 construct, the temporaries live in the outer context and are
	 threaded through matching _looptemp_ clauses on the outer
	 statement.  */
      if (taskreg_for)
	outerc
	  = omp_find_clause (gimple_omp_taskreg_clauses (ctx->outer->stmt),
			     OMP_CLAUSE__LOOPTEMP_);
      if (ctx->simt_stmt)
	simtc = omp_find_clause (gimple_omp_for_clauses (ctx->simt_stmt),
				 OMP_CLAUSE__LOOPTEMP_);
      /* Prepend COUNT _looptemp_ clauses to the loop's clause chain,
	 one per temporary.  */
      for (i = 0; i < count; i++)
	{
	  tree temp;
	  if (taskreg_for)
	    {
	      gcc_assert (outerc);
	      temp = lookup_decl (OMP_CLAUSE_DECL (outerc), ctx->outer);
	      outerc = omp_find_clause (OMP_CLAUSE_CHAIN (outerc),
					OMP_CLAUSE__LOOPTEMP_);
	    }
	  else
	    {
	      /* If there are 2 adjacent SIMD stmts, one with _simt_
		 clause, another without, make sure they have the same
		 decls in _looptemp_ clauses, because the outer stmt
		 they are combined into will look up just one inner_stmt.  */
	      if (ctx->simt_stmt)
		temp = OMP_CLAUSE_DECL (simtc);
	      else
		temp = create_tmp_var (type);
	      insert_decl_map (&ctx->outer->cb, temp, temp);
	    }
	  *pc = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
	  OMP_CLAUSE_DECL (*pc) = temp;
	  pc = &OMP_CLAUSE_CHAIN (*pc);
	  if (ctx->simt_stmt)
	    simtc = omp_find_clause (OMP_CLAUSE_CHAIN (simtc),
				     OMP_CLAUSE__LOOPTEMP_);
	}
      *pc = clauses;
    }
  /* The pre-body and input clauses go before the lowered GIMPLE_OMP_FOR.  */
  dlist = NULL;
  body = NULL;
  /* Task reductions: if present, a _reductemp_ clause is prepended and
     the task-reduction init/fini sequences are built into
     tred_ilist/tred_dlist, which bracket the whole lowered region.  */
  tree rclauses
    = omp_task_reductions_find_first (gimple_omp_for_clauses (stmt), OMP_FOR,
				      OMP_CLAUSE_REDUCTION);
  tree rtmp = NULL_TREE;
  if (rclauses)
    {
      tree type = build_pointer_type (pointer_sized_int_node);
      tree temp = create_tmp_var (type);
      tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__REDUCTEMP_);
      OMP_CLAUSE_DECL (c) = temp;
      OMP_CLAUSE_CHAIN (c) = gimple_omp_for_clauses (stmt);
      gimple_omp_for_set_clauses (stmt, c);
      lower_omp_task_reductions (ctx, OMP_FOR,
				 gimple_omp_for_clauses (stmt),
				 &tred_ilist, &tred_dlist);
      rclauses = c;
      rtmp = make_ssa_name (type);
      gimple_seq_add_stmt (&body, gimple_build_assign (rtmp, temp));
    }
  lower_lastprivate_conditional_clauses (gimple_omp_for_clauses_ptr (stmt),
					 ctx);
  lower_rec_input_clauses (gimple_omp_for_clauses (stmt), &body, &dlist, ctx,
			   fdp);
  /* With task reductions the pre-body must run before the reduction
     init sequence consumers, hence it is appended to tred_ilist.  */
  gimple_seq_add_seq (rclauses ? &tred_ilist : &body,
		      gimple_omp_for_pre_body (stmt));
  lower_omp (gimple_omp_body_ptr (stmt), ctx);
  /* Lower the header expressions.  At this point, we can assume that
     the header is of the form:
     #pragma omp for (V = VAL1; V {<|>|<=|>=} VAL2; V = V [+-] VAL3)
     We just need to make sure that VAL1, VAL2 and VAL3 are lowered
     using the .omp_data_s mapping, if needed.  */
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      rhs_p = gimple_omp_for_initial_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &cnt_list);
      else if (TREE_CODE (*rhs_p) == ADDR_EXPR)
	recompute_tree_invariant_for_addr_expr (*rhs_p);
      rhs_p = gimple_omp_for_final_ptr (stmt, i);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &cnt_list);
      else if (TREE_CODE (*rhs_p) == ADDR_EXPR)
	recompute_tree_invariant_for_addr_expr (*rhs_p);
      rhs_p = &TREE_OPERAND (gimple_omp_for_incr (stmt, i), 1);
      if (!is_gimple_min_invariant (*rhs_p))
	*rhs_p = get_formal_tmp_var (*rhs_p, &cnt_list);
    }
  if (rclauses)
    gimple_seq_add_seq (&tred_ilist, cnt_list);
  else
    gimple_seq_add_seq (&body, cnt_list);
  /* Once lowered, extract the bounds and clauses.  */
  omp_extract_for_data (stmt, &fd, NULL);
  if (is_gimple_omp_oacc (ctx->stmt)
      && !ctx_in_oacc_kernels_region (ctx))
    lower_oacc_head_tail (gimple_location (stmt),
			  gimple_omp_for_clauses (stmt),
			  &oacc_head, &oacc_tail, ctx);
  /* Add OpenACC partitioning and reduction markers just before the loop.  */
  if (oacc_head)
    gimple_seq_add_seq (&body, oacc_head);
  lower_omp_for_lastprivate (&fd, &body, &dlist, &clist, ctx);
  if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
    for (tree c = gimple_omp_for_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	  && !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
	{
	  OMP_CLAUSE_DECL (c) = lookup_decl (OMP_CLAUSE_DECL (c), ctx);
	  if (DECL_P (OMP_CLAUSE_LINEAR_STEP (c)))
	    OMP_CLAUSE_LINEAR_STEP (c)
	      = maybe_lookup_decl_in_outer_ctx (OMP_CLAUSE_LINEAR_STEP (c),
						ctx);
	}
  /* A "phony" loop keeps just its body; the GIMPLE_OMP_FOR statement
     itself and the continue/return markers are omitted.  */
  bool phony_loop = (gimple_omp_for_kind (stmt) != GF_OMP_FOR_KIND_GRID_LOOP
		     && gimple_omp_for_grid_phony (stmt));
  if ((ctx->scan_inclusive || ctx->scan_exclusive)
      && gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_FOR)
    {
      gcc_assert (!phony_loop);
      lower_omp_for_scan (&body, &dlist, stmt, &fd, ctx);
    }
  else
    {
      if (!phony_loop)
	gimple_seq_add_stmt (&body, stmt);
      gimple_seq_add_seq (&body, gimple_omp_body (stmt));
    }
  if (!phony_loop)
    gimple_seq_add_stmt (&body, gimple_build_omp_continue (fd.loop.v,
							   fd.loop.v));
  /* After the loop, add exit clauses.  */
  lower_reduction_clauses (gimple_omp_for_clauses (stmt), &body, &clist, ctx);
  /* Reductions accumulated in CLIST must be applied atomically: wrap
     them in GOMP_atomic_start/GOMP_atomic_end calls.  */
  if (clist)
    {
      tree fndecl = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_START);
      gcall *g = gimple_build_call (fndecl, 0);
      gimple_seq_add_stmt (&body, g);
      gimple_seq_add_seq (&body, clist);
      fndecl = builtin_decl_explicit (BUILT_IN_GOMP_ATOMIC_END);
      g = gimple_build_call (fndecl, 0);
      gimple_seq_add_stmt (&body, g);
    }
  if (ctx->cancellable)
    gimple_seq_add_stmt (&body, gimple_build_label (ctx->cancel_label));
  gimple_seq_add_seq (&body, dlist);
  if (rclauses)
    {
      gimple_seq_add_seq (&tred_ilist, body);
      body = tred_ilist;
    }
  body = maybe_catch_exception (body);
  if (!phony_loop)
    {
      /* Region exit marker goes at the end of the loop body.  */
      gimple *g = gimple_build_omp_return (fd.have_nowait);
      gimple_seq_add_stmt (&body, g);
      gimple_seq_add_seq (&body, tred_dlist);
      maybe_add_implicit_barrier_cancel (ctx, g, &body);
      if (rclauses)
	OMP_CLAUSE_DECL (rclauses) = rtmp;
    }
  /* Add OpenACC joining and reduction markers just after the loop.  */
  if (oacc_tail)
    gimple_seq_add_seq (&body, oacc_tail);
  pop_gimplify_context (new_stmt);
  gimple_bind_append_vars (new_stmt, ctx->block_vars);
  maybe_remove_omp_member_access_dummy_vars (new_stmt);
  BLOCK_VARS (block) = gimple_bind_vars (new_stmt);
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;
  gimple_bind_set_body (new_stmt, body);
  /* The loop body and pre-body have been consumed into BODY; clear
     them on the statement so they are not processed twice.  */
  gimple_omp_set_body (stmt, NULL);
  gimple_omp_for_set_pre_body (stmt, NULL);
}
/* Callback for walk_stmts.  Determine whether the walked statement
   sequence consists of exactly one worksharing construct
   (GIMPLE_OMP_FOR or GIMPLE_OMP_SECTIONS) and nothing else, so the
   enclosing parallel may be marked combined.  WI->info points at an
   int state: 0 = nothing seen yet, 1 = exactly one candidate seen,
   -1 = disqualified.  */
static tree
check_combined_parallel (gimple_stmt_iterator *gsi_p,
			 bool *handled_ops_p,
			 struct walk_stmt_info *wi)
{
  int *ws_state = (int *) wi->info;
  gimple *stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_DEBUG:
      /* Debug statements are transparent for this check.  */
      break;
    case GIMPLE_OMP_FOR:
    case GIMPLE_OMP_SECTIONS:
      /* First candidate moves the state to 1; any further one
	 disqualifies.  */
      *ws_state = (*ws_state == 0) ? 1 : -1;
      break;
    default:
      /* Any other statement disqualifies the combination.  */
      *ws_state = -1;
      break;
    }
  return NULL;
}
/* Context used while building a task copy function
   (see create_task_copyfn / task_copyfn_remap_type).  */
struct omp_taskcopy_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;
  /* The OMP context of the task whose copy function is being built.  */
  omp_context *ctx;
};
/* copy_body_data callback used when remapping types for a task copy
   function: variables recorded in the task's sender-field map get a
   fresh temporary of the same type; all other decls are returned
   unchanged.  */
static tree
task_copyfn_copy_decl (tree var, copy_body_data *cb)
{
  struct omp_taskcopy_context *tcctx = (struct omp_taskcopy_context *) cb;

  if (!splay_tree_lookup (tcctx->ctx->sfield_map, (splay_tree_key) var))
    return var;
  return create_tmp_var (TREE_TYPE (var));
}
/* Build a remapped copy of record type ORIG_TYPE for the task copy
   function described by TCCTX: each field is copied, its type and
   size/offset expressions remapped through TCCTX->cb, and the
   original->copy field mapping is recorded in the decl map.  Returns
   the laid-out replacement record type.  */
static tree
task_copyfn_remap_type (struct omp_taskcopy_context *tcctx, tree orig_type)
{
  tree rec = lang_hooks.types.make_type (RECORD_TYPE);
  tree type_decl = build_decl (gimple_location (tcctx->ctx->stmt),
			       TYPE_DECL, DECL_NAME (TYPE_NAME (orig_type)),
			       rec);
  TYPE_NAME (rec) = type_decl;

  /* Copy the fields in reverse onto FIELD_LIST, then nreverse below to
     restore the original order.  */
  tree field_list = NULL;
  for (tree f = TYPE_FIELDS (orig_type); f; f = TREE_CHAIN (f))
    {
      tree nf = copy_node (f);
      DECL_CONTEXT (nf) = rec;
      TREE_TYPE (nf) = remap_type (TREE_TYPE (f), &tcctx->cb);
      TREE_CHAIN (nf) = field_list;
      /* Size and offset expressions may reference remapped decls
	 (VLA fields), so walk and rewrite them too.  */
      walk_tree (&DECL_SIZE (nf), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_SIZE_UNIT (nf), copy_tree_body_r, &tcctx->cb, NULL);
      walk_tree (&DECL_FIELD_OFFSET (nf), copy_tree_body_r,
		 &tcctx->cb, NULL);
      field_list = nf;
      tcctx->cb.decl_map->put (f, nf);
    }
  TYPE_FIELDS (rec) = nreverse (field_list);
  layout_type (rec);
  return rec;
}
/* Create task copyfn: populate the body of the function that copies
   the task's firstprivate/shared data from the sender record (SARG)
   into the task's own record (ARG).  TASK_STMT is the GIMPLE_OMP_TASK
   statement, CTX its lowering context.  Works in three passes over the
   clauses: (1) initialize temporaries used in remapped (VLA) record
   sizes/offsets, (2) copy shared pointers and non-VLA firstprivate
   values, (3) copy-construct VLA firstprivates and fix up their
   pointer fields.  */
static void
create_task_copyfn (gomp_task *task_stmt, omp_context *ctx)
{
  struct function *child_cfun;
  tree child_fn, t, c, src, dst, f, sf, arg, sarg, decl;
  tree record_type, srecord_type, bind, list;
  bool record_needs_remap = false, srecord_needs_remap = false;
  splay_tree_node n;
  struct omp_taskcopy_context tcctx;
  location_t loc = gimple_location (task_stmt);
  size_t looptempno = 0;
  child_fn = gimple_omp_task_copy_fn (task_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  gcc_assert (child_cfun->cfg == NULL);
  DECL_SAVED_TREE (child_fn) = alloc_stmt_list ();
  /* Reset DECL_CONTEXT on function arguments.  */
  for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
    DECL_CONTEXT (t) = child_fn;
  /* Populate the function.  */
  push_gimplify_context ();
  push_cfun (child_cfun);
  bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL);
  TREE_SIDE_EFFECTS (bind) = 1;
  list = NULL;
  DECL_SAVED_TREE (child_fn) = bind;
  DECL_SOURCE_LOCATION (child_fn) = gimple_location (task_stmt);
  /* Remap src and dst argument types if needed.  Remapping is needed
     when a field has variably modified type (VLAs), whose size
     expressions must be rewritten for the child function.  */
  record_type = ctx->record_type;
  srecord_type = ctx->srecord_type;
  for (f = TYPE_FIELDS (record_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	record_needs_remap = true;
	break;
      }
  for (f = TYPE_FIELDS (srecord_type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      {
	srecord_needs_remap = true;
	break;
      }
  if (record_needs_remap || srecord_needs_remap)
    {
      /* Set up a copy_body_data for remapping types/decls into the
	 child function.  */
      memset (&tcctx, '\0', sizeof (tcctx));
      tcctx.cb.src_fn = ctx->cb.src_fn;
      tcctx.cb.dst_fn = child_fn;
      tcctx.cb.src_node = cgraph_node::get (tcctx.cb.src_fn);
      gcc_checking_assert (tcctx.cb.src_node);
      tcctx.cb.dst_node = tcctx.cb.src_node;
      tcctx.cb.src_cfun = ctx->cb.src_cfun;
      tcctx.cb.copy_decl = task_copyfn_copy_decl;
      tcctx.cb.eh_lp_nr = 0;
      tcctx.cb.transform_call_graph_edges = CB_CGE_MOVE;
      tcctx.cb.decl_map = new hash_map<tree, tree>;
      tcctx.ctx = ctx;
      if (record_needs_remap)
	record_type = task_copyfn_remap_type (&tcctx, record_type);
      if (srecord_needs_remap)
	srecord_type = task_copyfn_remap_type (&tcctx, srecord_type);
    }
  else
    /* decl_map == NULL is used below as "no remapping happened".  */
    tcctx.cb.decl_map = NULL;
  /* First argument is the destination record pointer, second the
     sender (source) record pointer.  */
  arg = DECL_ARGUMENTS (child_fn);
  TREE_TYPE (arg) = build_pointer_type (record_type);
  sarg = DECL_CHAIN (arg);
  TREE_TYPE (sarg) = build_pointer_type (srecord_type);
  /* First pass: initialize temporaries used in record_type and srecord_type
     sizes and field offsets.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree *p;
	  decl = OMP_CLAUSE_DECL (c);
	  p = tcctx.cb.decl_map->get (decl);
	  if (p == NULL)
	    continue;
	  n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	  sf = (tree) n->value;
	  sf = *tcctx.cb.decl_map->get (sf);
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = omp_build_component_ref (src, sf);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (*p), *p, src);
	  append_to_statement_list (t, &list);
	}
  /* Second pass: copy shared var pointers and copy construct non-VLA
     firstprivate vars.  */
  for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
    switch (OMP_CLAUSE_CODE (c))
      {
	splay_tree_key key;
      case OMP_CLAUSE_SHARED:
	decl = OMP_CLAUSE_DECL (c);
	key = (splay_tree_key) decl;
	/* SHARED_FIRSTPRIVATE entries are keyed by &DECL_UID to avoid
	   clashing with the plain firstprivate mapping of DECL.  */
	if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
	  key = (splay_tree_key) &DECL_UID (decl);
	n = splay_tree_lookup (ctx->field_map, key);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *tcctx.cb.decl_map->get (f);
	n = splay_tree_lookup (ctx->sfield_map, key);
	sf = (tree) n->value;
	if (tcctx.cb.decl_map)
	  sf = *tcctx.cb.decl_map->get (sf);
	src = build_simple_mem_ref_loc (loc, sarg);
	src = omp_build_component_ref (src, sf);
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = omp_build_component_ref (dst, f);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_REDUCTION:
      case OMP_CLAUSE_IN_REDUCTION:
	decl = OMP_CLAUSE_DECL (c);
	/* Strip the address computation wrapped around array-section
	   reductions down to the underlying decl.  */
	if (TREE_CODE (decl) == MEM_REF)
	  {
	    decl = TREE_OPERAND (decl, 0);
	    if (TREE_CODE (decl) == POINTER_PLUS_EXPR)
	      decl = TREE_OPERAND (decl, 0);
	    if (TREE_CODE (decl) == INDIRECT_REF
		|| TREE_CODE (decl) == ADDR_EXPR)
	      decl = TREE_OPERAND (decl, 0);
	  }
	key = (splay_tree_key) decl;
	n = splay_tree_lookup (ctx->field_map, key);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *tcctx.cb.decl_map->get (f);
	n = splay_tree_lookup (ctx->sfield_map, key);
	sf = (tree) n->value;
	if (tcctx.cb.decl_map)
	  sf = *tcctx.cb.decl_map->get (sf);
	src = build_simple_mem_ref_loc (loc, sarg);
	src = omp_build_component_ref (src, sf);
	if (decl != OMP_CLAUSE_DECL (c)
	    && TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE
	    && TREE_CODE (TREE_TYPE (TREE_TYPE (decl))) == POINTER_TYPE)
	  src = build_simple_mem_ref_loc (loc, src);
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = omp_build_component_ref (dst, f);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE__LOOPTEMP_:
	/* Fields for first two _looptemp_ clauses are initialized by
	   GOMP_taskloop*, the rest are handled like firstprivate.  */
	if (looptempno < 2)
	  {
	    looptempno++;
	    break;
	  }
	/* FALLTHRU */
      case OMP_CLAUSE__REDUCTEMP_:
      case OMP_CLAUSE_FIRSTPRIVATE:
	decl = OMP_CLAUSE_DECL (c);
	/* Variable-sized firstprivates are handled in the last pass.  */
	if (is_variable_sized (decl))
	  break;
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	if (n == NULL)
	  break;
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *tcctx.cb.decl_map->get (f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *tcctx.cb.decl_map->get (sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = omp_build_component_ref (src, sf);
	    if (use_pointer_for_field (decl, NULL) || omp_is_reference (decl))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = omp_build_component_ref (dst, f);
	/* Firstprivate uses the language's copy constructor; the
	   internal temp clauses are plain assignments.  */
	if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_FIRSTPRIVATE)
	  t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	else
	  t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	append_to_statement_list (t, &list);
	break;
      case OMP_CLAUSE_PRIVATE:
	if (! OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	  break;
	decl = OMP_CLAUSE_DECL (c);
	n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	f = (tree) n->value;
	if (tcctx.cb.decl_map)
	  f = *tcctx.cb.decl_map->get (f);
	n = splay_tree_lookup (ctx->sfield_map, (splay_tree_key) decl);
	if (n != NULL)
	  {
	    sf = (tree) n->value;
	    if (tcctx.cb.decl_map)
	      sf = *tcctx.cb.decl_map->get (sf);
	    src = build_simple_mem_ref_loc (loc, sarg);
	    src = omp_build_component_ref (src, sf);
	    if (use_pointer_for_field (decl, NULL))
	      src = build_simple_mem_ref_loc (loc, src);
	  }
	else
	  src = decl;
	dst = build_simple_mem_ref_loc (loc, arg);
	dst = omp_build_component_ref (dst, f);
	t = build2 (MODIFY_EXPR, TREE_TYPE (dst), dst, src);
	append_to_statement_list (t, &list);
	break;
      default:
	break;
      }
  /* Last pass: handle VLA firstprivates.  */
  if (tcctx.cb.decl_map)
    for (c = gimple_omp_task_clauses (task_stmt); c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
	{
	  tree ind, ptr, df;
	  decl = OMP_CLAUSE_DECL (c);
	  if (!is_variable_sized (decl))
	    continue;
	  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) decl);
	  if (n == NULL)
	    continue;
	  f = (tree) n->value;
	  f = *tcctx.cb.decl_map->get (f);
	  /* A VLA decl is represented as *ptr via DECL_VALUE_EXPR;
	     copy-construct the data, then point the pointer field at
	     the destination copy.  */
	  gcc_assert (DECL_HAS_VALUE_EXPR_P (decl));
	  ind = DECL_VALUE_EXPR (decl);
	  gcc_assert (TREE_CODE (ind) == INDIRECT_REF);
	  gcc_assert (DECL_P (TREE_OPERAND (ind, 0)));
	  n = splay_tree_lookup (ctx->sfield_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  sf = (tree) n->value;
	  sf = *tcctx.cb.decl_map->get (sf);
	  src = build_simple_mem_ref_loc (loc, sarg);
	  src = omp_build_component_ref (src, sf);
	  src = build_simple_mem_ref_loc (loc, src);
	  dst = build_simple_mem_ref_loc (loc, arg);
	  dst = omp_build_component_ref (dst, f);
	  t = lang_hooks.decls.omp_clause_copy_ctor (c, dst, src);
	  append_to_statement_list (t, &list);
	  n = splay_tree_lookup (ctx->field_map,
				 (splay_tree_key) TREE_OPERAND (ind, 0));
	  df = (tree) n->value;
	  df = *tcctx.cb.decl_map->get (df);
	  ptr = build_simple_mem_ref_loc (loc, arg);
	  ptr = omp_build_component_ref (ptr, df);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (ptr), ptr,
		      build_fold_addr_expr_loc (loc, dst));
	  append_to_statement_list (t, &list);
	}
  t = build1 (RETURN_EXPR, void_type_node, NULL);
  append_to_statement_list (t, &list);
  if (tcctx.cb.decl_map)
    delete tcctx.cb.decl_map;
  pop_gimplify_context (NULL);
  BIND_EXPR_BODY (bind) = list;
  pop_cfun ();
}
/* Lower OMP_CLAUSE_DEPEND clauses found in *PCLAUSES into a single
   address array handed to the runtime.  Initialization statements go
   to *ISEQ, teardown (a clobber of the array) to *OSEQ.  The array
   layout visible here: either 2 header slots (total count, then
   out/inout count) or, when mutexinoutset/depobj dependences are
   present, 5 header slots (0, total, then three per-kind counts),
   followed by the dependence addresses grouped by kind.
   NOTE(review): the exact header interpretation is the libgomp task
   dependence ABI — confirm against the runtime before changing.  */
static void
lower_depend_clauses (tree *pclauses, gimple_seq *iseq, gimple_seq *oseq)
{
  tree c, clauses;
  gimple *g;
  /* cnt[0]=out/inout, cnt[1]=mutexinoutset, cnt[2]=in, cnt[3]=depobj.
     IDX is the number of header slots before the addresses.  */
  size_t cnt[4] = { 0, 0, 0, 0 }, idx = 2, i;
  clauses = omp_find_clause (*pclauses, OMP_CLAUSE_DEPEND);
  gcc_assert (clauses);
  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
      switch (OMP_CLAUSE_DEPEND_KIND (c))
	{
	case OMP_CLAUSE_DEPEND_LAST:
	  /* Lowering already done at gimplification.  */
	  return;
	case OMP_CLAUSE_DEPEND_IN:
	  cnt[2]++;
	  break;
	case OMP_CLAUSE_DEPEND_OUT:
	case OMP_CLAUSE_DEPEND_INOUT:
	  cnt[0]++;
	  break;
	case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
	  cnt[1]++;
	  break;
	case OMP_CLAUSE_DEPEND_DEPOBJ:
	  cnt[3]++;
	  break;
	case OMP_CLAUSE_DEPEND_SOURCE:
	case OMP_CLAUSE_DEPEND_SINK:
	  /* FALLTHRU */
	default:
	  gcc_unreachable ();
	}
  /* The extended 5-slot header is needed only when the newer
     dependence kinds appear.  */
  if (cnt[1] || cnt[3])
    idx = 5;
  size_t total = cnt[0] + cnt[1] + cnt[2] + cnt[3];
  tree type = build_array_type_nelts (ptr_type_node, total + idx);
  tree array = create_tmp_var (type);
  TREE_ADDRESSABLE (array) = 1;
  tree r = build4 (ARRAY_REF, ptr_type_node, array, size_int (0), NULL_TREE,
		   NULL_TREE);
  if (idx == 5)
    {
      /* Slot 0 = 0 marks the extended format; TOTAL moves to slot 1.  */
      g = gimple_build_assign (r, build_int_cst (ptr_type_node, 0));
      gimple_seq_add_stmt (iseq, g);
      r = build4 (ARRAY_REF, ptr_type_node, array, size_int (1), NULL_TREE,
		  NULL_TREE);
    }
  g = gimple_build_assign (r, build_int_cst (ptr_type_node, total));
  gimple_seq_add_stmt (iseq, g);
  /* Store the per-kind counts: one slot (out/inout) in the short
     format, three slots in the extended format.  */
  for (i = 0; i < (idx == 5 ? 3 : 1); i++)
    {
      r = build4 (ARRAY_REF, ptr_type_node, array,
		  size_int (i + 1 + (idx == 5)), NULL_TREE, NULL_TREE);
      g = gimple_build_assign (r, build_int_cst (ptr_type_node, cnt[i]));
      gimple_seq_add_stmt (iseq, g);
    }
  /* Emit the addresses grouped by kind: out/inout, mutexinoutset,
     in, depobj — one pass over the clauses per group.  */
  for (i = 0; i < 4; i++)
    {
      if (cnt[i] == 0)
	continue;
      for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
	if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
	  continue;
	else
	  {
	    switch (OMP_CLAUSE_DEPEND_KIND (c))
	      {
	      case OMP_CLAUSE_DEPEND_IN:
		if (i != 2)
		  continue;
		break;
	      case OMP_CLAUSE_DEPEND_OUT:
	      case OMP_CLAUSE_DEPEND_INOUT:
		if (i != 0)
		  continue;
		break;
	      case OMP_CLAUSE_DEPEND_MUTEXINOUTSET:
		if (i != 1)
		  continue;
		break;
	      case OMP_CLAUSE_DEPEND_DEPOBJ:
		if (i != 3)
		  continue;
		break;
	      default:
		gcc_unreachable ();
	      }
	    tree t = OMP_CLAUSE_DECL (c);
	    t = fold_convert (ptr_type_node, t);
	    gimplify_expr (&t, iseq, NULL, is_gimple_val, fb_rvalue);
	    r = build4 (ARRAY_REF, ptr_type_node, array, size_int (idx++),
			NULL_TREE, NULL_TREE);
	    g = gimple_build_assign (r, t);
	    gimple_seq_add_stmt (iseq, g);
	  }
    }
  /* Replace the individual depend clauses with a single
     OMP_CLAUSE_DEPEND_LAST clause pointing at the array.  */
  c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE_DEPEND);
  OMP_CLAUSE_DEPEND_KIND (c) = OMP_CLAUSE_DEPEND_LAST;
  OMP_CLAUSE_DECL (c) = build_fold_addr_expr (array);
  OMP_CLAUSE_CHAIN (c) = *pclauses;
  *pclauses = c;
  /* Clobber the array after the construct so its stack slot can be
     reused.  */
  tree clobber = build_clobber (type);
  g = gimple_build_assign (array, clobber);
  gimple_seq_add_stmt (oseq, g);
}
/* Lower the OpenMP parallel or task directive in the current statement
   in GSI_P.  CTX holds context information for the directive.  The
   statement is replaced with a GIMPLE_BIND (possibly nested inside a
   depend-clause bind) containing the sender-record setup, the
   directive itself with its rebuilt body, and the teardown
   sequences.  */
static void
lower_omp_taskreg (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  tree clauses;
  tree child_fn, t;
  gimple *stmt = gsi_stmt (*gsi_p);
  gbind *par_bind, *bind, *dep_bind = NULL;
  gimple_seq par_body;
  location_t loc = gimple_location (stmt);
  clauses = gimple_omp_taskreg_clauses (stmt);
  if (gimple_code (stmt) == GIMPLE_OMP_TASK
      && gimple_omp_task_taskwait_p (stmt))
    {
      /* A taskwait-with-depend has no body of its own.  */
      par_bind = NULL;
      par_body = NULL;
    }
  else
    {
      par_bind
	= as_a <gbind *> (gimple_seq_first_stmt (gimple_omp_body (stmt)));
      par_body = gimple_bind_body (par_bind);
    }
  child_fn = ctx->cb.dst_fn;
  if (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
      && !gimple_omp_parallel_combined_p (stmt))
    {
      /* Detect a parallel whose body is exactly one worksharing
	 construct and mark it combined (enables combined runtime
	 entry points).  */
      struct walk_stmt_info wi;
      int ws_num = 0;
      memset (&wi, 0, sizeof (wi));
      wi.info = &ws_num;
      wi.val_only = true;
      walk_gimple_seq (par_body, check_combined_parallel, NULL, &wi);
      if (ws_num == 1)
	gimple_omp_parallel_set_combined_p (stmt, true);
    }
  gimple_seq dep_ilist = NULL;
  gimple_seq dep_olist = NULL;
  if (gimple_code (stmt) == GIMPLE_OMP_TASK
      && omp_find_clause (clauses, OMP_CLAUSE_DEPEND))
    {
      push_gimplify_context ();
      dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
      lower_depend_clauses (gimple_omp_task_clauses_ptr (stmt),
			    &dep_ilist, &dep_olist);
    }
  if (gimple_code (stmt) == GIMPLE_OMP_TASK
      && gimple_omp_task_taskwait_p (stmt))
    {
      /* For taskwait-with-depend only the depend lowering is needed;
	 wrap it and return early.  */
      if (dep_bind)
	{
	  gsi_replace (gsi_p, dep_bind, true);
	  gimple_bind_add_seq (dep_bind, dep_ilist);
	  gimple_bind_add_stmt (dep_bind, stmt);
	  gimple_bind_add_seq (dep_bind, dep_olist);
	  pop_gimplify_context (dep_bind);
	}
      return;
    }
  if (ctx->srecord_type)
    create_task_copyfn (as_a <gomp_task *> (stmt), ctx);
  /* Task reductions (taskloop reductions, or parallel with a
     _reductemp_ clause) need init/fini sequences bracketing the
     construct; they share the dep_bind wrapper.  */
  gimple_seq tskred_ilist = NULL;
  gimple_seq tskred_olist = NULL;
  if ((is_task_ctx (ctx)
       && gimple_omp_task_taskloop_p (ctx->stmt)
       && omp_find_clause (gimple_omp_task_clauses (ctx->stmt),
			   OMP_CLAUSE_REDUCTION))
      || (is_parallel_ctx (ctx)
	  && omp_find_clause (gimple_omp_parallel_clauses (stmt),
			      OMP_CLAUSE__REDUCTEMP_)))
    {
      if (dep_bind == NULL)
	{
	  push_gimplify_context ();
	  dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
	}
      lower_omp_task_reductions (ctx, is_task_ctx (ctx) ? OMP_TASKLOOP
							: OMP_PARALLEL,
				 gimple_omp_taskreg_clauses (ctx->stmt),
				 &tskred_ilist, &tskred_olist);
    }
  push_gimplify_context ();
  gimple_seq par_olist = NULL;
  gimple_seq par_ilist = NULL;
  gimple_seq par_rlist = NULL;
  bool phony_construct = gimple_code (stmt) == GIMPLE_OMP_PARALLEL
    && gimple_omp_parallel_grid_phony (as_a <gomp_parallel *> (stmt));
  if (phony_construct && ctx->record_type)
    {
      gcc_checking_assert (!ctx->receiver_decl);
      ctx->receiver_decl = create_tmp_var
	(build_reference_type (ctx->record_type), ".omp_rec");
    }
  lower_rec_input_clauses (clauses, &par_ilist, &par_olist, ctx, NULL);
  lower_omp (&par_body, ctx);
  if (gimple_code (stmt) != GIMPLE_OMP_TASK)
    lower_reduction_clauses (clauses, &par_rlist, NULL, ctx);
  /* Declare all the variables created by mapping and the variables
     declared in the scope of the parallel body.  */
  record_vars_into (ctx->block_vars, child_fn);
  maybe_remove_omp_member_access_dummy_vars (par_bind);
  record_vars_into (gimple_bind_vars (par_bind), child_fn);
  if (ctx->record_type)
    {
      ctx->sender_decl
	= create_tmp_var (ctx->srecord_type ? ctx->srecord_type
			  : ctx->record_type, ".omp_data_o");
      DECL_NAMELESS (ctx->sender_decl) = 1;
      TREE_ADDRESSABLE (ctx->sender_decl) = 1;
      gimple_omp_taskreg_set_data_arg (stmt, ctx->sender_decl);
    }
  gimple_seq olist = NULL;
  gimple_seq ilist = NULL;
  lower_send_clauses (clauses, &ilist, &olist, ctx);
  lower_send_shared_vars (&ilist, &olist, ctx);
  if (ctx->record_type)
    {
      /* Clobber the sender record after the construct so its stack
	 slot can be reused.  */
      tree clobber = build_clobber (TREE_TYPE (ctx->sender_decl));
      gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
							clobber));
    }
  /* Once all the expansions are done, sequence all the different
     fragments inside gimple_omp_body.  */
  gimple_seq new_body = NULL;
  if (ctx->record_type)
    {
      t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
      /* fixup_child_record_type might have changed receiver_decl's type.  */
      t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
      gimple_seq_add_stmt (&new_body,
			   gimple_build_assign (ctx->receiver_decl, t));
    }
  gimple_seq_add_seq (&new_body, par_ilist);
  gimple_seq_add_seq (&new_body, par_body);
  gimple_seq_add_seq (&new_body, par_rlist);
  if (ctx->cancellable)
    gimple_seq_add_stmt (&new_body, gimple_build_label (ctx->cancel_label));
  gimple_seq_add_seq (&new_body, par_olist);
  new_body = maybe_catch_exception (new_body);
  if (gimple_code (stmt) == GIMPLE_OMP_TASK)
    gimple_seq_add_stmt (&new_body,
			 gimple_build_omp_continue (integer_zero_node,
						    integer_zero_node));
  if (!phony_construct)
    {
      gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
      gimple_omp_set_body (stmt, new_body);
    }
  if (dep_bind && gimple_bind_block (par_bind) == NULL_TREE)
    bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
  else
    bind = gimple_build_bind (NULL, NULL, gimple_bind_block (par_bind));
  gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
  gimple_bind_add_seq (bind, ilist);
  if (!phony_construct)
    gimple_bind_add_stmt (bind, stmt);
  else
    gimple_bind_add_seq (bind, new_body);
  gimple_bind_add_seq (bind, olist);
  pop_gimplify_context (NULL);
  if (dep_bind)
    {
      /* Nest the construct inside the depend/task-reduction wrapper:
	 dep_ilist, tskred_ilist, BIND, tskred_olist, dep_olist.  */
      gimple_bind_add_seq (dep_bind, dep_ilist);
      gimple_bind_add_seq (dep_bind, tskred_ilist);
      gimple_bind_add_stmt (dep_bind, bind);
      gimple_bind_add_seq (dep_bind, tskred_olist);
      gimple_bind_add_seq (dep_bind, dep_olist);
      pop_gimplify_context (dep_bind);
    }
}
/* Lower the GIMPLE_OMP_TARGET in the current statement
in GSI_P. CTX holds context information for the directive. */
static void
lower_omp_target (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
tree clauses;
tree child_fn, t, c;
gomp_target *stmt = as_a <gomp_target *> (gsi_stmt (*gsi_p));
gbind *tgt_bind, *bind, *dep_bind = NULL;
gimple_seq tgt_body, olist, ilist, fplist, new_body;
location_t loc = gimple_location (stmt);
bool offloaded, data_region;
unsigned int map_cnt = 0;
offloaded = is_gimple_omp_offloaded (stmt);
switch (gimple_omp_target_kind (stmt))
{
case GF_OMP_TARGET_KIND_REGION:
case GF_OMP_TARGET_KIND_UPDATE:
case GF_OMP_TARGET_KIND_ENTER_DATA:
case GF_OMP_TARGET_KIND_EXIT_DATA:
case GF_OMP_TARGET_KIND_OACC_PARALLEL:
case GF_OMP_TARGET_KIND_OACC_KERNELS:
case GF_OMP_TARGET_KIND_OACC_SERIAL:
case GF_OMP_TARGET_KIND_OACC_UPDATE:
case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
case GF_OMP_TARGET_KIND_OACC_DECLARE:
data_region = false;
break;
case GF_OMP_TARGET_KIND_DATA:
case GF_OMP_TARGET_KIND_OACC_DATA:
case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
data_region = true;
break;
default:
gcc_unreachable ();
}
clauses = gimple_omp_target_clauses (stmt);
gimple_seq dep_ilist = NULL;
gimple_seq dep_olist = NULL;
if (omp_find_clause (clauses, OMP_CLAUSE_DEPEND))
{
push_gimplify_context ();
dep_bind = gimple_build_bind (NULL, NULL, make_node (BLOCK));
lower_depend_clauses (gimple_omp_target_clauses_ptr (stmt),
&dep_ilist, &dep_olist);
}
tgt_bind = NULL;
tgt_body = NULL;
if (offloaded)
{
tgt_bind = gimple_seq_first_stmt_as_a_bind (gimple_omp_body (stmt));
tgt_body = gimple_bind_body (tgt_bind);
}
else if (data_region)
tgt_body = gimple_omp_body (stmt);
child_fn = ctx->cb.dst_fn;
push_gimplify_context ();
fplist = NULL;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
tree var, x;
default:
break;
case OMP_CLAUSE_MAP:
#if CHECKING_P
/* First check what we're prepared to handle in the following. */
switch (OMP_CLAUSE_MAP_KIND (c))
{
case GOMP_MAP_ALLOC:
case GOMP_MAP_TO:
case GOMP_MAP_FROM:
case GOMP_MAP_TOFROM:
case GOMP_MAP_POINTER:
case GOMP_MAP_TO_PSET:
case GOMP_MAP_DELETE:
case GOMP_MAP_RELEASE:
case GOMP_MAP_ALWAYS_TO:
case GOMP_MAP_ALWAYS_FROM:
case GOMP_MAP_ALWAYS_TOFROM:
case GOMP_MAP_FIRSTPRIVATE_POINTER:
case GOMP_MAP_FIRSTPRIVATE_REFERENCE:
case GOMP_MAP_STRUCT:
case GOMP_MAP_ALWAYS_POINTER:
break;
case GOMP_MAP_IF_PRESENT:
case GOMP_MAP_FORCE_ALLOC:
case GOMP_MAP_FORCE_TO:
case GOMP_MAP_FORCE_FROM:
case GOMP_MAP_FORCE_TOFROM:
case GOMP_MAP_FORCE_PRESENT:
case GOMP_MAP_FORCE_DEVICEPTR:
case GOMP_MAP_DEVICE_RESIDENT:
case GOMP_MAP_LINK:
case GOMP_MAP_ATTACH:
case GOMP_MAP_DETACH:
case GOMP_MAP_FORCE_DETACH:
gcc_assert (is_gimple_omp_oacc (stmt));
break;
default:
gcc_unreachable ();
}
#endif
/* FALLTHRU */
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
oacc_firstprivate:
var = OMP_CLAUSE_DECL (c);
if (!DECL_P (var))
{
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP
|| (!OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
&& (OMP_CLAUSE_MAP_KIND (c)
!= GOMP_MAP_FIRSTPRIVATE_POINTER)))
map_cnt++;
continue;
}
if (DECL_SIZE (var)
&& TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
{
tree var2 = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
var2 = TREE_OPERAND (var2, 0);
gcc_assert (DECL_P (var2));
var = var2;
}
if (offloaded
&& OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE))
{
if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
{
if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx))
&& varpool_node::get_create (var)->offloadable)
continue;
tree type = build_pointer_type (TREE_TYPE (var));
tree new_var = lookup_decl (var, ctx);
x = create_tmp_var_raw (type, get_name (new_var));
gimple_add_tmp_var (x);
x = build_simple_mem_ref (x);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
continue;
}
if (!maybe_lookup_field (var, ctx))
continue;
/* Don't remap compute constructs' reduction variables, because the
intermediate result must be local to each gang. */
if (offloaded && !(OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_IN_REDUCTION (c)))
{
x = build_receiver_ref (var, true, ctx);
tree new_var = lookup_decl (var, ctx);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
&& !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
&& TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
x = build_simple_mem_ref (x);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
{
gcc_assert (is_gimple_omp_oacc (ctx->stmt));
if (omp_is_reference (new_var)
&& (TREE_CODE (TREE_TYPE (new_var)) != POINTER_TYPE
|| DECL_BY_REFERENCE (var)))
{
/* Create a local object to hold the instance
value. */
tree type = TREE_TYPE (TREE_TYPE (new_var));
const char *id = IDENTIFIER_POINTER (DECL_NAME (new_var));
tree inst = create_tmp_var (type, id);
gimplify_assign (inst, fold_indirect_ref (x), &fplist);
x = build_fold_addr_expr (inst);
}
gimplify_assign (new_var, x, &fplist);
}
else if (DECL_P (new_var))
{
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
else
gcc_unreachable ();
}
map_cnt++;
break;
case OMP_CLAUSE_FIRSTPRIVATE:
gcc_checking_assert (offloaded);
if (is_gimple_omp_oacc (ctx->stmt))
{
/* No 'firstprivate' clauses on OpenACC 'kernels'. */
gcc_checking_assert (!is_oacc_kernels (ctx));
goto oacc_firstprivate;
}
map_cnt++;
var = OMP_CLAUSE_DECL (c);
if (!omp_is_reference (var)
&& !is_gimple_reg_type (TREE_TYPE (var)))
{
tree new_var = lookup_decl (var, ctx);
if (is_variable_sized (var))
{
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
tree new_pvar = lookup_decl (pvar, ctx);
x = build_fold_indirect_ref (new_pvar);
TREE_THIS_NOTRAP (x) = 1;
}
else
x = build_receiver_ref (var, true, ctx);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
break;
case OMP_CLAUSE_PRIVATE:
gcc_checking_assert (offloaded);
if (is_gimple_omp_oacc (ctx->stmt))
{
/* No 'private' clauses on OpenACC 'kernels'. */
gcc_checking_assert (!is_oacc_kernels (ctx));
break;
}
var = OMP_CLAUSE_DECL (c);
if (is_variable_sized (var))
{
tree new_var = lookup_decl (var, ctx);
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
tree new_pvar = lookup_decl (pvar, ctx);
x = build_fold_indirect_ref (new_pvar);
TREE_THIS_NOTRAP (x) = 1;
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
break;
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_USE_DEVICE_ADDR:
case OMP_CLAUSE_IS_DEVICE_PTR:
var = OMP_CLAUSE_DECL (c);
map_cnt++;
if (is_variable_sized (var))
{
tree new_var = lookup_decl (var, ctx);
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
tree new_pvar = lookup_decl (pvar, ctx);
x = build_fold_indirect_ref (new_pvar);
TREE_THIS_NOTRAP (x) = 1;
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
else if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_ADDR
&& !omp_is_reference (var)
&& !omp_is_allocatable_or_ptr (var)
&& !lang_hooks.decls.omp_array_data (var, true))
|| TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
{
tree new_var = lookup_decl (var, ctx);
tree type = build_pointer_type (TREE_TYPE (var));
x = create_tmp_var_raw (type, get_name (new_var));
gimple_add_tmp_var (x);
x = build_simple_mem_ref (x);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
else
{
tree new_var = lookup_decl (var, ctx);
x = create_tmp_var_raw (TREE_TYPE (new_var), get_name (new_var));
gimple_add_tmp_var (x);
SET_DECL_VALUE_EXPR (new_var, x);
DECL_HAS_VALUE_EXPR_P (new_var) = 1;
}
break;
}
if (offloaded)
{
target_nesting_level++;
lower_omp (&tgt_body, ctx);
target_nesting_level--;
}
else if (data_region)
lower_omp (&tgt_body, ctx);
if (offloaded)
{
/* Declare all the variables created by mapping and the variables
declared in the scope of the target body. */
record_vars_into (ctx->block_vars, child_fn);
maybe_remove_omp_member_access_dummy_vars (tgt_bind);
record_vars_into (gimple_bind_vars (tgt_bind), child_fn);
}
olist = NULL;
ilist = NULL;
if (ctx->record_type)
{
ctx->sender_decl
= create_tmp_var (ctx->record_type, ".omp_data_arr");
DECL_NAMELESS (ctx->sender_decl) = 1;
TREE_ADDRESSABLE (ctx->sender_decl) = 1;
t = make_tree_vec (3);
TREE_VEC_ELT (t, 0) = ctx->sender_decl;
TREE_VEC_ELT (t, 1)
= create_tmp_var (build_array_type_nelts (size_type_node, map_cnt),
".omp_data_sizes");
DECL_NAMELESS (TREE_VEC_ELT (t, 1)) = 1;
TREE_ADDRESSABLE (TREE_VEC_ELT (t, 1)) = 1;
TREE_STATIC (TREE_VEC_ELT (t, 1)) = 1;
tree tkind_type = short_unsigned_type_node;
int talign_shift = 8;
TREE_VEC_ELT (t, 2)
= create_tmp_var (build_array_type_nelts (tkind_type, map_cnt),
".omp_data_kinds");
DECL_NAMELESS (TREE_VEC_ELT (t, 2)) = 1;
TREE_ADDRESSABLE (TREE_VEC_ELT (t, 2)) = 1;
TREE_STATIC (TREE_VEC_ELT (t, 2)) = 1;
gimple_omp_target_set_data_arg (stmt, t);
vec<constructor_elt, va_gc> *vsize;
vec<constructor_elt, va_gc> *vkind;
vec_alloc (vsize, map_cnt);
vec_alloc (vkind, map_cnt);
unsigned int map_idx = 0;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
tree ovar, nc, s, purpose, var, x, type;
unsigned int talign;
default:
break;
case OMP_CLAUSE_MAP:
case OMP_CLAUSE_TO:
case OMP_CLAUSE_FROM:
oacc_firstprivate_map:
nc = c;
ovar = OMP_CLAUSE_DECL (c);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
|| (OMP_CLAUSE_MAP_KIND (c)
== GOMP_MAP_FIRSTPRIVATE_REFERENCE)))
break;
if (!DECL_P (ovar))
{
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
{
gcc_checking_assert (OMP_CLAUSE_DECL (OMP_CLAUSE_CHAIN (c))
== get_base_address (ovar));
nc = OMP_CLAUSE_CHAIN (c);
ovar = OMP_CLAUSE_DECL (nc);
}
else
{
tree x = build_sender_ref (ovar, ctx);
tree v
= build_fold_addr_expr_with_type (ovar, ptr_type_node);
gimplify_assign (x, v, &ilist);
nc = NULL_TREE;
}
}
else
{
if (DECL_SIZE (ovar)
&& TREE_CODE (DECL_SIZE (ovar)) != INTEGER_CST)
{
tree ovar2 = DECL_VALUE_EXPR (ovar);
gcc_assert (TREE_CODE (ovar2) == INDIRECT_REF);
ovar2 = TREE_OPERAND (ovar2, 0);
gcc_assert (DECL_P (ovar2));
ovar = ovar2;
}
if (!maybe_lookup_field (ovar, ctx))
continue;
}
talign = TYPE_ALIGN_UNIT (TREE_TYPE (ovar));
if (DECL_P (ovar) && DECL_ALIGN_UNIT (ovar) > talign)
talign = DECL_ALIGN_UNIT (ovar);
if (nc)
{
var = lookup_decl_in_outer_ctx (ovar, ctx);
x = build_sender_ref (ovar, ctx);
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
&& OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
&& !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
&& TREE_CODE (TREE_TYPE (ovar)) == ARRAY_TYPE)
{
gcc_assert (offloaded);
tree avar
= create_tmp_var (TREE_TYPE (TREE_TYPE (x)));
mark_addressable (avar);
gimplify_assign (avar, build_fold_addr_expr (var), &ilist);
talign = DECL_ALIGN_UNIT (avar);
avar = build_fold_addr_expr (avar);
gimplify_assign (x, avar, &ilist);
}
else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
{
gcc_assert (is_gimple_omp_oacc (ctx->stmt));
if (!omp_is_reference (var))
{
if (is_gimple_reg (var)
&& OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (c))
TREE_NO_WARNING (var) = 1;
var = build_fold_addr_expr (var);
}
else
talign = TYPE_ALIGN_UNIT (TREE_TYPE (TREE_TYPE (ovar)));
gimplify_assign (x, var, &ilist);
}
else if (is_gimple_reg (var))
{
gcc_assert (offloaded);
tree avar = create_tmp_var (TREE_TYPE (var));
mark_addressable (avar);
enum gomp_map_kind map_kind = OMP_CLAUSE_MAP_KIND (c);
if (GOMP_MAP_COPY_TO_P (map_kind)
|| map_kind == GOMP_MAP_POINTER
|| map_kind == GOMP_MAP_TO_PSET
|| map_kind == GOMP_MAP_FORCE_DEVICEPTR)
{
/* If we need to initialize a temporary
with VAR because it is not addressable, and
the variable hasn't been initialized yet, then
we'll get a warning for the store to avar.
Don't warn in that case, the mapping might
be implicit. */
TREE_NO_WARNING (var) = 1;
gimplify_assign (avar, var, &ilist);
}
avar = build_fold_addr_expr (avar);
gimplify_assign (x, avar, &ilist);
if ((GOMP_MAP_COPY_FROM_P (map_kind)
|| map_kind == GOMP_MAP_FORCE_DEVICEPTR)
&& !TYPE_READONLY (TREE_TYPE (var)))
{
x = unshare_expr (x);
x = build_simple_mem_ref (x);
gimplify_assign (var, x, &olist);
}
}
else
{
		      /* While MAP is handled explicitly by the FE,
			 for 'target update', only the identifier is passed. */
if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FROM
|| OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO)
&& (omp_is_allocatable_or_ptr (var)
&& omp_check_optional_argument (var, false)))
var = build_fold_indirect_ref (var);
else if ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_FROM
&& OMP_CLAUSE_CODE (c) != OMP_CLAUSE_TO)
|| (!omp_is_allocatable_or_ptr (var)
&& !omp_check_optional_argument (var, false)))
var = build_fold_addr_expr (var);
gimplify_assign (x, var, &ilist);
}
}
s = NULL_TREE;
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
{
gcc_checking_assert (is_gimple_omp_oacc (ctx->stmt));
s = TREE_TYPE (ovar);
if (TREE_CODE (s) == REFERENCE_TYPE
|| omp_check_optional_argument (ovar, false))
s = TREE_TYPE (s);
s = TYPE_SIZE_UNIT (s);
}
else
s = OMP_CLAUSE_SIZE (c);
if (s == NULL_TREE)
s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
s = fold_convert (size_type_node, s);
purpose = size_int (map_idx++);
CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
if (TREE_CODE (s) != INTEGER_CST)
TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
unsigned HOST_WIDE_INT tkind, tkind_zero;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_MAP:
tkind = OMP_CLAUSE_MAP_KIND (c);
tkind_zero = tkind;
if (OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION (c))
switch (tkind)
{
case GOMP_MAP_ALLOC:
case GOMP_MAP_IF_PRESENT:
case GOMP_MAP_TO:
case GOMP_MAP_FROM:
case GOMP_MAP_TOFROM:
case GOMP_MAP_ALWAYS_TO:
case GOMP_MAP_ALWAYS_FROM:
case GOMP_MAP_ALWAYS_TOFROM:
case GOMP_MAP_RELEASE:
case GOMP_MAP_FORCE_TO:
case GOMP_MAP_FORCE_FROM:
case GOMP_MAP_FORCE_TOFROM:
case GOMP_MAP_FORCE_PRESENT:
tkind_zero = GOMP_MAP_ZERO_LEN_ARRAY_SECTION;
break;
case GOMP_MAP_DELETE:
tkind_zero = GOMP_MAP_DELETE_ZERO_LEN_ARRAY_SECTION;
default:
break;
}
if (tkind_zero != tkind)
{
if (integer_zerop (s))
tkind = tkind_zero;
else if (integer_nonzerop (s))
tkind_zero = tkind;
}
break;
case OMP_CLAUSE_FIRSTPRIVATE:
gcc_checking_assert (is_gimple_omp_oacc (ctx->stmt));
tkind = GOMP_MAP_TO;
tkind_zero = tkind;
break;
case OMP_CLAUSE_TO:
tkind = GOMP_MAP_TO;
tkind_zero = tkind;
break;
case OMP_CLAUSE_FROM:
tkind = GOMP_MAP_FROM;
tkind_zero = tkind;
break;
default:
gcc_unreachable ();
}
gcc_checking_assert (tkind
< (HOST_WIDE_INT_C (1U) << talign_shift));
gcc_checking_assert (tkind_zero
< (HOST_WIDE_INT_C (1U) << talign_shift));
talign = ceil_log2 (talign);
tkind |= talign << talign_shift;
tkind_zero |= talign << talign_shift;
gcc_checking_assert (tkind
<= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
gcc_checking_assert (tkind_zero
<= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
if (tkind == tkind_zero)
x = build_int_cstu (tkind_type, tkind);
else
{
TREE_STATIC (TREE_VEC_ELT (t, 2)) = 0;
x = build3 (COND_EXPR, tkind_type,
fold_build2 (EQ_EXPR, boolean_type_node,
unshare_expr (s), size_zero_node),
build_int_cstu (tkind_type, tkind_zero),
build_int_cstu (tkind_type, tkind));
}
CONSTRUCTOR_APPEND_ELT (vkind, purpose, x);
if (nc && nc != c)
c = nc;
break;
case OMP_CLAUSE_FIRSTPRIVATE:
if (is_gimple_omp_oacc (ctx->stmt))
goto oacc_firstprivate_map;
ovar = OMP_CLAUSE_DECL (c);
if (omp_is_reference (ovar))
talign = TYPE_ALIGN_UNIT (TREE_TYPE (TREE_TYPE (ovar)));
else
talign = DECL_ALIGN_UNIT (ovar);
var = lookup_decl_in_outer_ctx (ovar, ctx);
x = build_sender_ref (ovar, ctx);
tkind = GOMP_MAP_FIRSTPRIVATE;
type = TREE_TYPE (ovar);
if (omp_is_reference (ovar))
type = TREE_TYPE (type);
if ((INTEGRAL_TYPE_P (type)
&& TYPE_PRECISION (type) <= POINTER_SIZE)
|| TREE_CODE (type) == POINTER_TYPE)
{
tkind = GOMP_MAP_FIRSTPRIVATE_INT;
tree t = var;
if (omp_is_reference (var))
t = build_simple_mem_ref (var);
else if (OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (c))
TREE_NO_WARNING (var) = 1;
if (TREE_CODE (type) != POINTER_TYPE)
t = fold_convert (pointer_sized_int_node, t);
t = fold_convert (TREE_TYPE (x), t);
gimplify_assign (x, t, &ilist);
}
else if (omp_is_reference (var))
gimplify_assign (x, var, &ilist);
else if (is_gimple_reg (var))
{
tree avar = create_tmp_var (TREE_TYPE (var));
mark_addressable (avar);
if (OMP_CLAUSE_FIRSTPRIVATE_IMPLICIT (c))
TREE_NO_WARNING (var) = 1;
gimplify_assign (avar, var, &ilist);
avar = build_fold_addr_expr (avar);
gimplify_assign (x, avar, &ilist);
}
else
{
var = build_fold_addr_expr (var);
gimplify_assign (x, var, &ilist);
}
if (tkind == GOMP_MAP_FIRSTPRIVATE_INT)
s = size_int (0);
else if (omp_is_reference (ovar))
s = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (ovar)));
else
s = TYPE_SIZE_UNIT (TREE_TYPE (ovar));
s = fold_convert (size_type_node, s);
purpose = size_int (map_idx++);
CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
if (TREE_CODE (s) != INTEGER_CST)
TREE_STATIC (TREE_VEC_ELT (t, 1)) = 0;
gcc_checking_assert (tkind
< (HOST_WIDE_INT_C (1U) << talign_shift));
talign = ceil_log2 (talign);
tkind |= talign << talign_shift;
gcc_checking_assert (tkind
<= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
CONSTRUCTOR_APPEND_ELT (vkind, purpose,
build_int_cstu (tkind_type, tkind));
break;
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_USE_DEVICE_ADDR:
case OMP_CLAUSE_IS_DEVICE_PTR:
ovar = OMP_CLAUSE_DECL (c);
var = lookup_decl_in_outer_ctx (ovar, ctx);
if (lang_hooks.decls.omp_array_data (ovar, true))
{
tkind = (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_IS_DEVICE_PTR
? GOMP_MAP_USE_DEVICE_PTR : GOMP_MAP_FIRSTPRIVATE_INT);
x = build_sender_ref ((splay_tree_key) &DECL_NAME (ovar), ctx);
}
else if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_IS_DEVICE_PTR)
{
tkind = GOMP_MAP_USE_DEVICE_PTR;
x = build_sender_ref ((splay_tree_key) &DECL_UID (ovar), ctx);
}
else
{
tkind = GOMP_MAP_FIRSTPRIVATE_INT;
x = build_sender_ref (ovar, ctx);
}
if (is_gimple_omp_oacc (ctx->stmt))
{
gcc_assert (tkind == GOMP_MAP_USE_DEVICE_PTR);
if (OMP_CLAUSE_USE_DEVICE_PTR_IF_PRESENT (c))
tkind = GOMP_MAP_USE_DEVICE_PTR_IF_PRESENT;
}
type = TREE_TYPE (ovar);
if (lang_hooks.decls.omp_array_data (ovar, true))
var = lang_hooks.decls.omp_array_data (ovar, false);
else if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_ADDR
&& !omp_is_reference (ovar)
&& !omp_is_allocatable_or_ptr (ovar))
|| TREE_CODE (type) == ARRAY_TYPE)
var = build_fold_addr_expr (var);
else
{
if (omp_is_reference (ovar)
|| omp_check_optional_argument (ovar, false)
|| omp_is_allocatable_or_ptr (ovar))
{
type = TREE_TYPE (type);
if (TREE_CODE (type) != ARRAY_TYPE
&& ((OMP_CLAUSE_CODE (c) != OMP_CLAUSE_USE_DEVICE_ADDR
&& !omp_is_allocatable_or_ptr (ovar))
|| (omp_is_reference (ovar)
&& omp_is_allocatable_or_ptr (ovar))))
var = build_simple_mem_ref (var);
var = fold_convert (TREE_TYPE (x), var);
}
}
tree present;
present = omp_check_optional_argument (ovar, true);
if (present)
{
tree null_label = create_artificial_label (UNKNOWN_LOCATION);
tree notnull_label = create_artificial_label (UNKNOWN_LOCATION);
tree opt_arg_label = create_artificial_label (UNKNOWN_LOCATION);
tree new_x = unshare_expr (x);
gimplify_expr (&present, &ilist, NULL, is_gimple_val,
fb_rvalue);
gcond *cond = gimple_build_cond_from_tree (present,
notnull_label,
null_label);
gimple_seq_add_stmt (&ilist, cond);
gimple_seq_add_stmt (&ilist, gimple_build_label (null_label));
gimplify_assign (new_x, null_pointer_node, &ilist);
gimple_seq_add_stmt (&ilist, gimple_build_goto (opt_arg_label));
gimple_seq_add_stmt (&ilist,
gimple_build_label (notnull_label));
gimplify_assign (x, var, &ilist);
gimple_seq_add_stmt (&ilist,
gimple_build_label (opt_arg_label));
}
else
gimplify_assign (x, var, &ilist);
s = size_int (0);
purpose = size_int (map_idx++);
CONSTRUCTOR_APPEND_ELT (vsize, purpose, s);
gcc_checking_assert (tkind
< (HOST_WIDE_INT_C (1U) << talign_shift));
gcc_checking_assert (tkind
<= tree_to_uhwi (TYPE_MAX_VALUE (tkind_type)));
CONSTRUCTOR_APPEND_ELT (vkind, purpose,
build_int_cstu (tkind_type, tkind));
break;
}
gcc_assert (map_idx == map_cnt);
DECL_INITIAL (TREE_VEC_ELT (t, 1))
= build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 1)), vsize);
DECL_INITIAL (TREE_VEC_ELT (t, 2))
= build_constructor (TREE_TYPE (TREE_VEC_ELT (t, 2)), vkind);
for (int i = 1; i <= 2; i++)
if (!TREE_STATIC (TREE_VEC_ELT (t, i)))
{
gimple_seq initlist = NULL;
force_gimple_operand (build1 (DECL_EXPR, void_type_node,
TREE_VEC_ELT (t, i)),
&initlist, true, NULL_TREE);
gimple_seq_add_seq (&ilist, initlist);
tree clobber = build_clobber (TREE_TYPE (TREE_VEC_ELT (t, i)));
gimple_seq_add_stmt (&olist,
gimple_build_assign (TREE_VEC_ELT (t, i),
clobber));
}
tree clobber = build_clobber (ctx->record_type);
gimple_seq_add_stmt (&olist, gimple_build_assign (ctx->sender_decl,
clobber));
}
/* Once all the expansions are done, sequence all the different
fragments inside gimple_omp_body. */
new_body = NULL;
if (offloaded
&& ctx->record_type)
{
t = build_fold_addr_expr_loc (loc, ctx->sender_decl);
/* fixup_child_record_type might have changed receiver_decl's type. */
t = fold_convert_loc (loc, TREE_TYPE (ctx->receiver_decl), t);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (ctx->receiver_decl, t));
}
gimple_seq_add_seq (&new_body, fplist);
if (offloaded || data_region)
{
tree prev = NULL_TREE;
for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
tree var, x;
default:
break;
case OMP_CLAUSE_FIRSTPRIVATE:
if (is_gimple_omp_oacc (ctx->stmt))
break;
var = OMP_CLAUSE_DECL (c);
if (omp_is_reference (var)
|| is_gimple_reg_type (TREE_TYPE (var)))
{
tree new_var = lookup_decl (var, ctx);
tree type;
type = TREE_TYPE (var);
if (omp_is_reference (var))
type = TREE_TYPE (type);
if ((INTEGRAL_TYPE_P (type)
&& TYPE_PRECISION (type) <= POINTER_SIZE)
|| TREE_CODE (type) == POINTER_TYPE)
{
x = build_receiver_ref (var, false, ctx);
if (TREE_CODE (type) != POINTER_TYPE)
x = fold_convert (pointer_sized_int_node, x);
x = fold_convert (type, x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val,
fb_rvalue);
if (omp_is_reference (var))
{
tree v = create_tmp_var_raw (type, get_name (var));
gimple_add_tmp_var (v);
TREE_ADDRESSABLE (v) = 1;
gimple_seq_add_stmt (&new_body,
gimple_build_assign (v, x));
x = build_fold_addr_expr (v);
}
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
else
{
x = build_receiver_ref (var, !omp_is_reference (var), ctx);
gimplify_expr (&x, &new_body, NULL, is_gimple_val,
fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
}
else if (is_variable_sized (var))
{
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
tree new_var = lookup_decl (pvar, ctx);
x = build_receiver_ref (var, false, ctx);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
break;
case OMP_CLAUSE_PRIVATE:
if (is_gimple_omp_oacc (ctx->stmt))
break;
var = OMP_CLAUSE_DECL (c);
if (omp_is_reference (var))
{
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
tree new_var = lookup_decl (var, ctx);
x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
if (TREE_CONSTANT (x))
{
x = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_var)),
get_name (var));
gimple_add_tmp_var (x);
TREE_ADDRESSABLE (x) = 1;
x = build_fold_addr_expr_loc (clause_loc, x);
}
else
break;
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
break;
case OMP_CLAUSE_USE_DEVICE_PTR:
case OMP_CLAUSE_USE_DEVICE_ADDR:
case OMP_CLAUSE_IS_DEVICE_PTR:
tree new_var;
gimple_seq assign_body;
bool is_array_data;
bool do_optional_check;
assign_body = NULL;
do_optional_check = false;
var = OMP_CLAUSE_DECL (c);
is_array_data = lang_hooks.decls.omp_array_data (var, true) != NULL;
if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_IS_DEVICE_PTR)
x = build_sender_ref (is_array_data
? (splay_tree_key) &DECL_NAME (var)
: (splay_tree_key) &DECL_UID (var), ctx);
else
x = build_receiver_ref (var, false, ctx);
if (is_array_data)
{
bool is_ref = omp_is_reference (var);
do_optional_check = true;
/* First, we copy the descriptor data from the host; then
we update its data to point to the target address. */
new_var = lookup_decl (var, ctx);
new_var = DECL_VALUE_EXPR (new_var);
tree v = new_var;
if (is_ref)
{
var = build_fold_indirect_ref (var);
gimplify_expr (&var, &assign_body, NULL, is_gimple_val,
fb_rvalue);
v = create_tmp_var_raw (TREE_TYPE (var), get_name (var));
gimple_add_tmp_var (v);
TREE_ADDRESSABLE (v) = 1;
gimple_seq_add_stmt (&assign_body,
gimple_build_assign (v, var));
tree rhs = build_fold_addr_expr (v);
gimple_seq_add_stmt (&assign_body,
gimple_build_assign (new_var, rhs));
}
else
gimple_seq_add_stmt (&assign_body,
gimple_build_assign (new_var, var));
tree v2 = lang_hooks.decls.omp_array_data (unshare_expr (v), false);
gcc_assert (v2);
gimplify_expr (&x, &assign_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&assign_body,
gimple_build_assign (v2, x));
}
else if (is_variable_sized (var))
{
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
new_var = lookup_decl (pvar, ctx);
gimplify_expr (&x, &assign_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&assign_body,
gimple_build_assign (new_var, x));
}
else if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_ADDR
&& !omp_is_reference (var)
&& !omp_is_allocatable_or_ptr (var))
|| TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE)
{
new_var = lookup_decl (var, ctx);
new_var = DECL_VALUE_EXPR (new_var);
gcc_assert (TREE_CODE (new_var) == MEM_REF);
new_var = TREE_OPERAND (new_var, 0);
gcc_assert (DECL_P (new_var));
gimplify_expr (&x, &assign_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&assign_body,
gimple_build_assign (new_var, x));
}
else
{
tree type = TREE_TYPE (var);
new_var = lookup_decl (var, ctx);
if (omp_is_reference (var))
{
type = TREE_TYPE (type);
if (TREE_CODE (type) != ARRAY_TYPE
&& (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_USE_DEVICE_ADDR
|| (omp_is_reference (var)
&& omp_is_allocatable_or_ptr (var))))
{
tree v = create_tmp_var_raw (type, get_name (var));
gimple_add_tmp_var (v);
TREE_ADDRESSABLE (v) = 1;
x = fold_convert (type, x);
gimplify_expr (&x, &assign_body, NULL, is_gimple_val,
fb_rvalue);
gimple_seq_add_stmt (&assign_body,
gimple_build_assign (v, x));
x = build_fold_addr_expr (v);
do_optional_check = true;
}
}
new_var = DECL_VALUE_EXPR (new_var);
x = fold_convert (TREE_TYPE (new_var), x);
gimplify_expr (&x, &assign_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&assign_body,
gimple_build_assign (new_var, x));
}
tree present;
present = (do_optional_check
? omp_check_optional_argument (OMP_CLAUSE_DECL (c), true)
: NULL_TREE);
if (present)
{
tree null_label = create_artificial_label (UNKNOWN_LOCATION);
tree notnull_label = create_artificial_label (UNKNOWN_LOCATION);
tree opt_arg_label = create_artificial_label (UNKNOWN_LOCATION);
glabel *null_glabel = gimple_build_label (null_label);
glabel *notnull_glabel = gimple_build_label (notnull_label);
ggoto *opt_arg_ggoto = gimple_build_goto (opt_arg_label);
gimplify_expr (&x, &new_body, NULL, is_gimple_val,
fb_rvalue);
gimplify_expr (&present, &new_body, NULL, is_gimple_val,
fb_rvalue);
gcond *cond = gimple_build_cond_from_tree (present,
notnull_label,
null_label);
gimple_seq_add_stmt (&new_body, cond);
gimple_seq_add_stmt (&new_body, null_glabel);
gimplify_assign (new_var, null_pointer_node, &new_body);
gimple_seq_add_stmt (&new_body, opt_arg_ggoto);
gimple_seq_add_stmt (&new_body, notnull_glabel);
gimple_seq_add_seq (&new_body, assign_body);
gimple_seq_add_stmt (&new_body,
gimple_build_label (opt_arg_label));
}
else
gimple_seq_add_seq (&new_body, assign_body);
break;
}
/* Handle GOMP_MAP_FIRSTPRIVATE_{POINTER,REFERENCE} in second pass,
so that firstprivate vars holding OMP_CLAUSE_SIZE if needed
are already handled. Similarly OMP_CLAUSE_PRIVATE for VLAs
or references to VLAs. */
for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
switch (OMP_CLAUSE_CODE (c))
{
tree var;
default:
break;
case OMP_CLAUSE_MAP:
if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
|| OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)
{
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
poly_int64 offset = 0;
gcc_assert (prev);
var = OMP_CLAUSE_DECL (c);
if (DECL_P (var)
&& TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
&& is_global_var (maybe_lookup_decl_in_outer_ctx (var,
ctx))
&& varpool_node::get_create (var)->offloadable)
break;
if (TREE_CODE (var) == INDIRECT_REF
&& TREE_CODE (TREE_OPERAND (var, 0)) == COMPONENT_REF)
var = TREE_OPERAND (var, 0);
if (TREE_CODE (var) == COMPONENT_REF)
{
var = get_addr_base_and_unit_offset (var, &offset);
gcc_assert (var != NULL_TREE && DECL_P (var));
}
else if (DECL_SIZE (var)
&& TREE_CODE (DECL_SIZE (var)) != INTEGER_CST)
{
tree var2 = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (var2) == INDIRECT_REF);
var2 = TREE_OPERAND (var2, 0);
gcc_assert (DECL_P (var2));
var = var2;
}
tree new_var = lookup_decl (var, ctx), x;
tree type = TREE_TYPE (new_var);
bool is_ref;
if (TREE_CODE (OMP_CLAUSE_DECL (c)) == INDIRECT_REF
&& (TREE_CODE (TREE_OPERAND (OMP_CLAUSE_DECL (c), 0))
== COMPONENT_REF))
{
type = TREE_TYPE (TREE_OPERAND (OMP_CLAUSE_DECL (c), 0));
is_ref = true;
new_var = build2 (MEM_REF, type,
build_fold_addr_expr (new_var),
build_int_cst (build_pointer_type (type),
offset));
}
else if (TREE_CODE (OMP_CLAUSE_DECL (c)) == COMPONENT_REF)
{
type = TREE_TYPE (OMP_CLAUSE_DECL (c));
is_ref = TREE_CODE (type) == REFERENCE_TYPE;
new_var = build2 (MEM_REF, type,
build_fold_addr_expr (new_var),
build_int_cst (build_pointer_type (type),
offset));
}
else
is_ref = omp_is_reference (var);
if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_REFERENCE)
is_ref = false;
bool ref_to_array = false;
if (is_ref)
{
type = TREE_TYPE (type);
if (TREE_CODE (type) == ARRAY_TYPE)
{
type = build_pointer_type (type);
ref_to_array = true;
}
}
else if (TREE_CODE (type) == ARRAY_TYPE)
{
tree decl2 = DECL_VALUE_EXPR (new_var);
gcc_assert (TREE_CODE (decl2) == MEM_REF);
decl2 = TREE_OPERAND (decl2, 0);
gcc_assert (DECL_P (decl2));
new_var = decl2;
type = TREE_TYPE (new_var);
}
x = build_receiver_ref (OMP_CLAUSE_DECL (prev), false, ctx);
x = fold_convert_loc (clause_loc, type, x);
if (!integer_zerop (OMP_CLAUSE_SIZE (c)))
{
tree bias = OMP_CLAUSE_SIZE (c);
if (DECL_P (bias))
bias = lookup_decl (bias, ctx);
bias = fold_convert_loc (clause_loc, sizetype, bias);
bias = fold_build1_loc (clause_loc, NEGATE_EXPR, sizetype,
bias);
x = fold_build2_loc (clause_loc, POINTER_PLUS_EXPR,
TREE_TYPE (x), x, bias);
}
if (ref_to_array)
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
if (is_ref && !ref_to_array)
{
tree t = create_tmp_var_raw (type, get_name (var));
gimple_add_tmp_var (t);
TREE_ADDRESSABLE (t) = 1;
gimple_seq_add_stmt (&new_body,
gimple_build_assign (t, x));
x = build_fold_addr_expr_loc (clause_loc, t);
}
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
prev = NULL_TREE;
}
else if (OMP_CLAUSE_CHAIN (c)
&& OMP_CLAUSE_CODE (OMP_CLAUSE_CHAIN (c))
== OMP_CLAUSE_MAP
&& (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (c))
== GOMP_MAP_FIRSTPRIVATE_POINTER
|| (OMP_CLAUSE_MAP_KIND (OMP_CLAUSE_CHAIN (c))
== GOMP_MAP_FIRSTPRIVATE_REFERENCE)))
prev = c;
break;
case OMP_CLAUSE_PRIVATE:
var = OMP_CLAUSE_DECL (c);
if (is_variable_sized (var))
{
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
tree new_var = lookup_decl (var, ctx);
tree pvar = DECL_VALUE_EXPR (var);
gcc_assert (TREE_CODE (pvar) == INDIRECT_REF);
pvar = TREE_OPERAND (pvar, 0);
gcc_assert (DECL_P (pvar));
tree new_pvar = lookup_decl (pvar, ctx);
tree atmp = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
tree al = size_int (DECL_ALIGN (var));
tree x = TYPE_SIZE_UNIT (TREE_TYPE (new_var));
x = build_call_expr_loc (clause_loc, atmp, 2, x, al);
x = fold_convert_loc (clause_loc, TREE_TYPE (new_pvar), x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_pvar, x));
}
else if (omp_is_reference (var) && !is_gimple_omp_oacc (ctx->stmt))
{
location_t clause_loc = OMP_CLAUSE_LOCATION (c);
tree new_var = lookup_decl (var, ctx);
tree x = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_var)));
if (TREE_CONSTANT (x))
break;
else
{
tree atmp
= builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
tree rtype = TREE_TYPE (TREE_TYPE (new_var));
tree al = size_int (TYPE_ALIGN (rtype));
x = build_call_expr_loc (clause_loc, atmp, 2, x, al);
}
x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
gimplify_expr (&x, &new_body, NULL, is_gimple_val, fb_rvalue);
gimple_seq_add_stmt (&new_body,
gimple_build_assign (new_var, x));
}
break;
}
gimple_seq fork_seq = NULL;
gimple_seq join_seq = NULL;
if (offloaded && is_gimple_omp_oacc (ctx->stmt))
{
/* If there are reductions on the offloaded region itself, treat
them as a dummy GANG loop. */
tree level = build_int_cst (integer_type_node, GOMP_DIM_GANG);
lower_oacc_reductions (gimple_location (ctx->stmt), clauses, level,
false, NULL, NULL, &fork_seq, &join_seq, ctx);
}
gimple_seq_add_seq (&new_body, fork_seq);
gimple_seq_add_seq (&new_body, tgt_body);
gimple_seq_add_seq (&new_body, join_seq);
if (offloaded)
{
new_body = maybe_catch_exception (new_body);
gimple_seq_add_stmt (&new_body, gimple_build_omp_return (false));
}
gimple_omp_set_body (stmt, new_body);
}
bind = gimple_build_bind (NULL, NULL,
tgt_bind ? gimple_bind_block (tgt_bind)
: NULL_TREE);
gsi_replace (gsi_p, dep_bind ? dep_bind : bind, true);
gimple_bind_add_seq (bind, ilist);
gimple_bind_add_stmt (bind, stmt);
gimple_bind_add_seq (bind, olist);
pop_gimplify_context (NULL);
if (dep_bind)
{
gimple_bind_add_seq (dep_bind, dep_ilist);
gimple_bind_add_stmt (dep_bind, bind);
gimple_bind_add_seq (dep_bind, dep_olist);
pop_gimplify_context (dep_bind);
}
}
/* Expand code for an OpenMP teams directive. */
static void
lower_omp_teams (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  /* Lower the GIMPLE_OMP_TEAMS statement at *GSI_P: replace it with a
     GIMPLE_BIND that evaluates the num_teams/thread_limit clause
     expressions, calls GOMP_TEAMS, and then runs the lowered body.  */
  gomp_teams *teams_stmt = as_a <gomp_teams *> (gsi_stmt (*gsi_p));
  push_gimplify_context ();

  tree block = make_node (BLOCK);
  gbind *bind = gimple_build_bind (NULL, NULL, block);
  gsi_replace (gsi_p, bind, true);
  gimple_seq bind_body = NULL;
  gimple_seq dlist = NULL;
  gimple_seq olist = NULL;

  /* A zero NUM_TEAMS argument means "no clause given; implementation
     chooses".  Otherwise gimplify the clause expression into bind_body
     so it is evaluated before the GOMP_teams call below.  */
  tree num_teams = omp_find_clause (gimple_omp_teams_clauses (teams_stmt),
				    OMP_CLAUSE_NUM_TEAMS);
  if (num_teams == NULL_TREE)
    num_teams = build_int_cst (unsigned_type_node, 0);
  else
    {
      num_teams = OMP_CLAUSE_NUM_TEAMS_EXPR (num_teams);
      num_teams = fold_convert (unsigned_type_node, num_teams);
      gimplify_expr (&num_teams, &bind_body, NULL, is_gimple_val, fb_rvalue);
    }
  /* Likewise for THREAD_LIMIT: zero means "no limit specified".  */
  tree thread_limit = omp_find_clause (gimple_omp_teams_clauses (teams_stmt),
				       OMP_CLAUSE_THREAD_LIMIT);
  if (thread_limit == NULL_TREE)
    thread_limit = build_int_cst (unsigned_type_node, 0);
  else
    {
      thread_limit = OMP_CLAUSE_THREAD_LIMIT_EXPR (thread_limit);
      thread_limit = fold_convert (unsigned_type_node, thread_limit);
      gimplify_expr (&thread_limit, &bind_body, NULL, is_gimple_val,
		     fb_rvalue);
    }

  /* Emit setup code for data-sharing clauses (into bind_body) and the
     matching teardown (into dlist), then lower the construct body and
     any reductions (teardown into olist).  */
  lower_rec_input_clauses (gimple_omp_teams_clauses (teams_stmt),
			   &bind_body, &dlist, ctx, NULL);
  lower_omp (gimple_omp_body_ptr (teams_stmt), ctx);
  lower_reduction_clauses (gimple_omp_teams_clauses (teams_stmt), &olist,
			   NULL, ctx);
  /* For non-phony (real) teams constructs, keep the GIMPLE_OMP_TEAMS
     marker and insert the GOMP_teams runtime call before the body.  */
  if (!gimple_omp_teams_grid_phony (teams_stmt))
    {
      gimple_seq_add_stmt (&bind_body, teams_stmt);
      location_t loc = gimple_location (teams_stmt);
      tree decl = builtin_decl_explicit (BUILT_IN_GOMP_TEAMS);
      gimple *call = gimple_build_call (decl, 2, num_teams, thread_limit);
      gimple_set_location (call, loc);
      gimple_seq_add_stmt (&bind_body, call);
    }

  /* Sequence: setup, body, reduction teardown, data-sharing teardown,
     and finally the OMP return (with a barrier) for real constructs.  */
  gimple_seq_add_seq (&bind_body, gimple_omp_body (teams_stmt));
  gimple_omp_set_body (teams_stmt, NULL);
  gimple_seq_add_seq (&bind_body, olist);
  gimple_seq_add_seq (&bind_body, dlist);
  if (!gimple_omp_teams_grid_phony (teams_stmt))
    gimple_seq_add_stmt (&bind_body, gimple_build_omp_return (true));
  gimple_bind_set_body (bind, bind_body);

  pop_gimplify_context (bind);

  gimple_bind_append_vars (bind, ctx->block_vars);
  BLOCK_VARS (block) = ctx->block_vars;
  if (BLOCK_VARS (block))
    TREE_USED (block) = 1;
}
/* Expand code within an artificial GIMPLE_OMP_GRID_BODY OMP construct. */
static void
lower_omp_grid_body (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  /* Recursively lower the construct's body, then terminate it with a
     non-barrier OMP return statement.  The body pointer is a stable
     address into the statement, so it can be reused after lowering.  */
  gimple_seq *body_p = gimple_omp_body_ptr (gsi_stmt (*gsi_p));
  lower_omp (body_p, ctx);
  gimple_seq_add_stmt (body_p, gimple_build_omp_return (false));
}
/* Callback for lower_omp_1. Return non-NULL if *tp needs to be
regimplified. If DATA is non-NULL, lower_omp_1 is outside
of OMP context, but with task_shared_vars set. */
static tree
lower_omp_regimplify_p (tree *tp, int *walk_subtrees,
			void *data)
{
  tree node = *tp;

  /* A variable carrying a DECL_VALUE_EXPR always needs regimplification
     (only checked when walking inside an OMP context, i.e. DATA == NULL).  */
  if (VAR_P (node) && data == NULL && DECL_HAS_VALUE_EXPR_P (node))
    return node;

  /* So does any decl recorded as shared by an enclosing task.  */
  if (task_shared_vars
      && DECL_P (node)
      && bitmap_bit_p (task_shared_vars, DECL_UID (node)))
    return node;

  /* Privatizing a global may invalidate TREE_CONSTANT on an ADDR_EXPR
     that refers to it; recompute the invariance bits as a side effect.  */
  if (data == NULL && TREE_CODE (node) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (node);

  *walk_subtrees = !IS_TYPE_OR_DECL_P (node);
  return NULL_TREE;
}
/* Data to be communicated between lower_omp_regimplify_operands and
lower_omp_regimplify_operands_p. */
struct lower_omp_regimplify_operands_data
{
  /* Context used to look up replacements for dummy vars.  */
  omp_context *ctx;
  /* Saved (DECL_VALUE_EXPR, decl) pairs, pushed in that order, so the
     original value expressions can be restored after regimplification.  */
  vec<tree> *decls;
};
/* Helper function for lower_omp_regimplify_operands. Find
omp_member_access_dummy_var vars and adjust temporarily their
DECL_VALUE_EXPRs if needed. */
static tree
lower_omp_regimplify_operands_p (tree *tp, int *walk_subtrees,
				 void *data)
{
  /* If *TP is a member-access dummy variable whose underlying decl has a
     replacement in the current context, temporarily remap its
     DECL_VALUE_EXPR, saving the old value so the caller can restore it.  */
  tree underlying = omp_member_access_dummy_var (*tp);
  if (underlying)
    {
      struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
      lower_omp_regimplify_operands_data *odata
	= (lower_omp_regimplify_operands_data *) wi->info;
      tree repl = maybe_lookup_decl (underlying, odata->ctx);
      if (repl != underlying)
	{
	  /* Push value expression first, then the decl: the restore loop
	     pops them in the opposite order.  */
	  odata->decls->safe_push (DECL_VALUE_EXPR (*tp));
	  odata->decls->safe_push (*tp);
	  tree remapped
	    = unshare_and_remap (DECL_VALUE_EXPR (*tp), underlying, repl);
	  SET_DECL_VALUE_EXPR (*tp, remapped);
	}
    }
  *walk_subtrees = !IS_TYPE_OR_DECL_P (*tp);
  return NULL_TREE;
}
/* Wrapper around gimple_regimplify_operands that adjusts DECL_VALUE_EXPRs
of omp_member_access_dummy_var vars during regimplification. */
static void
lower_omp_regimplify_operands (omp_context *ctx, gimple *stmt,
			       gimple_stmt_iterator *gsi_p)
{
  /* Temporarily remap DECL_VALUE_EXPRs of member-access dummy vars in
     STMT, regimplify its operands, then restore the saved expressions.  */
  auto_vec<tree, 10> saved;
  if (ctx)
    {
      struct lower_omp_regimplify_operands_data data;
      data.ctx = ctx;
      data.decls = &saved;
      struct walk_stmt_info wi;
      memset (&wi, '\0', sizeof (wi));
      wi.info = &data;
      walk_gimple_op (stmt, lower_omp_regimplify_operands_p, &wi);
    }
  gimple_regimplify_operands (stmt, gsi_p);
  /* Entries were pushed as (value-expr, decl) pairs, so the decl pops
     first and its original value expression second.  */
  while (!saved.is_empty ())
    {
      tree decl = saved.pop ();
      tree value = saved.pop ();
      SET_DECL_VALUE_EXPR (decl, value);
    }
}
/* Lower the statement at GSI_P.  CTX is the innermost enclosing OMP
   context, or NULL outside of any OMP region.  Dispatches on the
   gimple code to the construct-specific lowering routines, and
   regimplifies operands that refer to remapped/privatized variables.  */

static void
lower_omp_1 (gimple_stmt_iterator *gsi_p, omp_context *ctx)
{
  gimple *stmt = gsi_stmt (*gsi_p);
  struct walk_stmt_info wi;
  gcall *call_stmt;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* WI is only passed to lower_omp_regimplify_p when lowering outside
     of an OMP context with task_shared_vars set.  */
  if (task_shared_vars)
    memset (&wi, '\0', sizeof (wi));

  /* If we have issued syntax errors, avoid doing any heavy lifting.
     Just replace the OMP directives with a NOP to avoid
     confusing RTL expansion.  */
  if (seen_error () && is_gimple_omp (stmt))
    {
      gsi_replace (gsi_p, gimple_build_nop (), true);
      return;
    }

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
	gcond *cond_stmt = as_a <gcond *> (stmt);
	if ((ctx || task_shared_vars)
	    && (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
			   lower_omp_regimplify_p,
			   ctx ? NULL : &wi, NULL)
		|| walk_tree (gimple_cond_rhs_ptr (cond_stmt),
			      lower_omp_regimplify_p,
			      ctx ? NULL : &wi, NULL)))
	  lower_omp_regimplify_operands (ctx, cond_stmt, gsi_p);
      }
      break;
    case GIMPLE_CATCH:
      lower_omp (gimple_catch_handler_ptr (as_a <gcatch *> (stmt)), ctx);
      break;
    case GIMPLE_EH_FILTER:
      lower_omp (gimple_eh_filter_failure_ptr (stmt), ctx);
      break;
    case GIMPLE_TRY:
      lower_omp (gimple_try_eval_ptr (stmt), ctx);
      lower_omp (gimple_try_cleanup_ptr (stmt), ctx);
      break;
    case GIMPLE_TRANSACTION:
      lower_omp (gimple_transaction_body_ptr (as_a <gtransaction *> (stmt)),
		 ctx);
      break;
    case GIMPLE_BIND:
      lower_omp (gimple_bind_body_ptr (as_a <gbind *> (stmt)), ctx);
      maybe_remove_omp_member_access_dummy_vars (as_a <gbind *> (stmt));
      break;
    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      if (ctx->cancellable)
	ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
      lower_omp_taskreg (gsi_p, ctx);
      break;
    case GIMPLE_OMP_FOR:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      if (ctx->cancellable)
	ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
      lower_omp_for (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SECTIONS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      if (ctx->cancellable)
	ctx->cancel_label = create_artificial_label (UNKNOWN_LOCATION);
      lower_omp_sections (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SINGLE:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_single (gsi_p, ctx);
      break;
    case GIMPLE_OMP_MASTER:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_master (gsi_p, ctx);
      break;
    case GIMPLE_OMP_TASKGROUP:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_taskgroup (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ORDERED:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_ordered (gsi_p, ctx);
      break;
    case GIMPLE_OMP_SCAN:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_scan (gsi_p, ctx);
      break;
    case GIMPLE_OMP_CRITICAL:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_critical (gsi_p, ctx);
      break;
    case GIMPLE_OMP_ATOMIC_LOAD:
      if ((ctx || task_shared_vars)
	  && walk_tree (gimple_omp_atomic_load_rhs_ptr (
			  as_a <gomp_atomic_load *> (stmt)),
			lower_omp_regimplify_p, ctx ? NULL : &wi, NULL))
	lower_omp_regimplify_operands (ctx, stmt, gsi_p);
      break;
    case GIMPLE_OMP_TARGET:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_target (gsi_p, ctx);
      break;
    case GIMPLE_OMP_TEAMS:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      /* Host teams constructs are lowered like parallel/task regions.  */
      if (gimple_omp_teams_host (as_a <gomp_teams *> (stmt)))
	lower_omp_taskreg (gsi_p, ctx);
      else
	lower_omp_teams (gsi_p, ctx);
      break;
    case GIMPLE_OMP_GRID_BODY:
      ctx = maybe_lookup_ctx (stmt);
      gcc_assert (ctx);
      lower_omp_grid_body (gsi_p, ctx);
      break;
    case GIMPLE_CALL:
      tree fndecl;
      call_stmt = as_a <gcall *> (stmt);
      fndecl = gimple_call_fndecl (call_stmt);
      /* Inside cancellable regions, rewrite GOMP barrier/cancel
	 builtins so that a true result branches to the region's
	 cancel label.  */
      if (fndecl
	  && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
	switch (DECL_FUNCTION_CODE (fndecl))
	  {
	  case BUILT_IN_GOMP_BARRIER:
	    if (ctx == NULL)
	      break;
	    /* FALLTHRU */
	  case BUILT_IN_GOMP_CANCEL:
	  case BUILT_IN_GOMP_CANCELLATION_POINT:
	    omp_context *cctx;
	    cctx = ctx;
	    /* For sections, the cancellable context is the enclosing
	       GIMPLE_OMP_SECTIONS, not the section itself.  */
	    if (gimple_code (cctx->stmt) == GIMPLE_OMP_SECTION)
	      cctx = cctx->outer;
	    gcc_assert (gimple_call_lhs (call_stmt) == NULL_TREE);
	    if (!cctx->cancellable)
	      {
		/* Cancellation points in non-cancellable regions are
		   no-ops and can be removed entirely.  */
		if (DECL_FUNCTION_CODE (fndecl)
		    == BUILT_IN_GOMP_CANCELLATION_POINT)
		  {
		    stmt = gimple_build_nop ();
		    gsi_replace (gsi_p, stmt, false);
		  }
		break;
	      }
	    if (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_GOMP_BARRIER)
	      {
		fndecl = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER_CANCEL);
		gimple_call_set_fndecl (call_stmt, fndecl);
		gimple_call_set_fntype (call_stmt, TREE_TYPE (fndecl));
	      }
	    /* Give the call an lhs and branch on it: non-zero means
	       the region was cancelled.  */
	    tree lhs;
	    lhs = create_tmp_var (TREE_TYPE (TREE_TYPE (fndecl)));
	    gimple_call_set_lhs (call_stmt, lhs);
	    tree fallthru_label;
	    fallthru_label = create_artificial_label (UNKNOWN_LOCATION);
	    gimple *g;
	    g = gimple_build_label (fallthru_label);
	    gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
	    g = gimple_build_cond (NE_EXPR, lhs,
				   fold_convert (TREE_TYPE (lhs),
						 boolean_false_node),
				   cctx->cancel_label, fallthru_label);
	    gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
	    break;
	  default:
	    break;
	  }
      goto regimplify;
    case GIMPLE_ASSIGN:
      /* For stores to variables with lastprivate(conditional:), emit
	 an update of the _condtemp_ iterator after the store.  */
      for (omp_context *up = ctx; up; up = up->outer)
	{
	  if (gimple_code (up->stmt) == GIMPLE_OMP_ORDERED
	      || gimple_code (up->stmt) == GIMPLE_OMP_CRITICAL
	      || gimple_code (up->stmt) == GIMPLE_OMP_TASKGROUP
	      || gimple_code (up->stmt) == GIMPLE_OMP_SECTION
	      || gimple_code (up->stmt) == GIMPLE_OMP_SCAN
	      || (gimple_code (up->stmt) == GIMPLE_OMP_TARGET
		  && (gimple_omp_target_kind (up->stmt)
		      == GF_OMP_TARGET_KIND_DATA)))
	    continue;
	  else if (!up->lastprivate_conditional_map)
	    break;
	  tree lhs = get_base_address (gimple_assign_lhs (stmt));
	  if (TREE_CODE (lhs) == MEM_REF
	      && DECL_P (TREE_OPERAND (lhs, 0))
	      && TREE_CODE (TREE_TYPE (TREE_OPERAND (lhs,
						     0))) == REFERENCE_TYPE)
	    lhs = TREE_OPERAND (lhs, 0);
	  if (DECL_P (lhs))
	    if (tree *v = up->lastprivate_conditional_map->get (lhs))
	      {
		tree clauses;
		if (up->combined_into_simd_safelen1)
		  {
		    up = up->outer;
		    if (gimple_code (up->stmt) == GIMPLE_OMP_SCAN)
		      up = up->outer;
		  }
		if (gimple_code (up->stmt) == GIMPLE_OMP_FOR)
		  clauses = gimple_omp_for_clauses (up->stmt);
		else
		  clauses = gimple_omp_sections_clauses (up->stmt);
		tree c = omp_find_clause (clauses, OMP_CLAUSE__CONDTEMP_);
		if (!OMP_CLAUSE__CONDTEMP__ITER (c))
		  c = omp_find_clause (OMP_CLAUSE_CHAIN (c),
				       OMP_CLAUSE__CONDTEMP_);
		gcc_assert (OMP_CLAUSE__CONDTEMP__ITER (c));
		gimple *g = gimple_build_assign (*v, OMP_CLAUSE_DECL (c));
		gsi_insert_after (gsi_p, g, GSI_SAME_STMT);
	      }
	}
      /* FALLTHRU */
    default:
    regimplify:
      if ((ctx || task_shared_vars)
	  && walk_gimple_op (stmt, lower_omp_regimplify_p,
			     ctx ? NULL : &wi))
	{
	  /* Just remove clobbers, this should happen only if we have
	     "privatized" local addressable variables in SIMD regions,
	     the clobber isn't needed in that case and gimplifying address
	     of the ARRAY_REF into a pointer and creating MEM_REF based
	     clobber would create worse code than we get with the clobber
	     dropped.  */
	  if (gimple_clobber_p (stmt))
	    {
	      gsi_replace (gsi_p, gimple_build_nop (), true);
	      break;
	    }
	  lower_omp_regimplify_operands (ctx, stmt, gsi_p);
	}
      break;
    }
}
/* Lower every statement in *BODY within OMP context CTX (NULL at the
   outermost level), then fold statements left unfolded inside
   offloading or taskreg regions.  Preserves input_location.  */

static void
lower_omp (gimple_seq *body, omp_context *ctx)
{
  location_t saved_location = input_location;
  gimple_stmt_iterator gsi;
  for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
    lower_omp_1 (&gsi, ctx);
  /* During gimplification, we haven't folded statements inside offloading
     or taskreg regions (gimplify.c:maybe_fold_stmt); do that now.  */
  if (target_nesting_level || taskreg_nesting_level)
    for (gsi = gsi_start (*body); !gsi_end_p (gsi); gsi_next (&gsi))
      fold_stmt (&gsi);
  input_location = saved_location;
}
/* Main entry point.  Scan the current function body for OMP contexts,
   then lower all constructs found, and release the pass-global state.  */

static unsigned int
execute_lower_omp (void)
{
  gimple_seq body;
  int i;
  omp_context *ctx;

  /* This pass always runs, to provide PROP_gimple_lomp.
     But often, there is nothing to do.  */
  if (flag_openacc == 0 && flag_openmp == 0
      && flag_openmp_simd == 0)
    return 0;

  all_contexts = splay_tree_new (splay_tree_compare_pointers, 0,
				 delete_omp_context);

  body = gimple_body (current_function_decl);

  if (hsa_gen_requested_p ())
    omp_grid_gridify_all_targets (&body);

  /* Scan phase: builds the omp_context tree rooted in all_contexts.  */
  scan_omp (&body, NULL);
  gcc_assert (taskreg_nesting_level == 0);
  FOR_EACH_VEC_ELT (taskreg_contexts, i, ctx)
    finish_taskreg_scan (ctx);
  taskreg_contexts.release ();

  /* Lowering phase: only needed when at least one context was found.  */
  if (all_contexts->root)
    {
      if (task_shared_vars)
	push_gimplify_context ();
      lower_omp (&body, NULL);
      if (task_shared_vars)
	pop_gimplify_context (NULL);
    }

  if (all_contexts)
    {
      splay_tree_delete (all_contexts);
      all_contexts = NULL;
    }
  BITMAP_FREE (task_shared_vars);
  BITMAP_FREE (global_nonaddressable_vars);

  /* If current function is a method, remove artificial dummy VAR_DECL created
     for non-static data member privatization, they aren't needed for
     debuginfo nor anything else, have been already replaced everywhere in the
     IL and cause problems with LTO.  */
  if (DECL_ARGUMENTS (current_function_decl)
      && DECL_ARTIFICIAL (DECL_ARGUMENTS (current_function_decl))
      && (TREE_CODE (TREE_TYPE (DECL_ARGUMENTS (current_function_decl)))
	  == POINTER_TYPE))
    remove_member_access_dummy_vars (DECL_INITIAL (current_function_decl));
  return 0;
}
/* Pass registration for the "omplower" lowering pass.  */

namespace {

const pass_data pass_data_lower_omp =
{
  GIMPLE_PASS, /* type */
  "omplower", /* name */
  OPTGROUP_OMP, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_gimple_any, /* properties_required */
  PROP_gimple_lomp | PROP_gimple_lomp_dev, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_lower_omp : public gimple_opt_pass
{
public:
  pass_lower_omp (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_lower_omp, ctxt)
  {}

  /* opt_pass methods: */
  /* No gate: the pass always runs to provide PROP_gimple_lomp.  */
  virtual unsigned int execute (function *) { return execute_lower_omp (); }

}; // class pass_lower_omp

} // anon namespace
/* Create a new instance of the "omplower" pass.  */

gimple_opt_pass *
make_pass_lower_omp (gcc::context *ctxt)
{
  return new pass_lower_omp (ctxt);
}
/* The following is a utility to diagnose structured block violations.
   It is not part of the "omplower" pass, as that's invoked too late.  It
   should be invoked by the respective front ends after gimplification.  */

/* Map from each LABEL_DECL to the innermost OMP construct containing it
   (NULL outside any construct).  Built by diagnose_sb_1, queried by
   diagnose_sb_2.  */
static splay_tree all_labels;

/* Check for mismatched contexts and generate an error if needed.  Return
   true if an error is detected.  BRANCH_CTX and LABEL_CTX are the OMP
   constructs (or NULL) enclosing the branch at GSI_P and its target
   label, respectively.  On error the branch is replaced by a NOP.  */

static bool
diagnose_sb_0 (gimple_stmt_iterator *gsi_p,
	       gimple *branch_ctx, gimple *label_ctx)
{
  gcc_checking_assert (!branch_ctx || is_gimple_omp (branch_ctx));
  gcc_checking_assert (!label_ctx || is_gimple_omp (label_ctx));

  /* Same construct on both ends: the branch is legal.  */
  if (label_ctx == branch_ctx)
    return false;

  /* Pick the standard's name for the diagnostic text.  */
  const char* kind = NULL;

  if (flag_openacc)
    {
      if ((branch_ctx && is_gimple_omp_oacc (branch_ctx))
	  || (label_ctx && is_gimple_omp_oacc (label_ctx)))
	{
	  gcc_checking_assert (kind == NULL);
	  kind = "OpenACC";
	}
    }
  if (kind == NULL)
    {
      gcc_checking_assert (flag_openmp || flag_openmp_simd);
      kind = "OpenMP";
    }

  /* Previously we kept track of the label's entire context in diagnose_sb_[12]
     so we could traverse it and issue a correct "exit" or "enter" error
     message upon a structured block violation.

     We built the context by building a list with tree_cons'ing, but there is
     no easy counterpart in gimple tuples.  It seems like far too much work
     for issuing exit/enter error messages.  If someone really misses the
     distinct error message... patches welcome.  */

#if 0
  /* Try to avoid confusing the user by producing and error message
     with correct "exit" or "enter" verbiage.  We prefer "exit"
     unless we can show that LABEL_CTX is nested within BRANCH_CTX.  */
  if (branch_ctx == NULL)
    exit_p = false;
  else
    {
      while (label_ctx)
	{
	  if (TREE_VALUE (label_ctx) == branch_ctx)
	    {
	      exit_p = false;
	      break;
	    }
	  label_ctx = TREE_CHAIN (label_ctx);
	}
    }

  if (exit_p)
    error ("invalid exit from %s structured block", kind);
  else
    error ("invalid entry to %s structured block", kind);
#endif

  /* If it's obvious we have an invalid entry, be specific about the error.  */
  if (branch_ctx == NULL)
    error ("invalid entry to %s structured block", kind);
  else
    {
      /* Otherwise, be vague and lazy, but efficient.  */
      error ("invalid branch to/from %s structured block", kind);
    }

  /* Drop the offending branch so later passes don't trip over it.  */
  gsi_replace (gsi_p, gimple_build_nop (), false);
  return true;
}
/* Pass 1: Create a minimal tree of structured blocks, and record
   where each label is found.  WI->info holds the innermost enclosing
   OMP construct, NULL at the outermost level.  */

static tree
diagnose_sb_1 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple *context = (gimple *) wi->info;
  gimple *inner_context;
  gimple *stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_SCAN:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_OMP_TARGET:
    case GIMPLE_OMP_TEAMS:
    case GIMPLE_OMP_TASKGROUP:
      /* The minimal context here is just the current OMP construct.  */
      inner_context = stmt;
      wi->info = inner_context;
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      inner_context = stmt;
      wi->info = inner_context;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq (gimple_omp_for_pre_body (stmt),
		       diagnose_sb_1, NULL, wi);
      walk_gimple_seq (gimple_omp_body (stmt), diagnose_sb_1, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_LABEL:
      /* Record the construct this label is defined in.  */
      splay_tree_insert (all_labels,
			 (splay_tree_key) gimple_label_label (
					    as_a <glabel *> (stmt)),
			 (splay_tree_value) context);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
/* Pass 2: Check each branch and see if its context differs from that of
   the destination label's context.  WI->info holds the innermost
   enclosing OMP construct, as in diagnose_sb_1.  */

static tree
diagnose_sb_2 (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
	       struct walk_stmt_info *wi)
{
  gimple *context = (gimple *) wi->info;
  splay_tree_node n;
  gimple *stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_PARALLEL:
    case GIMPLE_OMP_TASK:
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_SCAN:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_OMP_TARGET:
    case GIMPLE_OMP_TEAMS:
    case GIMPLE_OMP_TASKGROUP:
      wi->info = stmt;
      walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_OMP_FOR:
      wi->info = stmt;
      /* gimple_omp_for_{index,initial,final} are all DECLs; no need to
	 walk them.  */
      walk_gimple_seq_mod (gimple_omp_for_pre_body_ptr (stmt),
			   diagnose_sb_2, NULL, wi);
      walk_gimple_seq_mod (gimple_omp_body_ptr (stmt), diagnose_sb_2, NULL, wi);
      wi->info = context;
      break;

    case GIMPLE_COND:
      {
	/* Both edges of a conditional branch must stay in context.  */
	gcond *cond_stmt = as_a <gcond *> (stmt);
	tree lab = gimple_cond_true_label (cond_stmt);
	if (lab)
	  {
	    n = splay_tree_lookup (all_labels,
				   (splay_tree_key) lab);
	    diagnose_sb_0 (gsi_p, context,
			   n ? (gimple *) n->value : NULL);
	  }
	lab = gimple_cond_false_label (cond_stmt);
	if (lab)
	  {
	    n = splay_tree_lookup (all_labels,
				   (splay_tree_key) lab);
	    diagnose_sb_0 (gsi_p, context,
			   n ? (gimple *) n->value : NULL);
	  }
      }
      break;

    case GIMPLE_GOTO:
      {
	tree lab = gimple_goto_dest (stmt);
	/* Computed gotos have non-LABEL_DECL destinations; skip them.  */
	if (TREE_CODE (lab) != LABEL_DECL)
	  break;

	n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	diagnose_sb_0 (gsi_p, context, n ? (gimple *) n->value : NULL);
      }
      break;

    case GIMPLE_SWITCH:
      {
	gswitch *switch_stmt = as_a <gswitch *> (stmt);
	unsigned int i;
	for (i = 0; i < gimple_switch_num_labels (switch_stmt); ++i)
	  {
	    tree lab = CASE_LABEL (gimple_switch_label (switch_stmt, i));
	    n = splay_tree_lookup (all_labels, (splay_tree_key) lab);
	    /* Stop after the first diagnosed violation; the whole
	       switch has been replaced by a NOP at that point.  */
	    if (n && diagnose_sb_0 (gsi_p, context, (gimple *) n->value))
	      break;
	  }
      }
      break;

    case GIMPLE_RETURN:
      /* A return inside any construct is an invalid exit.  */
      diagnose_sb_0 (gsi_p, context, NULL);
      break;

    default:
      break;
    }

  return NULL_TREE;
}
/* Run both structured-block diagnostic walks over the current
   function's body, reporting branches whose source and destination
   lie in different OMP constructs.  Always returns 0.  */

static unsigned int
diagnose_omp_structured_block_errors (void)
{
  struct walk_stmt_info wi;
  gimple_seq body = gimple_body (current_function_decl);

  all_labels = splay_tree_new (splay_tree_compare_pointers, 0, 0);

  /* Pass 1: record the innermost OMP construct of every label.  */
  memset (&wi, 0, sizeof (wi));
  walk_gimple_seq (body, diagnose_sb_1, NULL, &wi);

  /* Pass 2: compare each branch's construct with its label's.  */
  memset (&wi, 0, sizeof (wi));
  wi.want_locations = true;
  walk_gimple_seq_mod (&body, diagnose_sb_2, NULL, &wi);

  gimple_set_body (current_function_decl, body);

  splay_tree_delete (all_labels);
  all_labels = NULL;

  return 0;
}
/* Pass registration for the structured-block diagnostic pass.  */

namespace {

const pass_data pass_data_diagnose_omp_blocks =
{
  GIMPLE_PASS, /* type */
  "*diagnose_omp_blocks", /* name */
  OPTGROUP_OMP, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_gimple_any, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_diagnose_omp_blocks : public gimple_opt_pass
{
public:
  pass_diagnose_omp_blocks (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_diagnose_omp_blocks, ctxt)
  {}

  /* opt_pass methods: */
  /* Only run when some OpenACC/OpenMP dialect is enabled.  */
  virtual bool gate (function *)
  {
    return flag_openacc || flag_openmp || flag_openmp_simd;
  }
  virtual unsigned int execute (function *)
  {
    return diagnose_omp_structured_block_errors ();
  }

}; // class pass_diagnose_omp_blocks

} // anon namespace
/* Create a new instance of the structured-block diagnostic pass.  */

gimple_opt_pass *
make_pass_diagnose_omp_blocks (gcc::context *ctxt)
{
  return new pass_diagnose_omp_blocks (ctxt);
}
#include "gt-omp-low.h"
|
omp-parallel-if.c | #include <omp.h>
extern void abort (void);
/* Always yields 10; used by main to drive the parallel "if" and
   num_threads clauses with a non-constant expression.  */
int
foo (void)
{
  const int value = 10;
  return value;
}
/* Exercise the OpenMP parallel "if" clause: a false condition must
   serialize the region (team of 1), a true one honors num_threads.
   Aborts on any unexpected team size.  */
int
main ()
{
  int A = 0;

  /* foo () > 10 is false (foo returns 10), so the region runs
     serialized and omp_get_num_threads must report 1.  */
  #pragma omp parallel if (foo () > 10) shared (A)
  {
    A = omp_get_num_threads ();
  }
  if (A != 1)
    abort ();

  /* True condition with an explicit team size of 3.  */
  #pragma omp parallel if (foo () == 10) num_threads (3) shared (A)
  {
    A = omp_get_num_threads ();
  }
  if (A != 3)
    abort ();

  /* num_threads may itself be a runtime function call.  */
  #pragma omp parallel if (foo () == 10) num_threads (foo ()) shared (A)
  {
    A = omp_get_num_threads ();
  }
  if (A != 10)
    abort ();

  return 0;
}
|
untied_tasks.c | // RUN: %libomp-compile-and-run
// REQUIRES: abt
#include "omp_testsuite.h"
/* Spawn one untied task from the master thread; the implicit barrier
   at the end of the parallel region waits for it, so the flag is set
   by the time we return.  Returns 1 on success.
   Regression check for https://github.com/pmodels/bolt/issues/49.  */
int test_omp_untied_tasks()
{
  int task_ran = 0;
  #pragma omp parallel
  #pragma omp master
  {
    #pragma omp task untied
    { task_ran = 1; }
  }
  return task_ran;
}
/* Same as test_omp_untied_tasks but with a default (tied) task.
   Returns 1 on success.  */
int test_omp_tied_tasks()
{
  int task_ran = 0;
  #pragma omp parallel
  #pragma omp master
  {
    #pragma omp task
    { task_ran = 1; }
  }
  return task_ran;
}
/* Spawn one tied and one untied task from the master thread and
   verify both ran.  Returns 1 on success, 0 otherwise.  */
int test_omp_tied_and_untied_tasks()
{
  int tied_ran = 0;
  int untied_ran = 0;
  #pragma omp parallel
  #pragma omp master
  {
    #pragma omp task
    { tied_ran = 1; }
    #pragma omp task untied
    { untied_ran = 1; }
  }
  return tied_ran == 1 && untied_ran == 1;
}
/* Driver: run each task test REPETITIONS times (macro presumably
   supplied by omp_testsuite.h -- not visible here) and count failing
   runs.  Exit status is the failure count, so 0 means success.  */
int main()
{
  int i;
  int num_failed = 0;
  for (i = 0; i < REPETITIONS; i++) {
    if (!test_omp_untied_tasks()) {
      num_failed++;
    }
    if (!test_omp_tied_tasks()) {
      num_failed++;
    }
    if (!test_omp_tied_and_untied_tasks()) {
      num_failed++;
    }
  }
  return num_failed;
}
|
bucle-forModificado.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
/* Print which thread executes each of the n iterations of a
   parallel-for loop.  n is read from argv[1] (required).  */
int main(int argc, char **argv) {
  int i, n = 9;
  /* Require the iteration-count argument; error text is Spanish
     ("missing number of iterations").  */
  if(argc < 2) {
    fprintf(stderr,"\n[ERROR] - Falta nº iteraciones \n");
    exit(-1);
  }
  n = atoi(argv[1]);
  /* Iterations are split among the team; output order is
     nondeterministic.  */
  #pragma omp parallel for
  for (i=0; i<n; i++)
    printf("thread %d ejecuta la iteración %d del bucle\n",
           omp_get_thread_num(),i);
  return(0);
}
|
DRB068-restrictpointer2-orig-no.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The restrict type qualifier is an indication to the compiler that,
if the memory addressed by the restrict -qualified pointer is modified, no other pointer will access that same memory.
If a particular chunk of memory is not modified, it can be aliased through more than one restricted pointer.
A C99 restrict feature.
For gcc, you must use -std=c99 to compile this program.
*/
#include <stdlib.h>
#include <stdio.h>
/* Element-wise addition a[i] = b[i] + c[i] over [0, n).  The restrict
   qualifiers assert the arrays do not alias, so iterations are
   independent and safe to share among threads.  */
void foo(int n, int * restrict a, int * restrict b, int * restrict c)
{
  #pragma omp parallel for schedule(dynamic)
  for (int idx = 0; idx < n; idx++)
    a[idx] = b[idx] + c[idx];
}
/* Allocate three n-element arrays, run the race-free parallel vector
   add, and release everything.  Returns 1 (skip) on allocation
   failure, 0 on success.
   Fix over the original: the later failure paths leaked the arrays
   already allocated (a leaked if b's malloc failed; a and b leaked if
   c's failed).  Each failure path now frees its predecessors.  */
int main()
{
  int n = 1000;
  int * a , *b, *c;

  a = (int*) malloc (n* sizeof (int));
  if (a ==0)
    {
      fprintf (stderr, "skip the execution due to malloc failures.\n");
      return 1;
    }

  b = (int*) malloc (n* sizeof (int));
  if (b ==0)
    {
      fprintf (stderr, "skip the execution due to malloc failures.\n");
      free (a);  /* release the earlier allocation before bailing out */
      return 1;
    }

  c = (int*) malloc (n* sizeof (int));
  if (c ==0)
    {
      fprintf (stderr, "skip the execution due to malloc failures.\n");
      free (a);
      free (b);
      return 1;
    }

  foo (n, a, b,c);

  free (a);
  free (b);
  free (c);
  return 0;
}
|
omp_mm.c | /******************************************************************************
* FILE: omp_mm.c
* DESCRIPTION:
* OpenMp Example - Matrix Multiply - C Version
* Demonstrates a matrix multiply using OpenMP. Threads share row iterations
* according to a predefined chunk size.
* AUTHOR: Blaise Barney
* LAST REVISED: 06/28/05
******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#define NRA 862 /* number of rows in matrix A */
#define NCA 865 /* number of columns in matrix A */
#define NCB 867 /* number of columns in matrix B */
/* Matrix multiply c = a * b with OpenMP, sharing outer-loop rows among
   threads in `chunk`-sized pieces.
   NOTE(review): the three stack matrices total roughly 18 MB
   (862*865 + 865*867 + 862*867 doubles) -- likely to exceed default
   thread stack limits; confirm ulimit/OMP_STACKSIZE or move to heap.  */
int main (int argc, char *argv[])
{
  int tid, nthreads, i, j, k, chunk;
  double a[NRA][NCA], /* matrix A to be multiplied */
         b[NCA][NCB], /* matrix B to be multiplied */
         c[NRA][NCB]; /* result matrix C */

  chunk = 10; /* set loop iteration chunk size */

  /*** Spawn a parallel region explicitly scoping all variables ***/
  #pragma omp parallel shared(a,b,c,nthreads,chunk) private(tid,i,j,k)
  {
    tid = omp_get_thread_num();
    if (tid == 0)
      {
        nthreads = omp_get_num_threads();
        printf("Starting matrix multiple example with %d threads\n",nthreads);
        printf("Initializing matrices...\n");
      }

  /*** Initialize matrices ***/
  /* NOTE(review): presumably omp_helper.h fills a and b and zeroes c --
     confirm; the += accumulation below requires c to start at 0.  */
  #include "omp_helper.h"

  /*** Do matrix multiply sharing iterations on outer loop ***/
  /*** Display who does which iterations for demonstration purposes ***/
  printf("Thread %d starting matrix multiply...\n",tid);
  /* NOTE(review): directive.h presumably injects the work-sharing
     `#pragma omp for` using `chunk` -- confirm against that header.  */
  #include "directive.h"
  for (i=0; i<NRA; i++)
    {
    // printf("Thread=%d did row=%d\n",tid,i);
    for(j=0; j<NCB; j++)
      for (k=0; k<NCA; k++)
        c[i][j] += a[i][k] * b[k][j];
    }
  } /*** End of parallel region ***/

  printf ("Done.\n");
  /* C99: falling off the end of main is equivalent to return 0.  */
}
|
3d25pt.c | /*
* Order-2, 3D 25 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
#ifndef min
#define min(x,y) ((x) < (y)? (x) : (y))
#endif
/* Subtract the `struct timeval' values X and Y, storing the result in
   RESULT.  Return 1 if the difference is negative, otherwise 0.
   NOTE: Y is normalized in place as scratch storage, matching the
   classic GNU libc manual idiom -- callers must not rely on *Y.  */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow seconds into Y when X's microsecond field is smaller, so
     the final microsecond subtraction cannot underflow.  */
  if (x->tv_usec < y->tv_usec)
    {
      int borrow = (y->tv_usec - x->tv_usec) / 1000000 + 1;
      y->tv_usec -= 1000000 * borrow;
      y->tv_sec += borrow;
    }

  /* Conversely, move whole seconds out of an oversized microsecond
     difference.  */
  if (x->tv_usec - y->tv_usec > 1000000)
    {
      int carry = (x->tv_usec - y->tv_usec) / 1000000;
      y->tv_usec += 1000000 * carry;
      y->tv_sec -= carry;
    }

  /* Fields now subtract independently; tv_usec is non-negative.  */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Negative difference iff X's (adjusted) seconds are earlier.  */
  return x->tv_sec < y->tv_sec;
}
/* Driver for the 3D order-4 (25-point) stencil benchmark.
   Usage: prog Nx Ny Nz Nt -- interior size per dimension (8 ghost
   layers are added) and number of time steps.

   Fixes over the original:
   - Nx/Ny/Nz/Nt were read uninitialized when fewer than 4 arguments
     were given (undefined behavior); argc is now validated up front.
   - roc2 was first assigned a small placeholder malloc that was
     immediately leaked by the reassignment; the placeholder is gone.
   - The init loops started at index 1 and never touched A[1], yet the
     stencil reads index 0 of A[t%2] and all of A[(t+1)%2]; both time
     levels are now fully initialized from index 0.
   - A itself and tile_size are now freed before returning.  */
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  int Nx, Ny, Nz, Nt;

  if (argc < 5) {
    fprintf(stderr, "usage: %s Nx Ny Nz Nt\n", argv[0]);
    return 1;
  }
  Nx = atoi(argv[1])+8;
  Ny = atoi(argv[2])+8;
  Nz = atoi(argv[3])+8;
  Nt = atoi(argv[4]);

  // allocate the two time levels of A and the coefficient array roc2
  double ****A = (double ****) malloc(sizeof(double***)*2);
  double ***roc2;
  A[0] = (double ***) malloc(sizeof(double**)*Nz);
  A[1] = (double ***) malloc(sizeof(double**)*Nz);
  roc2 = (double ***) malloc(sizeof(double**)*Nz);

  for(i=0; i<Nz; i++){
    A[0][i] = (double**) malloc(sizeof(double*)*Ny);
    A[1][i] = (double**) malloc(sizeof(double*)*Ny);
    roc2[i] = (double**) malloc(sizeof(double*)*Ny);
    for(j=0;j<Ny;j++){
      A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
      A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
      roc2[i][j] = (double*) malloc(sizeof(double)*Nx);
    }
  }

  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 24;
  tile_size[3] = 32;
  tile_size[4] = -1;

  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;

  const int BASE = 1024;

  // initialize variables: cover index 0 and both time levels, since
  // the stencil below reads A[t%2] down to index 0 and the previous
  // value A[(t+1)%2] everywhere
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = 0.0;
        roc2[i][j][k] = 2.0 * (rand() % BASE);
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
  #pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
    #pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif

  const double coef0 = -0.28472;
  const double coef1 = 0.16000;
  const double coef2 = -0.02000;
  const double coef3 = 0.00254;
  const double coef4 = -0.00018;

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);

    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] = 2.0*A[t%2][i][j][k] - A[(t+1)%2][i][j][k] + roc2[i][j][k]*(
                coef0* A[t%2][i  ][j  ][k  ] +
                coef1*(A[t%2][i-1][j  ][k  ] + A[t%2][i+1][j  ][k  ] +
                       A[t%2][i  ][j-1][k  ] + A[t%2][i  ][j+1][k  ] +
                       A[t%2][i  ][j  ][k-1] + A[t%2][i  ][j  ][k+1]) +
                coef2*(A[t%2][i-2][j  ][k  ] + A[t%2][i+2][j  ][k  ] +
                       A[t%2][i  ][j-2][k  ] + A[t%2][i  ][j+2][k  ] +
                       A[t%2][i  ][j  ][k-2] + A[t%2][i  ][j  ][k+2]) +
                coef3*(A[t%2][i-3][j  ][k  ] + A[t%2][i+3][j  ][k  ] +
                       A[t%2][i  ][j-3][k  ] + A[t%2][i  ][j+3][k  ] +
                       A[t%2][i  ][j  ][k-3] + A[t%2][i  ][j  ][k+3]) +
                coef4*(A[t%2][i-4][j  ][k  ] + A[t%2][i+4][j  ][k  ] +
                       A[t%2][i  ][j-4][k  ] + A[t%2][i  ][j+4][k  ] +
                       A[t%2][i  ][j  ][k-4] + A[t%2][i  ][j  ][k+4]) );
          }
        }
      }
    }
#pragma endscop

    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }

  PRINT_RESULTS(4, "constant")

#ifdef LIKWID_PERFMON
  #pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  // Free allocated arrays (including the top-level pointer arrays and
  // the tile-size list, which the original leaked)
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
      free(roc2[i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
    free(roc2[i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  free(roc2);
  free(tile_size);

  return 0;
}
|
GB_subassign_08s_and_16.c | //------------------------------------------------------------------------------
// GB_subassign_08s_and_16: C(I,J)<M or !M> += A ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Method 08s: C(I,J)<M> += A ; using S
// Method 16: C(I,J)<!M> += A ; using S
// M: present
// Mask_comp: true or false
// C_replace: false
// accum: present
// A: matrix
// S: constructed
// C: not bitmap: use GB_bitmap_assign instead
// M, A: any sparsity structure.
#include "GB_subassign_methods.h"
GrB_Info GB_subassign_08s_and_16
(
GrB_Matrix C,
// input:
const GrB_Index *I,
const int64_t ni,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J,
const int64_t nj,
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
const GrB_Matrix M,
const bool Mask_struct, // if true, use the only structure of M
const bool Mask_comp, // if true, !M, else use M
const GrB_BinaryOp accum,
const GrB_Matrix A,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (!GB_IS_BITMAP (C)) ;
ASSERT (!GB_aliased (C, M)) ; // NO ALIAS of C==M
ASSERT (!GB_aliased (C, A)) ; // NO ALIAS of C==A
//--------------------------------------------------------------------------
// S = C(I,J)
//--------------------------------------------------------------------------
GB_EMPTY_TASKLIST ;
GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, true, Context)) ;
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
GB_MATRIX_WAIT_IF_JUMBLED (M) ;
GB_MATRIX_WAIT_IF_JUMBLED (A) ;
GB_GET_C ; // C must not be bitmap
GB_GET_MASK ;
GB_GET_A ;
GB_GET_S ;
GB_GET_ACCUM ;
//--------------------------------------------------------------------------
// Method 16: C(I,J)<!M> += A ; using S
//--------------------------------------------------------------------------
// Time: Close to optimal. All entries in A+S must be traversed.
//--------------------------------------------------------------------------
// Method 08s: C(I,J)<M> += A ; using S
//--------------------------------------------------------------------------
// Time: Only entries in A must be traversed, and the corresponding entries
// in C located. This method constructs S and traverses all of it in the
// worst case. Compare with method 08n, which does not construct S but
// instead uses a binary search for entries in C, but it only traverses
// entries in A.*M.
//--------------------------------------------------------------------------
// Parallel: A+S (Methods 02, 04, 09, 10, 11, 12, 14, 16, 18, 20)
//--------------------------------------------------------------------------
if (A_is_bitmap)
{
// all of IxJ must be examined
GB_SUBASSIGN_IXJ_SLICE ;
}
else
{
// traverse all A+S
GB_SUBASSIGN_TWO_SLICE (A, S) ;
}
//--------------------------------------------------------------------------
// phase 1: create zombies, update entries, and count pending tuples
//--------------------------------------------------------------------------
if (A_is_bitmap)
{
//----------------------------------------------------------------------
// phase1: A is bitmap
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//--------------------------------------------------------------
// get S(iA_start:iA_end,j)
//--------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
int64_t pA_start = j * Avlen ;
//--------------------------------------------------------------
// get M(:,j)
//--------------------------------------------------------------
int64_t pM_start, pM_end ;
GB_VECTOR_LOOKUP (pM_start, pM_end, M, j) ;
bool mjdense = (pM_end - pM_start) == Mvlen ;
//--------------------------------------------------------------
// do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j)
//--------------------------------------------------------------
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
int64_t pA = pA_start + iA ;
bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
bool Afound = Ab [pA] ;
if (Sfound && !Afound)
{
// S (i,j) is present but A (i,j) is not
// ----[C . 1] or [X . 1]-------------------------------
// [C . 1]: action: ( C ): no change, with accum
// [X . 1]: action: ( X ): still a zombie
// ----[C . 0] or [X . 0]-------------------------------
// [C . 0]: action: ( C ): no change, with accum
// [X . 0]: action: ( X ): still a zombie
GB_NEXT (S) ;
}
else if (!Sfound && Afound)
{
// S (i,j) is not present, A (i,j) is present
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
if (Mask_comp) mij = !mij ;
if (mij)
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
task_pending++ ;
}
}
else if (Sfound && Afound)
{
// both S (i,j) and A (i,j) present
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
if (Mask_comp) mij = !mij ;
if (mij)
{
// ----[C A 1] or [X A 1]---------------------------
// [C A 1]: action: ( =A ): A to C no accum
// [C A 1]: action: ( =C+A ): apply accum
// [X A 1]: action: ( undelete ): zombie lives
GB_C_S_LOOKUP ;
GB_withaccum_C_A_1_matrix ;
}
GB_NEXT (S) ;
}
}
}
GB_PHASE1_TASK_WRAPUP ;
}
}
else
{
//----------------------------------------------------------------------
// phase1: A is hypersparse, sparse, or full
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE1 ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//--------------------------------------------------------------
// get A(:,j) and S(:,j)
//--------------------------------------------------------------
int64_t j = GBH (Zh, k) ;
GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen);
GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);
//--------------------------------------------------------------
// get M(:,j)
//--------------------------------------------------------------
int64_t pM_start, pM_end ;
GB_VECTOR_LOOKUP (pM_start, pM_end, M, j) ;
bool mjdense = (pM_end - pM_start) == Mvlen ;
//--------------------------------------------------------------
// do a 2-way merge of S(:,j) and A(:,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
// int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and A (:,j) have entries
while (pS < pS_end && pA < pA_end)
{
int64_t iS = GBI (Si, pS, Svlen) ;
int64_t iA = GBI (Ai, pA, Avlen) ;
if (iS < iA)
{
// S (i,j) is present but A (i,j) is not
// ----[C . 1] or [X . 1]-------------------------------
// [C . 1]: action: ( C ): no change, with accum
// [X . 1]: action: ( X ): still a zombie
// ----[C . 0] or [X . 0]-------------------------------
// [C . 0]: action: ( C ): no change, with accum
// [X . 0]: action: ( X ): still a zombie
GB_NEXT (S) ;
}
else if (iA < iS)
{
// S (i,j) is not present, A (i,j) is present
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
if (Mask_comp) mij = !mij ;
if (mij)
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
task_pending++ ;
}
GB_NEXT (A) ;
}
else
{
// both S (i,j) and A (i,j) present
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
if (Mask_comp) mij = !mij ;
if (mij)
{
// ----[C A 1] or [X A 1]---------------------------
// [C A 1]: action: ( =A ): A to C no accum
// [C A 1]: action: ( =C+A ): apply accum
// [X A 1]: action: ( undelete ): zombie lives
GB_C_S_LOOKUP ;
GB_withaccum_C_A_1_matrix ;
}
GB_NEXT (S) ;
GB_NEXT (A) ;
}
}
// ignore the remainder of S(:,j)
// while list A (:,j) has entries. List S (:,j) exhausted.
while (pA < pA_end)
{
// S (i,j) is not present, A (i,j) is present
int64_t iA = GBI (Ai, pA, Avlen) ;
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
if (Mask_comp) mij = !mij ;
if (mij)
{
// ----[. A 1]------------------------------------------
// [. A 1]: action: ( insert )
task_pending++ ;
}
GB_NEXT (A) ;
}
}
GB_PHASE1_TASK_WRAPUP ;
}
}
//--------------------------------------------------------------------------
// phase 2: insert pending tuples
//--------------------------------------------------------------------------
GB_PENDING_CUMSUM ;
if (A_is_bitmap)
{
//----------------------------------------------------------------------
// phase2: A is bitmap
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//--------------------------------------------------------------
// get S(iA_start:iA_end,j)
//--------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
int64_t pA_start = j * Avlen ;
//--------------------------------------------------------------
// get M(:,j)
//--------------------------------------------------------------
int64_t pM_start, pM_end ;
GB_VECTOR_LOOKUP (pM_start, pM_end, M, j) ;
bool mjdense = (pM_end - pM_start) == Mvlen ;
//--------------------------------------------------------------
// do a 2-way merge of S(iA_start:iA_end,j) and A(ditto,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
int64_t pA = pA_start + iA ;
bool Sfound = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
bool Afound = Ab [pA] ;
if (!Sfound && Afound)
{
// S (i,j) is not present, A (i,j) is present
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
if (Mask_comp) mij = !mij ;
if (mij)
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT_aij ;
}
}
else if (Sfound)
{
// S (i,j) present
GB_NEXT (S) ;
}
}
}
GB_PHASE2_TASK_WRAPUP ;
}
}
else
{
//----------------------------------------------------------------------
// phase2: A is hypersparse, sparse, or full
//----------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//------------------------------------------------------------------
// get the task descriptor
//------------------------------------------------------------------
GB_GET_TASK_DESCRIPTOR_PHASE2 ;
//------------------------------------------------------------------
// compute all vectors in this task
//------------------------------------------------------------------
for (int64_t k = kfirst ; k <= klast ; k++)
{
//--------------------------------------------------------------
// get A(:,j) and S(:,j)
//--------------------------------------------------------------
int64_t j = GBH (Zh, k) ;
GB_GET_MAPPED (pA, pA_end, pA, pA_end, Ap, j, k, Z_to_X, Avlen);
GB_GET_MAPPED (pS, pS_end, pB, pB_end, Sp, j, k, Z_to_S, Svlen);
//--------------------------------------------------------------
// get M(:,j)
//--------------------------------------------------------------
int64_t pM_start, pM_end ;
GB_VECTOR_LOOKUP (pM_start, pM_end, M, j) ;
bool mjdense = (pM_end - pM_start) == Mvlen ;
//--------------------------------------------------------------
// do a 2-way merge of S(:,j) and A(:,j)
//--------------------------------------------------------------
// jC = J [j] ; or J is a colon expression
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
// while both list S (:,j) and A (:,j) have entries
while (pS < pS_end && pA < pA_end)
{
int64_t iS = GBI (Si, pS, Svlen) ;
int64_t iA = GBI (Ai, pA, Avlen) ;
if (iS < iA)
{
// S (i,j) is present but A (i,j) is not
GB_NEXT (S) ;
}
else if (iA < iS)
{
// S (i,j) is not present, A (i,j) is present
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
if (Mask_comp) mij = !mij ;
if (mij)
{
// ----[. A 1]--------------------------------------
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT_aij ;
}
GB_NEXT (A) ;
}
else
{
// both S (i,j) and A (i,j) present
GB_NEXT (S) ;
GB_NEXT (A) ;
}
}
// while list A (:,j) has entries. List S (:,j) exhausted.
while (pA < pA_end)
{
// S (i,j) is not present, A (i,j) is present
int64_t iA = GBI (Ai, pA, Avlen) ;
GB_MIJ_BINARY_SEARCH_OR_DENSE_LOOKUP (iA) ;
if (Mask_comp) mij = !mij ;
if (mij)
{
// ----[. A 1]------------------------------------------
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT_aij ;
}
GB_NEXT (A) ;
}
}
GB_PHASE2_TASK_WRAPUP ;
}
}
//--------------------------------------------------------------------------
// finalize the matrix and return result
//--------------------------------------------------------------------------
GB_SUBASSIGN_WRAPUP ;
}
|
DRB025-simdtruedep-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This one has race condition due to true dependence.
But data races happen at instruction level, not thread level.
Data race pair: a[i+1]@68:5 vs. a[i]@68:12
*/
#include <stdlib.h>
/* DataRaceBench kernel: the simd loop below carries a true (read-after-write)
   dependence across iterations (a[i+1] written, a[i] read).  The race is
   INTENTIONAL -- this file exists to exercise race detectors, and the header
   comment cites the exact source positions (a[i+1]@68:5 vs a[i]@68:12).
   Do not "fix" or restructure the loop. */
int main(int argc, char* argv[])
{
int i;
int len=100;
/* optional command-line override of the array length */
if (argc>1)
len = atoi(argv[1]);
int a[len], b[len];  /* C99 VLAs sized from argv -- intentional for the benchmark */
for (i=0;i<len;i++)
{
a[i]=i;
b[i]=i+1;
}
/* the simd directive vectorizes despite the loop-carried dependence:
   lanes read a[i] while another lane writes a[i+1] -- the intended race */
#pragma omp simd
for (i=0;i<len-1;i++)
a[i+1]=a[i]*b[i];
return 0;
}
|
stencil2d_itlmic_kernel.c | #include "stencil2d.h"
#include <offload.h>
#include <homp.h>
/*
 * Run one slab [start, start+len) of a 2-D stencil sweep on an Intel MIC
 * coprocessor via the legacy #pragma offload interface.
 *
 * u/uold/coeff must already be mapped on the device (length(0) alloc_if(0)
 * free_if(0) reuses existing device allocations).  coeff points at the
 * stencil center, so negative offsets reach the left/upper weights --
 * NOTE(review): verify this layout against the caller's coeff allocation.
 */
void stencil2d_itlmic_wrapper(omp_offloading_t *off, int start, int len, long n, long m, int u_dimX, int u_dimY, REAL *u, REAL *uold, int radius,int coeff_dimX, REAL *coeff)
{
    int ix, iy, ir;
    int count = 4*radius+1;            /* cross stencil: center + 4 arms */
#ifdef SQUARE_SETNCIL
    count = coeff_dimX * coeff_dimX;   /* full square stencil */
#endif
#pragma offload target(mic:off->dev->sysid) in (u: length(0) alloc_if(0) free_if(0)) \
                            in (uold: length(0) alloc_if(0) free_if(0)) \
                            in (coeff: length(0) alloc_if(0) free_if(0))
    {
#pragma omp parallel for
        for (ix = start; ix < start+len; ix++) {
            /* point at the first interior cell of row ix (skip the halo) */
            REAL * temp_u = &u[(ix+radius)*u_dimY+radius];
            REAL * temp_uold = &uold[(ix+radius)*u_dimY+radius];
#pragma omp simd
            for (iy = 0; iy < m; iy++) {
                REAL result = temp_uold[0] * coeff[0];
                for (ir = 1; ir <= radius; ir++) {
                    result += coeff[ir] * temp_uold[ir];                       /* horizontal right */
                    result += coeff[-ir]* temp_uold[-ir];                      /* horizontal left  */
                    result += coeff[-ir*coeff_dimX] * temp_uold[-ir * u_dimY]; /* vertical up      */
                    result += coeff[ir*coeff_dimX] * temp_uold[ir * u_dimY];   /* vertical bottom  */
#ifdef SQUARE_SETNCIL
                    /* BUG FIX: the original corner terms lacked terminating
                       semicolons and had stray ']' inside the temp_uold index
                       expressions, so this branch did not compile when
                       SQUARE_SETNCIL was defined. */
                    result += coeff[-ir*coeff_dimX-ir] * temp_uold[-ir * u_dimY - ir]; /* left upper corner   */
                    result += coeff[-ir*coeff_dimX+ir] * temp_uold[-ir * u_dimY + ir]; /* right upper corner  */
                    result += coeff[ir*coeff_dimX-ir] * temp_uold[ir * u_dimY - ir];   /* left bottom corner  */
                    result += coeff[ir*coeff_dimX+ir] * temp_uold[ir * u_dimY + ir];   /* right bottom corner */
#endif
                }
                *temp_u = result/count;
                temp_u++;
                temp_uold++;
            }
        }
    }
}
|
mathematigames.c | /*
Andi Farhan (2006521616)
Aryoshi Wicaksono (2006532140)
Gemilang Bagas Ramadhani (2006535205)
Nabil Mafaza (2006529133)
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <ctype.h>
#include <omp.h>
#include <conio.h> /* getch(); Windows-only, matching the existing system("cls")/strcmpi usage */
/* One quiz question; nodes form a singly-linked list ending in NULL. */
struct soal
{
char pertanyaan[100];   /* question text */
char opsiA[10];         /* multiple-choice option A */
char opsiB[10];         /* multiple-choice option B */
char opsiC[10];         /* multiple-choice option C */
char opsiD[10];         /* multiple-choice option D */
char jawabanPilgan[10]; /* correct multiple-choice answer */
char jawabanIsian[10];  /* correct fill-in answer */
int random;             /* shuffle key, re-randomized by randomSoal() */
struct soal *link;      /* next question, or NULL */
};
/* One leaderboard entry; entries form a score-sorted stack (highest first). */
struct data
{
char nama[20];            /* player name */
char tingkatKesulitan[6]; /* "Mudah"/"Sedang"/"Sulit" -- NOTE(review): 6 bytes
                             cannot hold "sedang" plus its NUL terminator;
                             confirm the intended field size */
double score;             /* final score for this round */
struct data *link;        /* next (lower-scoring) entry, or NULL */
};
void inputInt(int *input);
int cleanInput();
struct soal *tambahSoal(struct soal *head); /* add a question and its answers */
struct soal *modifikasiSoal(struct soal *ptr); /* modify an existing question */
void hapusSoal(struct soal **ptr); /* delete a question and its answers */
struct soal *soal_rb(struct soal *head); /* load questions from soal.bin */
struct soal *soal_next_rb(struct soal *head, FILE *filesoal); /* read the next question record from the .bin */
void soal_wb(struct soal *head); /* save questions to soal.bin */
struct data *hi_score_rd(struct data *stackptr); /* load the leaderboard from .bin */
struct data *hi_score_new(struct data *newEntry, struct data *stackptr); /* insert a new leaderboard entry */
void hi_score_pr(struct data *stackptr); /* print the leaderboard */
void hi_score_wb(struct data *stackptr); /* write the leaderboard back to the .bin file */
void game(struct data **startData, struct soal *startSoal); /* play one quiz round */
void randomSoal(struct soal *head); /* shuffle the question order */
/* Prompt for the admin password and check it.
   Returns 1 on a match, 0 (after informing the user) otherwise.
   BUG FIX: the original read into char kataSandi[12], which the
   12-character password itself overflowed (needs 13 bytes with the NUL);
   the buffer is now larger and the read is width-bounded. */
static int cekSandi(void)
{
    char kataSandi[32];
    printf("Masukkan kata sandi: ");
    scanf(" %31[^\n]", kataSandi);
    if (strcmp("proglanasyik", kataSandi) != 0)
    {
        printf("Kata sandi yang Anda masukkan salah. Anda akan dikembalikan ke menu awal.\n");
        system("pause");
        return 0;
    }
    return 1;
}
/*
 * Entry point: loads questions and the leaderboard from their .bin files
 * (concurrently, via OpenMP sections), runs the menu loop, then writes
 * both lists back to disk before exiting.
 */
int main(void)
{
    srand(time(0));
    int selectMenu;
    struct soal *headSoal = NULL;
    struct data *headData = NULL;
    /* load questions and high scores concurrently */
#pragma omp parallel sections
    {
#pragma omp section
        headSoal = soal_rb(headSoal);
#pragma omp section
        headData = hi_score_rd(headData);
    }
    do
    {
        system("cls");
        printf("\n\n\n\t\t\tSilahkan Memilih Menu Yang Anda Inginkan\n\n\n");
        printf("\t\t\t1.Informasi Permainan\n");
        printf("\t\t\t2.Memulai Permainan\n");
        printf("\t\t\t3.Memodifikasi Library Soal\n");
        printf("\t\t\t4.Tambah Soal\n");
        printf("\t\t\t5.Delete Soal\n");
        printf("\t\t\t6.Menampilkan Highscore\n");
        printf("\t\t\t7.Keluar\n\n\n");
        printf("\t\t\tMenu yang dipilih: ");
        /* BUG FIX: the original used a bare scanf("%d", ...); non-numeric
           input left selectMenu uninitialized (UB) and spun the menu loop
           forever.  inputInt() re-prompts until a valid integer line. */
        inputInt(&selectMenu);
        switch (selectMenu)
        {
        case 1: /* game information */
            system("cls");
            printf("\n\n\n\t\t\tMathematigame adalah program yang memungkinkan admin\n");
            printf("\t\t\tuntuk menuliskan soal-soal matematika beserta jawaban\n");
            printf("\t\t\tdan menjadikan soal-soal yang telah ditulis sebagai kuis\n");
            printf("\t\t\tuntuk dikerjakan oleh pengguna lain. Skor akhir pengguna\n");
            printf("\t\t\takan diurutkan dengan nilai tertinggi terletak pada puncak\n");
            printf("\t\t\tdan ditulis pada file .txt.\n\n");
            system("pause");
            break;
        case 2: /* play one round on a freshly shuffled question list */
            system("cls");
            randomSoal(headSoal);
            game(&headData, headSoal);
            break;
        case 3: /* modify a question (password-protected) */
            system("cls");
            if (!cekSandi())
                break;
            system("cls");
            headSoal = modifikasiSoal(headSoal);
            break;
        case 4: /* add a question (password-protected) */
            system("cls");
            if (!cekSandi())
                break;
            system("cls");
            headSoal = tambahSoal(headSoal);
            break;
        case 5: /* delete a question (password-protected) */
            system("cls");
            if (!cekSandi())
                break;
            system("cls");
            hapusSoal(&headSoal);
            break;
        case 6: /* show the leaderboard */
            system("cls");
            hi_score_pr(headData);
            break;
        case 7: /* exit */
            break;
        default: /* out-of-range numbers simply redisplay the menu */
            break;
        }
    } while (selectMenu != 7);
    /* persist questions and high scores concurrently */
#pragma omp parallel sections
    {
#pragma omp section
        soal_wb(headSoal);
#pragma omp section
        hi_score_wb(headData);
    }
    return 0;
}
struct soal *modifikasiSoal(struct soal *ptr)
/*
 * Overwrite every field of one question, chosen by its 1-based position.
 * Prints the whole list first so the admin can pick a number.
 * Returns the (unchanged) list head.
 */
{
    int i = 1;
    int urutan = 1;
    struct soal *temp = ptr;
    if (temp != NULL)
    {
        /* print all questions, numbered from 1 */
        while (temp != NULL)
        {
            printf("%d)\n", i);
            printf("Soal: %s\n", temp->pertanyaan);
            printf("Opsi jawaban A: %s\n", temp->opsiA);
            printf("Opsi jawaban B: %s\n", temp->opsiB);
            printf("Opsi jawaban C: %s\n", temp->opsiC);
            printf("Opsi jawaban D: %s\n", temp->opsiD);
            printf("Jawaban pilihan ganda: %s\n", temp->jawabanPilgan);
            printf("Jawaban isian: %s\n\n", temp->jawabanIsian);
            i++;
            temp = temp->link;
        }
        i--; /* i now holds the number of questions */
        temp = ptr;
        printf("Masukkan urutan soal yang Anda ingin ganti: ");
        inputInt(&urutan);
        /* BUG FIX: the original only rejected urutan > i; urutan < 1 made
           the walk below run past the end of the list (NULL dereference). */
        while (urutan < 1 || urutan > i)
        {
            printf("Soal tidak ditemukan! Harap masukkan angka yang benar!\n");
            inputInt(&urutan);
        }
        i = 1;
        while (i != urutan) /* walk to the chosen node */
        {
            i++;
            temp = temp->link;
        }
        system("cls");
        /* overwrite every field; the field widths bound each read to the
           buffer size (the original unbounded " %[^\n]s" could overflow) */
        printf("Masukkan soal baru:\n");
        scanf(" %99[^\n]", temp->pertanyaan);
        printf("Masukkan opsi jawaban A baru:\n");
        scanf(" %9[^\n]", temp->opsiA);
        printf("Masukkan opsi jawaban B baru:\n");
        scanf(" %9[^\n]", temp->opsiB);
        printf("Masukkan opsi jawaban C baru:\n");
        scanf(" %9[^\n]", temp->opsiC);
        printf("Masukkan opsi jawaban D baru:\n");
        scanf(" %9[^\n]", temp->opsiD);
        printf("Masukkan jawaban pilihan ganda baru:\n");
        scanf(" %9[^\n]", temp->jawabanPilgan);
        printf("Masukkan jawaban isian baru:\n");
        scanf(" %9[^\n]", temp->jawabanIsian);
    }
    else
    {
        printf("Soal tidak ditemukan\n");
        getch();
    }
    return ptr;
}
void inputInt(int *input)
/* Keep prompting until a line of stdin holds exactly one integer.
   After every bad attempt the rest of the line is discarded via
   cleanInput() before re-prompting. */
{
    char trailing;
    for (;;)
    {
        if (scanf("%d%c", input, &trailing) == 2 && trailing == '\n')
        {
            return; /* a clean "number + newline" was read */
        }
        cleanInput();
        puts("Masukkan angka yang benar.");
    }
}
int cleanInput()
/* Discard the remainder of the current stdin line (through the newline).
   Always returns 1 so it can be chained inside a boolean condition. */
{
    int c;
    do
    {
        c = getchar();
    } while (c != '\n');
    return 1;
}
void game(struct data **startData, struct soal *startSoal)
/*
 * Play one round: ask for name and difficulty, present up to 5 questions
 * while lives remain, then compute the score (10/15/20 points per correct
 * answer by difficulty) and push it onto the leaderboard via hi_score_new().
 * *startData may be updated to point at a new leaderboard head.
 */
{
    int nyawa;           /* remaining lives */
    char jawab[10];      /* player's answer for the current question */
    int jumlahBenar = 0; /* number of correct answers */
    int urutan = 0;      /* BUG FIX: was uninitialized -- the loop test below read it (UB) */
    struct soal *currentSoal = startSoal;
    struct data *currentData = malloc(sizeof(struct data));
    if (currentData == NULL)
        return; /* out of memory: abandon the round */
    currentData->link = NULL;
    printf("Nama : ");
    scanf(" %19[^\n]", currentData->nama); /* bounded: nama is char[20] */
    printf("Tingkat Kesulitan (Mudah, Sedang, Sulit) :");
    /* NOTE(review): tingkatKesulitan is char[6], one byte too small for
       "sedang" plus the NUL; bounding this read to %5s would break the
       comparison below, so the original unbounded read is kept -- the
       struct field size should be fixed instead (changes the file format). */
    scanf("%s", currentData->tingkatKesulitan);
    /* keep asking until the difficulty is one of the three valid words */
    while (strcmpi(currentData->tingkatKesulitan, "MUDAH") != 0 && strcmpi(currentData->tingkatKesulitan, "SEDANG") != 0 && strcmpi(currentData->tingkatKesulitan, "SULIT") != 0)
    {
        printf("Masukkan tingkat kesulitan yang benar!\n");
        printf("Tingkat Kesulitan :");
        scanf("%s", currentData->tingkatKesulitan);
    }
    /* easy mode gets 5 lives, medium/hard get 3 */
    if (strcmpi(currentData->tingkatKesulitan, "MUDAH") == 0)
    {
        nyawa = 5;
    }
    else
        nyawa = 3;
    if (currentSoal != NULL)
    {
        /* present up to 5 questions (or fewer if the list is shorter) */
        while (currentSoal != NULL && urutan != 5)
        {
            if (nyawa == 0) /* out of lives: go straight to scoring */
            {
                break;
            }
            ++urutan;
            /* easy/medium show multiple-choice options; hard is fill-in */
            if (strcmpi(currentData->tingkatKesulitan, "MUDAH") == 0 || strcmpi(currentData->tingkatKesulitan, "SEDANG") == 0)
            {
                printf("\n%s\n\n", currentSoal->pertanyaan);
                printf("A. %-10s C. %-10s\n", currentSoal->opsiA, currentSoal->opsiC);
                printf("B. %-10s D. %-10s\n", currentSoal->opsiB, currentSoal->opsiD);
            }
            else
            {
                printf("%s\n", currentSoal->pertanyaan);
            }
            printf("\nSisa nyawa : %d \n\n", nyawa);
            printf("Jawab : ");
            scanf("%9s", jawab); /* bounded: jawab is char[10] */
            /* accept either the multiple-choice letter or the fill-in answer */
            if (strcmpi(jawab, currentSoal->jawabanPilgan) == 0 || strcmpi(jawab, currentSoal->jawabanIsian) == 0)
            {
                printf("\n\tBENAR!");
                jumlahBenar += 1;
            }
            else
            {
                printf("\n\tSALAH!");
                --nyawa;
            }
            getch();
            system("cls");
            currentSoal = currentSoal->link;
        }
        /* score: 10/15/20 points per correct answer by difficulty */
        if (strcmpi(currentData->tingkatKesulitan, "MUDAH") == 0)
        {
            currentData->score = jumlahBenar * 10;
        }
        else if (strcmpi(currentData->tingkatKesulitan, "SEDANG") == 0)
        {
            currentData->score = jumlahBenar * 15;
        }
        else
        {
            currentData->score = jumlahBenar * 20;
        }
        /* insert this round into the sorted leaderboard */
        *startData = hi_score_new(currentData, *startData);
        /* BUG FIX: the original "%1f" is a width-1 no-op; "%.1f" matches the
           one-decimal formatting used by hi_score_pr (%5.1lf) */
        printf("\n Skor : %.1f \n\n", currentData->score);
        getch();
    }
    else
    {
        printf("Maaf, soal tidak ditemukan \n");
        free(currentData); /* BUG FIX: was leaked when there were no questions */
    }
}
struct soal *tambahSoal(struct soal *head)
/*
 * Read a new question and all its answers from stdin and append it to the
 * end of the list.  Returns the (possibly new) list head.
 */
{
    struct soal *temp = malloc(sizeof *temp);
    struct soal **tail;
    if (temp == NULL)
        return head; /* out of memory: leave the list untouched */
    temp->link = NULL;
    temp->random = rand();
    /* BUG FIX: the original " %[^\n]s" reads were unbounded and could
       overflow the fixed-size fields (and the trailing 's' was a spurious
       literal).  Each width below matches the destination buffer. */
    printf("Masukkan soal baru:\n");
    scanf(" %99[^\n]", temp->pertanyaan);
    printf("Masukkan opsi jawaban A baru:\n");
    scanf(" %9[^\n]", temp->opsiA);
    printf("Masukkan opsi jawaban B baru:\n");
    scanf(" %9[^\n]", temp->opsiB);
    printf("Masukkan opsi jawaban C baru:\n");
    scanf(" %9[^\n]", temp->opsiC);
    printf("Masukkan opsi jawaban D baru:\n");
    scanf(" %9[^\n]", temp->opsiD);
    printf("Masukkan jawaban pilihan ganda baru:\n");
    scanf(" %9[^\n]", temp->jawabanPilgan);
    printf("Masukkan jawaban isian baru:\n");
    scanf(" %9[^\n]", temp->jawabanIsian);
    /* append at the tail (handles the empty-list case uniformly) */
    for (tail = &head; *tail != NULL; tail = &(*tail)->link)
        ;
    *tail = temp;
    return head;
}
void hapusSoal(struct soal **ptr)
/*
 * Delete one question, chosen by its 1-based position in the list.
 * Prints the list, prompts for a position, then unlinks and frees the node.
 * NOTE(review): the parallel/single pair runs this body on one thread only;
 * it adds no parallelism -- confirm whether it can be removed.
 */
{
#pragma omp parallel
    {
#pragma omp single
        {
            int i = 0, posisi;
            struct soal *sebelum = *ptr;
            struct soal *sekarang = *ptr;
            struct soal *head = *ptr;
            if (*ptr == NULL)
            {
                /* BUG FIX: check for the empty list BEFORE prompting; the
                   original prompted first, so any position was "valid"
                   against i == 0. */
                printf("Tidak ada soal.\n\n");
                system("pause");
            }
            else
            {
                /* print every question, numbered from 1 */
                while (head != NULL)
                {
                    i++;
                    printf("%d)\n", i);
                    printf("Soal: %s\n", head->pertanyaan);
                    printf("Opsi jawaban A: %s\n", head->opsiA);
                    printf("Opsi jawaban B: %s\n", head->opsiB);
                    printf("Opsi jawaban C: %s\n", head->opsiC);
                    printf("Opsi jawaban D: %s\n", head->opsiD);
                    printf("Jawaban pilihan ganda: %s\n", head->jawabanPilgan);
                    printf("Jawaban isian: %s\n\n", head->jawabanIsian);
                    head = head->link;
                }
                printf("Masukkan urutan soal yang ingin Anda hapus: ");
                inputInt(&posisi);
                /* BUG FIX: the original validated only once with a single
                   'if', and accepted posisi < 1 -- either could walk off
                   the end of the list below. */
                while (posisi < 1 || posisi > i)
                {
                    printf("\nSoal tidak ditemukan! Masukkan urutan soal yang ingin Anda hapus: ");
                    inputInt(&posisi);
                }
                if (posisi == 1) /* delete the head node */
                {
                    *ptr = sekarang->link;
                    free(sekarang);
                    sekarang = NULL;
                }
                else /* delete an interior/tail node */
                {
                    while (posisi != 1)
                    {
                        sebelum = sekarang;
                        sekarang = sekarang->link;
                        posisi--;
                    }
                    sebelum->link = sekarang->link;
                    free(sekarang);
                    sekarang = NULL;
                }
            }
        }
    }
}
void soal_wb(struct soal *head)
{
FILE *filesoal;
filesoal = fopen("soal.bin", "wb");
if (filesoal != NULL)
{
struct soal *soalSekarang = head;
struct soal *temp = NULL;
while (soalSekarang != NULL)
{
temp = soalSekarang->link; //simpan address currentsiswa selanjutnya
soalSekarang->link = NULL; //kosongin link currentsiswa
fseek(filesoal, 0, SEEK_END);
fwrite(soalSekarang, sizeof(struct soal), 1, filesoal);
soalSekarang = temp; //lanjut ke list selanjutnya
temp = NULL;
}
fclose(filesoal);
filesoal = NULL;
}
else
{
printf("FILE OPEN ERROR\n");
}
}
struct soal *soal_next_rb(struct soal *head, FILE *filesoal)
/*
 * Read one struct soal record from filesoal (the caller positions the file)
 * and append it at the end of the list.  Returns the (possibly new) head.
 * NOTE(review): the fread results are not checked, so a short/corrupt file
 * leaves the node partially uninitialized; the append also walks the whole
 * list on every call (O(n^2) load) -- acceptable for small files.
 */
{
    if (head == NULL)
    {
        head = malloc(sizeof(struct soal));
        fread(head, sizeof(struct soal), 1, filesoal);
        head->link = NULL; /* discard the stale on-disk link pointer */
    }
    else
    {
        struct soal *indexData = head;
        struct soal *newData = malloc(sizeof(struct soal));
        /* walk to the current tail */
        while (indexData->link != NULL)
        {
            indexData = indexData->link;
        }
        fread(newData, sizeof(struct soal), 1, filesoal);
        indexData->link = newData;
        newData->link = NULL; /* discard the stale on-disk link pointer */
    }
    return head;
}
struct soal *soal_rb(struct soal *head)
/*
 * Load all questions from soal.bin into a fresh linked list and return its
 * head.  The record count is derived from the file size, so a partially
 * written trailing record is ignored.  On open failure the incoming head is
 * returned unchanged.
 * NOTE(review): any list previously pointed to by head is dropped (head is
 * reset to NULL below) -- callers pass NULL, so no leak in practice.
 */
{
    FILE *filesoal;
    filesoal = fopen("soal.bin", "rb");
    if (filesoal != NULL)
    {
        head = NULL;
        /* determine the file size to compute the number of records */
        fseek(filesoal, 0, SEEK_END);
        long fileSize = ftell(filesoal);
        rewind(filesoal);
        /* number of complete records stored in the file */
        int jumlahsoal = (int)(fileSize / (sizeof(struct soal)));
        int i = 0;
        /* append each record to the linked list */
        for (i = 0; i < jumlahsoal; ++i)
        {
            fseek(filesoal, (sizeof(struct soal) * i), SEEK_SET);
            head = soal_next_rb(head, filesoal);
        }
        fclose(filesoal);
    }
    else
    {
        printf("FILE OPEN ERROR FOR READ\n");
    }
    return head;
}
struct data *hi_score_rd(struct data *stackptr)
/*
 * Load the leaderboard from hiscore.bin.  Each record is: the name bytes
 * terminated by '\t' (see hi_score_wb), then the raw tingkatKesulitan
 * field, then the double score.  Records are pushed onto the stack, so the
 * last record in the file ends up on top (the highest score -- hi_score_wb
 * writes lowest first).  Returns the new stack top.
 *
 * BUG FIXES vs the original:
 *  - when fopen failed, execution fell through to fgetc(NULL) and crashed;
 *  - an empty or truncated file made the name loop run past EOF forever,
 *    overflowing nama[]; the name copy is now bounded and EOF-aware.
 */
{
    FILE *hiscore;
    struct data *player;
    int c;
    hiscore = fopen("hiscore.bin", "rb");
    if (hiscore == NULL)
    {
        puts("data still empty...");
        return stackptr; /* nothing to read; keep whatever the caller had */
    }
    while ((c = fgetc(hiscore)) != EOF)
    {
        player = malloc(sizeof *player);
        if (player == NULL)
            break; /* out of memory: stop loading, keep what we have */
        int i = 0;
        /* c already holds the first byte of the name; copy until '\t',
           dropping any excess so nama[] can never overflow */
        while (c != EOF && c != '\t')
        {
            if (i < (int) sizeof player->nama - 1)
                player->nama[i++] = (char) c;
            c = fgetc(hiscore);
        }
        player->nama[i] = '\0'; /* ensure the string is terminated */
        fread(&player->tingkatKesulitan, sizeof(player->tingkatKesulitan), 1, hiscore);
        fread(&player->score, sizeof(double), 1, hiscore);
        /* push onto the stack (file order: lowest score first) */
        player->link = stackptr;
        stackptr = player;
    }
    fclose(hiscore);
    puts("END of file read..");
    return stackptr;
}
struct data *hi_score_new(struct data *newEntry, struct data *stackptr)
/*
 * Insert newEntry into the score-sorted leaderboard stack (highest score on
 * top) and return the new top.  The caller must have initialized
 * newEntry->link (game() sets it to NULL before calling).
 */
{
    struct data *prev = NULL;
    struct data *topstack;
    topstack = stackptr;
    do
    {
        if (topstack == NULL) /* very first entry */
        {
            topstack = newEntry;
            break;
        }
        else if (newEntry->score < stackptr->score) /* keep descending */
        {
            if (stackptr->link == NULL) /* smallest score: append at bottom */
            {
                stackptr->link = newEntry;
                newEntry->link = NULL;
                break;
            }
            else /* walk one node down */
            {
                prev = stackptr;
                stackptr = stackptr->link;
            }
        }
        else if (stackptr == topstack && newEntry->score >= stackptr->score)
        {
            /* BUG FIX: the original compared with '>', so a score EQUAL to
               the current top fell through to the final branch and
               dereferenced prev == NULL.  '>=' inserts ties at the top. */
            newEntry->link = stackptr;
            topstack = newEntry;
            break;
        }
        else /* somewhere in the middle; prev is non-NULL here */
        {
            prev->link = newEntry;
            newEntry->link = stackptr;
            break;
        }
    } while (stackptr != NULL);
    /* the last node always holds the smallest score */
    return topstack;
}
void hi_score_pr(struct data *stackptr)
/* Print the leaderboard as a fixed-width table, top score first.
   NOTE(review): the difficulty column prints exactly 6 characters from a
   char[6] copy of tingkatKesulitan; since 6 bytes cannot hold
   "sedang" + NUL, both the strcpy and the loop may touch a byte past the
   stored word -- the struct field size should be confirmed/fixed. */
{
    int rank = 1;
    int i;
    char uppercase[6];
    puts("\n\n\n\t=================================================");
    puts("\t| No | Nama | Kesulitan | Nilai |");
    puts("\t=================================================");
    while (stackptr != NULL)
    {
        printf("\t| %i. | %-20s | ", rank, stackptr->nama);
        strcpy(uppercase, stackptr->tingkatKesulitan);
        /* print the difficulty uppercased, always 6 characters wide */
        for (i = 0; i < 6; i++)
        printf("%c", toupper(uppercase[i]));
        if (strcmpi(uppercase, "sedang") == 0)
        printf(" "); /* table alignment: "sedang" is one char wider */
        else
        printf(" "); /* table alignment */
        printf("| %5.1lf |\n", stackptr->score);
        stackptr = stackptr->link;
        rank++;
    }
    puts("\t=================================================");
    getch();
}
void hi_score_wb(struct data *stackptr)
/*
Menulis leaderboard ke dalam file .bin. Peringkat terakhir
ditulis terlebih dahulu agar pada saat pembacaan di run berikutnya
peringkat terakhir ada di list/node terakhir dari stack
*/
{
struct data *toWrite, *topstack;
struct data *prev = NULL;
FILE *hiscore;
if (stackptr != NULL)
{
hiscore = fopen("hiscore.bin", "wb");
topstack = stackptr;
while (toWrite != topstack) // Bagian ini mengambil alamat terakhir dari list/node.
{
while (1)
{
if (stackptr->link == NULL)
{
toWrite = stackptr;
break;
}
else
{
prev = stackptr;
stackptr = stackptr->link;
}
}
puts("Writing to file . . .");
fwrite(&toWrite->nama, sizeof(toWrite->nama), 1, hiscore);
fputc('\t', hiscore); //Menuliskan \t setelah nama sebagai penanda akhir dari nama
fwrite(&toWrite->tingkatKesulitan, sizeof(toWrite->tingkatKesulitan), 1, hiscore);
fwrite(&toWrite->score, sizeof(toWrite->score), 1, hiscore);
if (prev != NULL)
prev->link = NULL;
stackptr = topstack;
puts("DONE !");
}
fclose(hiscore);
puts("ALL DONE !");
}
}
void randomSoal(struct soal *head)
{
    /*
       Shuffle the question list so the questions appear in a different
       order each run: assign every node a random key, then bubble-sort
       the node payloads by that key.
    */
    char tempPertanyaan[100];
    char tempA[10];
    char tempB[10];
    char tempC[10];
    char tempD[10];
    char tempJawabanPilgan[10];
    char tempJawabanIsian[10];
    int tempRandom;
    int flag;
    struct soal *last = NULL, *current = head;
    if (head == NULL) /* BUG FIX: empty list would dereference NULL below */
        return;
    while (current != NULL)
    {
        current->random = rand();
        current = current->link;
    }
    do
    {
        flag = 0;
        current = head;
        while (current->link != last)
        {
            if (current->random > current->link->random)
            {
                /* BUG FIX: swap the random keys along with the payloads.
                   The original left the keys in place, so subsequent
                   comparisons used stale keys and the list was never
                   actually ordered by key. */
                tempRandom = current->random;
                current->random = current->link->random;
                current->link->random = tempRandom;
                strcpy(tempPertanyaan, current->pertanyaan);
                strcpy(current->pertanyaan, current->link->pertanyaan);
                strcpy(current->link->pertanyaan, tempPertanyaan);
                strcpy(tempA, current->opsiA);
                strcpy(current->opsiA, current->link->opsiA);
                strcpy(current->link->opsiA, tempA);
                strcpy(tempB, current->opsiB);
                strcpy(current->opsiB, current->link->opsiB);
                strcpy(current->link->opsiB, tempB);
                strcpy(tempC, current->opsiC);
                strcpy(current->opsiC, current->link->opsiC);
                strcpy(current->link->opsiC, tempC);
                strcpy(tempD, current->opsiD);
                strcpy(current->opsiD, current->link->opsiD);
                strcpy(current->link->opsiD, tempD);
                strcpy(tempJawabanPilgan, current->jawabanPilgan);
                strcpy(current->jawabanPilgan, current->link->jawabanPilgan);
                strcpy(current->link->jawabanPilgan, tempJawabanPilgan);
                strcpy(tempJawabanIsian, current->jawabanIsian);
                strcpy(current->jawabanIsian, current->link->jawabanIsian);
                strcpy(current->link->jawabanIsian, tempJawabanIsian);
                flag = 1;
            }
            current = current->link;
        }
        last = current; /* classic bubble sort: shrink the unsorted range */
    } while (flag);
}
|
assign4_openmp.c | #include<stdio.h>
#include<omp.h>
int main()
{
    /* Read a 3x3 integer matrix, echo it, then compute its determinant
       with the cofactor terms accumulated in parallel via an OpenMP
       reduction (rule of Sarrus: each i-term is independent). */
    int mat[3][3], i, j, m;
    int det = 0;
    printf("\nInput elements:");
    for (i = 0; i < 3; i++)
    {
        for (j = 0; j < 3; j++)
        {
            /* BUG FIX: previously the scanf return value was ignored,
               so malformed input left mat[][] uninitialized (UB). */
            if (scanf("%d", &mat[i][j]) != 1)
            {
                printf("\nInvalid input.\n");
                return 1;
            }
        }
    }
    printf("\nThe Original Matrix is :\n");
    for (i = 0; i < 3; i++)
    {
        for (j = 0; j < 3; j++)
            printf("%2d", mat[i][j]);
        printf("\n");
    }
    m = omp_get_num_procs();  /* one thread per available processor */
    omp_set_num_threads(m);
    #pragma omp parallel for shared(mat) private(i) reduction(+:det)
    for (i = 0; i < 3; i++)
        det = det + (mat[0][i]*(mat[1][(i+1)%3]*mat[2][(i+2)%3] - mat[1][(i+2)%3]*mat[2][(i+1)%3]));
    printf("\nThe Determinant value is: %d\n", det);
    return 0;
}
|
par_gsmg.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Geometrically smooth interpolation multigrid
*
*****************************************************************************/
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include "_hypre_parcsr_ls.h"
#include "par_amg.h"
#include "_hypre_lapack.h"
/* Fallback macros (only if not already provided by the headers above).
   NOTE: both evaluate their argument(s) more than once -- do not pass
   expressions with side effects. */
#ifndef ABS
#define ABS(x) ((x)>0 ? (x) : -(x))
#endif
#ifndef MAX
#define MAX(a,b) ((a)>(b)?(a):(b))
#endif
/* Euclidean (2-)norm of the n-vector x. */
static HYPRE_Real mydnrm2(HYPRE_Int n, HYPRE_Real *x)
{
   HYPRE_Real sumsq = 0.;
   HYPRE_Int k;
   for (k = 0; k < n; k++)
   {
      sumsq += x[k] * x[k];
   }
   return sqrt(sumsq);
}
/* Scale the n-vector x in place by the scalar a. */
static void mydscal(HYPRE_Int n, HYPRE_Real a, HYPRE_Real *x)
{
   HYPRE_Int k;
   for (k = 0; k < n; k++)
   {
      x[k] *= a;
   }
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixFillSmooth
* - fill in smooth matrix
* - this function will scale the smooth vectors
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixFillSmooth(HYPRE_Int nsamples, HYPRE_Real *samples,
                             hypre_ParCSRMatrix *S, hypre_ParCSRMatrix *A,
                             HYPRE_Int num_functions, HYPRE_Int *dof_func)
{
   /* Fill the off-diagonal entries of S with a smoothness measure:
      S_ij = 1 / sum_k |p_k[i] - p_k[j]| over the nsamples sample vectors.
      The samples array is modified in place (normalized and divided by
      nsamples).
      NOTE(review): A_diag_data / A_offd_data are indexed with S's CSR
      counters, so S must share A's sparsity pattern (in this file S is
      a clone of A) -- confirm before reusing elsewhere. */
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
   HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int n = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int i, j, k, ii, index, start;
   HYPRE_Int num_cols_offd;
   HYPRE_Int num_sends;
   HYPRE_Int *dof_func_offd;      /* dof_func values for off-proc columns */
   HYPRE_Int *int_buf_data;
   HYPRE_Real temp;
   HYPRE_Real *p;                 /* cursor over the sample vectors */
   HYPRE_Real *p_offd;            /* off-proc sample values, one slab per sample */
   HYPRE_Real *p_ptr;             /* base of p_offd, kept for the final free */
   HYPRE_Real *buf_data;
   HYPRE_Real nm;
#if 0
   HYPRE_Real mx = 0., my = 1.e+10;
#endif
   /* normalize each sample vector and divide by number of samples */
   for (k=0; k<nsamples; k++)
   {
      nm = mydnrm2(n, samples+k*n);
      nm = 1./nm/nsamples;
      mydscal(n, nm, samples+k*n);
   }
   num_cols_offd = hypre_CSRMatrixNumCols(S_offd);
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   buf_data = hypre_CTAlloc(HYPRE_Real, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                                        num_sends), HYPRE_MEMORY_HOST);
   p_offd = hypre_CTAlloc(HYPRE_Real, nsamples*num_cols_offd, HYPRE_MEMORY_HOST);
   p_ptr = p_offd;
   p = samples;
   /* Exchange each sample vector so every process also has the values
      at its off-processor (ghost) columns. */
   for (k = 0; k < nsamples; k++)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            buf_data[index++]
               = p[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, buf_data,
                                                  p_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      p = p+n;
      p_offd = p_offd+num_cols_offd;
   }
   hypre_TFree(buf_data, HYPRE_MEMORY_HOST);
   /* For systems of PDEs, also exchange dof_func for the ghost columns
      so we can restrict interpolation to like functions. */
   if (num_functions > 1)
   {
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
      int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                                              num_sends), HYPRE_MEMORY_HOST);
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++]
               = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   }
   for (i = 0; i < n; i++)
   {
      /* Diagonal part: start at S_diag_i[i]+1, skipping the first stored
         entry of the row (presumably the diagonal element -- confirm the
         diag-first storage convention). */
      for (j = S_diag_i[i]+1; j < S_diag_i[i+1]; j++)
      {
         ii = S_diag_j[j];
         /* only interpolate between like functions */
         if (num_functions > 1 && dof_func[i] != dof_func[ii])
         {
            S_diag_data[j] = 0.;
            continue;
         }
         /* explicit zeros */
         if (A_diag_data[j] == 0.)
         {
            S_diag_data[j] = 0.;
            continue;
         }
         temp = 0.;
         p = samples;
         for (k=0; k<nsamples; k++)
         {
            temp = temp + ABS(p[i] - p[ii]);
            p = p + n;
         }
         /* explicit zeros in matrix may cause this */
         if (temp == 0.)
         {
            S_diag_data[j] = 0.;
            continue;
         }
         temp = 1./temp; /* reciprocal */
#if 0
         my = hypre_min(my,temp);
         mx = hypre_max(mx,temp);
#endif
         S_diag_data[j] = temp;
      }
      /* Off-diagonal part: same measure, comparing against the ghost
         values received above. */
      for (j = S_offd_i[i]; j < S_offd_i[i+1]; j++)
      {
         ii = S_offd_j[j];
         /* only interpolate between like functions */
         if (num_functions > 1 && dof_func[i] != dof_func_offd[ii])
         {
            S_offd_data[j] = 0.;
            continue;
         }
         /* explicit zeros */
         if (A_offd_data[j] == 0.)
         {
            S_offd_data[j] = 0.;
            continue;
         }
         temp = 0.;
         p = samples;
         p_offd = p_ptr;
         for (k=0; k<nsamples; k++)
         {
            temp = temp + ABS(p[i] - p_offd[ii]);
            p = p + n;
            p_offd = p_offd + num_cols_offd;
         }
         /* explicit zeros in matrix may cause this */
         if (temp == 0.)
         {
            S_offd_data[j] = 0.;
            continue;
         }
         temp = 1./temp; /* reciprocal */
#if 0
         my = hypre_min(my,temp);
         mx = hypre_max(mx,temp);
#endif
         S_offd_data[j] = temp;
      }
   }
#if 0
   hypre_printf("MIN, MAX: %f %f\n", my, mx);
#endif
   hypre_TFree(p_ptr, HYPRE_MEMORY_HOST);
   if (num_functions > 1)
      hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixChooseThresh
*--------------------------------------------------------------------------*/
HYPRE_Real
hypre_ParCSRMatrixChooseThresh(hypre_ParCSRMatrix *S)
{
   /* Choose a global threshold for S: for each row take the largest
      entry (diag and offd parts together), then return the smallest
      such row-maximum over all rows and all MPI ranks. Rows whose
      maximum is exactly zero are skipped. */
   MPI_Comm comm = hypre_ParCSRMatrixComm(S);
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
   HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag);
   HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
   HYPRE_Int nrows = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int row, k;
   HYPRE_Real row_max;
   HYPRE_Real local_min = 1.e+10;
   HYPRE_Real global_min;
   for (row = 0; row < nrows; row++)
   {
      row_max = 0.;
      for (k = S_diag_i[row]; k < S_diag_i[row+1]; k++)
      {
         row_max = hypre_max(row_max, S_diag_data[k]);
      }
      for (k = S_offd_i[row]; k < S_offd_i[row+1]; k++)
      {
         row_max = hypre_max(row_max, S_offd_data[k]);
      }
      if (row_max != 0.)
      {
         local_min = hypre_min(local_min, row_max);
      }
   }
   /* reduce the per-rank minimum to a global minimum */
   hypre_MPI_Allreduce(&local_min, &global_min, 1, HYPRE_MPI_REAL, hypre_MPI_MIN, comm);
   return global_min;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixThreshold
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixThreshold(hypre_ParCSRMatrix *A, HYPRE_Real thresh)
{
   /* Compress A in place, keeping only entries with value >= thresh.
      The old I/J/data arrays of both the diag and offd parts are freed
      and replaced with newly counted, tightly sized arrays.
      NOTE(review): frees use HYPRE_MEMORY_HOST -- assumes A's CSR
      arrays were host-allocated. */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   HYPRE_Int n = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_Int num_nonzeros_diag = A_diag_i[n];
   HYPRE_Int num_nonzeros_offd = A_offd_i[n];
   HYPRE_Int *S_diag_i;
   HYPRE_Int *S_diag_j;
   HYPRE_Real *S_diag_data;
   HYPRE_Int *S_offd_i;
   HYPRE_Int *S_offd_j;
   HYPRE_Real *S_offd_data;
   HYPRE_Int count, i, jS, jA;
   /* first count the number of nonzeros we will need */
   count = 0;
   for (i=0; i<num_nonzeros_diag; i++)
      if (A_diag_data[i] >= thresh)
         count++;
   /* allocate vectors */
   S_diag_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
   S_diag_j = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
   S_diag_data = hypre_CTAlloc(HYPRE_Real, count, HYPRE_MEMORY_HOST);
   /* second pass: copy the surviving entries row by row */
   jS = 0;
   for (i = 0; i < n; i++)
   {
      S_diag_i[i] = jS;
      for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
      {
         if (A_diag_data[jA] >= thresh)
         {
            S_diag_data[jS] = A_diag_data[jA];
            S_diag_j[jS] = A_diag_j[jA];
            jS++;
         }
      }
   }
   S_diag_i[n] = jS;
   hypre_CSRMatrixNumNonzeros(A_diag) = jS;
   /* free the vectors we don't need */
   hypre_TFree(A_diag_i, HYPRE_MEMORY_HOST);
   hypre_TFree(A_diag_j, HYPRE_MEMORY_HOST);
   hypre_TFree(A_diag_data, HYPRE_MEMORY_HOST);
   /* assign the new vectors */
   hypre_CSRMatrixI(A_diag) = S_diag_i;
   hypre_CSRMatrixJ(A_diag) = S_diag_j;
   hypre_CSRMatrixData(A_diag) = S_diag_data;
   /*
    * Offd part (same two-pass count/copy as above)
    */
   /* first count the number of nonzeros we will need */
   count = 0;
   for (i=0; i<num_nonzeros_offd; i++)
      if (A_offd_data[i] >= thresh)
         count++;
   /* allocate vectors */
   S_offd_i = hypre_CTAlloc(HYPRE_Int, n+1, HYPRE_MEMORY_HOST);
   S_offd_j = hypre_CTAlloc(HYPRE_Int, count, HYPRE_MEMORY_HOST);
   S_offd_data = hypre_CTAlloc(HYPRE_Real, count, HYPRE_MEMORY_HOST);
   jS = 0;
   for (i = 0; i < n; i++)
   {
      S_offd_i[i] = jS;
      for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
      {
         if (A_offd_data[jA] >= thresh)
         {
            S_offd_data[jS] = A_offd_data[jA];
            S_offd_j[jS] = A_offd_j[jA];
            jS++;
         }
      }
   }
   S_offd_i[n] = jS;
   hypre_CSRMatrixNumNonzeros(A_offd) = jS;
   /* free the vectors we don't need */
   hypre_TFree(A_offd_i, HYPRE_MEMORY_HOST);
   hypre_TFree(A_offd_j, HYPRE_MEMORY_HOST);
   hypre_TFree(A_offd_data, HYPRE_MEMORY_HOST);
   /* assign the new vectors */
   hypre_CSRMatrixI(A_offd) = S_offd_i;
   hypre_CSRMatrixJ(A_offd) = S_offd_j;
   hypre_CSRMatrixData(A_offd) = S_offd_data;
   return 0;
}
/*--------------------------------------------------------------------------
* CreateSmoothVecs
* - smoother depends on the level being used
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCreateSmoothVecs(void *data,
                                hypre_ParCSRMatrix *A,
                                HYPRE_Int num_sweeps,
                                HYPRE_Int level,
                                HYPRE_Real **SmoothVecs_p)
{
   /* Generate nsamples "smooth" vectors by relaxing on A u = 0 starting
      from random initial guesses. The resulting vectors are packed into
      a single freshly allocated array of nsamples*n_local reals; the
      caller receives ownership via *SmoothVecs_p and must free it. */
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
   MPI_Comm comm = hypre_ParCSRMatrixComm(A);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   hypre_ParVector *Zero;   /* zero right-hand side */
   hypre_ParVector *Temp;   /* scratch vector for the relaxation */
   hypre_ParVector *U;      /* iterate; datax points at its local data */
   hypre_ParVector *Qtemp = NULL;  /* extra scratch, threaded relaxation only */
   HYPRE_Int i;
   HYPRE_BigInt n = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_Int n_local = hypre_CSRMatrixNumRows(A_diag);
   HYPRE_BigInt *starts = hypre_ParCSRMatrixRowStarts(A);
   HYPRE_Int sample;
   HYPRE_Int nsamples = hypre_ParAMGDataNumSamples(amg_data);
   HYPRE_Int ret;
   HYPRE_Real *datax, *bp, *p;
   HYPRE_Int rlx_type;
   HYPRE_Int smooth_type;
   HYPRE_Int smooth_option = 0;
   HYPRE_Int smooth_num_levels;
   HYPRE_Solver *smoother;
   HYPRE_Int debug_flag = hypre_ParAMGDataDebugFlag(amg_data);
   HYPRE_Int num_threads;
   num_threads = hypre_NumThreads();
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(A);
      comm_pkg = hypre_ParCSRMatrixCommPkg(A);
   }
   if (debug_flag >= 1)
      hypre_printf("Creating smooth dirs, %d sweeps, %d samples\n", num_sweeps,
                   nsamples);
   smooth_type = hypre_ParAMGDataSmoothType(amg_data);
   smooth_num_levels = hypre_ParAMGDataSmoothNumLevels(amg_data);
   /* Use the complex smoother on this level only if one is configured
      for it; otherwise smooth_option stays 0 and plain relaxation is
      used. Note smoother[level] below is only valid under this guard. */
   if (smooth_num_levels > level)
   {
      smooth_option = smooth_type;
      smoother = hypre_ParAMGDataSmoother(amg_data);
      num_sweeps = hypre_ParAMGDataSmoothNumSweeps(amg_data);
   }
   rlx_type = hypre_ParAMGDataGridRelaxType(amg_data)[0];
   /* rlx_wt = hypre_ParAMGDataRelaxWeight(amg_data)[level]; */
   /* omega = hypre_ParAMGDataOmega(amg_data)[level]; */
   /* generate par vectors */
   Zero = hypre_ParVectorCreate(comm, n, starts);
   hypre_ParVectorSetPartitioningOwner(Zero,0);
   hypre_ParVectorInitialize(Zero);
   datax = hypre_VectorData(hypre_ParVectorLocalVector(Zero));
   for (i=0; i<n_local; i++)
      datax[i] = 0.;
   Temp = hypre_ParVectorCreate(comm, n, starts);
   hypre_ParVectorSetPartitioningOwner(Temp,0);
   hypre_ParVectorInitialize(Temp);
   datax = hypre_VectorData(hypre_ParVectorLocalVector(Temp));
   for (i=0; i<n_local; i++)
      datax[i] = 0.;
   U = hypre_ParVectorCreate(comm, n, starts);
   hypre_ParVectorSetPartitioningOwner(U,0);
   hypre_ParVectorInitialize(U);
   /* from here on, datax aliases U's local data */
   datax = hypre_VectorData(hypre_ParVectorLocalVector(U));
   if (num_threads > 1)
   {
      Qtemp = hypre_ParVectorCreate(comm, n, starts);
      hypre_ParVectorInitialize(Qtemp);
      hypre_ParVectorSetPartitioningOwner(Qtemp,0);
   }
   /* allocate space for the vectors */
   bp = hypre_CTAlloc(HYPRE_Real, nsamples*n_local, HYPRE_MEMORY_HOST);
   p = bp;
   /* generate random vectors */
   for (sample=0; sample<nsamples; sample++)
   {
      /* random initial guess in U (values in [-0.5, 0.5)) */
      for (i=0; i<n_local; i++)
         datax[i] = hypre_Rand() - .5;
      /* smooth toward the null space: relax on A u = 0 */
      for (i=0; i<num_sweeps; i++)
      {
         if (smooth_option == 6)
         {
            HYPRE_SchwarzSolve(smoother[level],
                               (HYPRE_ParCSRMatrix) A,
                               (HYPRE_ParVector) Zero,
                               (HYPRE_ParVector) U);
         }
         else
         {
            ret = hypre_BoomerAMGRelax(A, Zero, NULL /*CFmarker*/,
                                       rlx_type , 0 /*rel pts*/, 1.0 /*weight*/,
                                       1.0 /*omega*/, NULL, U, Temp,
                                       Qtemp);
            hypre_assert(ret == 0);
         }
      }
      /* copy out the solution */
      for (i=0; i<n_local; i++)
         *p++ = datax[i];
   }
   hypre_ParVectorDestroy(Zero);
   hypre_ParVectorDestroy(Temp);
   hypre_ParVectorDestroy(U);
   if (num_threads > 1)
      hypre_ParVectorDestroy(Qtemp);
   /* hand ownership of the packed sample array to the caller */
   *SmoothVecs_p = bp;
   return 0;
}
/*--------------------------------------------------------------------------
* CreateSmoothDirs replaces CreateS in AMG
* - smoother depends on the level being used
* - in this version, CreateSmoothVecs must be called prior to this function
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGCreateSmoothDirs(void *data,
                                hypre_ParCSRMatrix *A,
                                HYPRE_Real *SmoothVecs,
                                HYPRE_Real thresh,
                                HYPRE_Int num_functions,
                                HYPRE_Int *dof_func,
                                hypre_ParCSRMatrix **S_ptr)
{
   /* Build the strength matrix S from the smooth vectors: clone A's
      structure, fill it with smoothness measures, then drop entries
      below thresh times the globally chosen minimax value. */
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) data;
   HYPRE_Int debug_flag = hypre_ParAMGDataDebugFlag(amg_data);
   hypre_ParCSRMatrix *S;
   HYPRE_Real minimax;
   /* structural copy of A (values not copied) */
   S = hypre_ParCSRMatrixClone(A, 0);
   /* fill S with differences of the smooth vectors */
   hypre_ParCSRMatrixFillSmooth(hypre_ParAMGDataNumSamples(amg_data),
                                SmoothVecs, S, A, num_functions, dof_func);
   minimax = hypre_ParCSRMatrixChooseThresh(S);
   if (debug_flag >= 1)
   {
      hypre_printf("Minimax chosen: %f\n", minimax);
   }
   /* threshold and compress */
   hypre_ParCSRMatrixThreshold(S, thresh*minimax);
   *S_ptr = S;
   return 0;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGNormalizeVecs
*
* Normalize the smooth vectors and also make the first vector the constant
* vector
*
* inputs:
* n = length of smooth vectors
* num = number of smooth vectors
* V = smooth vectors (array of length n*num), also an output
*
* output:
* V = adjusted smooth vectors
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGNormalizeVecs(HYPRE_Int n, HYPRE_Int num, HYPRE_Real *V)
{
   /* Overwrite the first smooth vector with the constant vector, then
      normalize each of the num vectors (stored contiguously, length n
      each) to unit 2-norm, in place. */
   HYPRE_Int vec, k;
   HYPRE_Real nrm;
   /* first vector becomes the constant vector */
   for (k = 0; k < n; k++)
   {
      V[k] = 1.0;
   }
   /* unit-normalize every vector */
   for (vec = 0; vec < num; vec++)
   {
      nrm = mydnrm2(n, &V[vec*n]);
      mydscal(n, 1./nrm, &V[vec*n]);
   }
   return 0;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGFitVectors
*
* Construct interpolation weights based on fitting smooth vectors
*
* inputs:
* ip = row number of row in P being processed (0-based)
* n = length of smooth vectors
* num = number of smooth vectors
* V = smooth vectors (array of length n*num), also an output
* nc = number of coarse grid points
* ind = indices of coarse grid points (0-based)
*
* output:
* val = interpolation weights for the coarse grid points
* V = smooth vectors; first one has been changed to constant vector;
* vectors have also been normalized; this is also an input
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGFitVectors(HYPRE_Int ip, HYPRE_Int n, HYPRE_Int num, const HYPRE_Real *V,
                          HYPRE_Int nc, const HYPRE_Int *ind, HYPRE_Real *val)
{
   /* Compute interpolation weights for fine row ip by least-squares
      fitting the smooth vectors: solve min ||a x - b||_2 with LAPACK
      dgels, where a is num-by-nc (column-major), a(i,j) = value of
      sample i at coarse point ind[j], and b(i) = value of sample i at
      row ip. The first nc entries of the solution are copied to val. */
   HYPRE_Real *a, *b;
   HYPRE_Real *ap;
   HYPRE_Int i, j;
   HYPRE_Real *work;
   HYPRE_Int work_size;
   HYPRE_Int info;
   HYPRE_Int temp;
   /*
      hypre_printf("Fit: row %d, n %d num %d, nc = %d ", ip, n, num, nc);
      for (i=0; i<nc; i++)
         hypre_printf("%d ", ind[i]);
      hypre_printf("\n");
   */
   /* no coarse neighbors: nothing to fit */
   if (nc == 0)
      return 0;
   /* fixed workspace size -- a kludge; assumed large enough for dgels */
   work_size = 2000*64;
   work = hypre_CTAlloc(HYPRE_Real, work_size, HYPRE_MEMORY_HOST);
   /* build a column-major: columns are coarse points, rows are samples */
   a = hypre_CTAlloc(HYPRE_Real, num*nc, HYPRE_MEMORY_HOST);
   ap = a;
   for (j=0; j<nc; j++)
   {
      for (i=0; i<num; i++)
      {
         *ap = V[i*n+ind[j]];
         ap++;
      }
   }
   /* b must have room for max(nc, num) entries (dgels requirement) */
   temp = MAX(nc, num);
   b = hypre_CTAlloc(HYPRE_Real, temp, HYPRE_MEMORY_HOST);
   for (i=0; i<num; i++)
      b[i] = V[i*n+ip];
   {
      char trans = 'N';
      HYPRE_Int one = 1;
      hypre_dgels(&trans, &num, &nc, &one, a, &num,
                  b, &temp, work, &work_size, &info);
      if (info != 0)
         /* NOTE(review): the "%d" in this message is never substituted --
            hypre_error_w_msg is passed no value for it here */
         hypre_error_w_msg(HYPRE_ERROR_GENERIC,"par_gsmg: dgels returned %d\n");
      /* copy solution into output vector */
      for (j=0; j<nc; j++)
         val[j] = b[j];
   }
   hypre_TFree(b, HYPRE_MEMORY_HOST);
   hypre_TFree(a, HYPRE_MEMORY_HOST);
   hypre_TFree(work, HYPRE_MEMORY_HOST);
   return info;
}
/*---------------------------------------------------------------------------
* hypre_BoomerAMGBuildInterpLS
*
* Interpolation built from fitting smooth vectors
* - sequential version only
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpLS( hypre_ParCSRMatrix *A,
                              HYPRE_Int *CF_marker,
                              hypre_ParCSRMatrix *S,
                              HYPRE_BigInt *num_cpts_global,
                              HYPRE_Int num_functions,
                              HYPRE_Int *dof_func,
                              HYPRE_Int debug_flag,
                              HYPRE_Real trunc_factor,
                              HYPRE_Int num_smooth,
                              HYPRE_Real *SmoothVecs,
                              hypre_ParCSRMatrix **P_ptr)
{
   /* Build interpolation P by least-squares fitting the smooth vectors
      (via hypre_BoomerAMGFitVectors) at each F-point's strong C-point
      neighbors. C-points interpolate by identity. Per the header
      comment this is effectively sequential: the off-diagonal part of
      P is never filled in (search for "undone" below). */
   MPI_Comm comm = hypre_ParCSRMatrixComm(S);
   hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
   hypre_ParCSRCommHandle *comm_handle;
   hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
   /* HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag); */
   HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
   HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
   hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
   /* HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
      HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
      HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd); */
   HYPRE_Int num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd);
   /* HYPRE_Int *col_map_offd = hypre_ParCSRMatrixColMapOffd(S); */
   hypre_ParCSRMatrix *P;
   HYPRE_BigInt *col_map_offd_P;
   HYPRE_Int *tmp_map_offd;
   HYPRE_Int *CF_marker_offd;
   HYPRE_Int *dof_func_offd = NULL;
   hypre_CSRMatrix *S_ext;
   //HYPRE_Real *S_ext_data;
   //HYPRE_Int *S_ext_i;
   //HYPRE_BigInt *S_ext_j;
   hypre_CSRMatrix *P_diag;
   hypre_CSRMatrix *P_offd;
   HYPRE_Real *P_diag_data;
   HYPRE_Int *P_diag_i;
   HYPRE_Int *P_diag_j;
   HYPRE_Real *P_offd_data;
   HYPRE_Int *P_offd_i;
   HYPRE_Int *P_offd_j;
   HYPRE_Int P_diag_size;
   HYPRE_Int P_offd_size;
   HYPRE_Int *P_marker;
   /* HYPRE_Int *P_marker_offd; */
   HYPRE_Int jj_counter,jj_counter_offd;
   HYPRE_Int *jj_count, *jj_count_offd;   /* per-thread nonzero counts */
   /* HYPRE_Int jj_begin_row,jj_begin_row_offd;
      HYPRE_Int jj_end_row,jj_end_row_offd; */
   HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
   HYPRE_Int n_fine = hypre_CSRMatrixNumRows(S_diag);
   HYPRE_Int *fine_to_coarse;    /* fine row -> local coarse index */
   //HYPRE_BigInt *fine_to_coarse_offd;
   HYPRE_Int *coarse_counter;    /* per-thread C-point counts */
   HYPRE_Int coarse_shift;
   HYPRE_BigInt total_global_cpts;
   HYPRE_Int num_cols_P_offd;
   //HYPRE_BigInt *my_first_cpt;
   HYPRE_Int i,i1;
   HYPRE_Int j,jl,jj;
   HYPRE_Int start;
   HYPRE_Real one = 1.0;
   HYPRE_Int my_id;
   HYPRE_Int num_procs;
   HYPRE_Int num_threads;
   HYPRE_Int num_sends;
   HYPRE_Int index;
   HYPRE_Int ns, ne, size, rest;  /* per-thread row range [ns, ne) */
   HYPRE_Int *int_buf_data;
   //HYPRE_BigInt *big_buf_data;
   HYPRE_Real wall_time; /* for debugging instrumentation */
   hypre_MPI_Comm_size(comm, &num_procs);
   hypre_MPI_Comm_rank(comm,&my_id);
   num_threads = hypre_NumThreads();
   //my_first_cpt = num_cpts_global[my_id];
   total_global_cpts = num_cpts_global[num_procs];
   /*-------------------------------------------------------------------
    * Get the CF_marker data for the off-processor columns
    *-------------------------------------------------------------------*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
   if (num_functions > 1 && num_cols_S_offd)
      dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
   if (!comm_pkg)
   {
      hypre_MatvecCommPkgCreate(S);
      comm_pkg = hypre_ParCSRMatrixCommPkg(S);
   }
   num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
   int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                                           num_sends), HYPRE_MEMORY_HOST);
   index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         int_buf_data[index++]
            = CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                               CF_marker_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   /* also exchange dof_func for ghost columns in the systems case */
   if (num_functions > 1)
   {
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
            int_buf_data[index++]
               = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  dof_func_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
   }
   hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 1 CF_marker =    %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*----------------------------------------------------------------------
    * Get the ghost rows of S
    *---------------------------------------------------------------------*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   if (num_procs > 1)
   {
      S_ext = hypre_ParCSRMatrixExtractBExt(S,S,1);
      //S_ext_i = hypre_CSRMatrixI(S_ext);
      //S_ext_j = hypre_CSRMatrixBigJ(S_ext);
      //S_ext_data = hypre_CSRMatrixData(S_ext);
   }
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d  Interp: Comm 2   Get S_ext =  %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    *  First Pass: Determine size of P and fill in fine_to_coarse mapping.
    *-----------------------------------------------------------------------*/
   /*-----------------------------------------------------------------------
    *  Intialize counters and allocate mapping vector.
    *-----------------------------------------------------------------------*/
   coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
   fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   /*-----------------------------------------------------------------------
    *  Loop over fine grid.
    *-----------------------------------------------------------------------*/
   /* RDF: this looks a little tricky, but doable */
   /* Each thread counts C-points and P-nonzeros for its own contiguous
      slice of rows; the first `rest` threads get one extra row. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          *  If i is a C-point, interpolation is the identity. Also set up
          *  mapping vector.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            jj_count[j]++;
            fine_to_coarse[i] = coarse_counter[j];
            coarse_counter[j]++;
         }
         /*--------------------------------------------------------------------
          *  If i is an F-point, interpolation is from the C-points that
          *  strongly influence i.
          *--------------------------------------------------------------------*/
         else
         {
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               if (CF_marker[i1] >= 0)
               {
                  jj_count[j]++;
               }
            }
            if (num_procs > 1)
            {
               /* removed */
            }
         }
      }
   }
   /*-----------------------------------------------------------------------
    *  Allocate  arrays.
    *-----------------------------------------------------------------------*/
   /* prefix-sum the per-thread counters so each thread's slice knows its
      starting offsets in the P arrays */
   for (i=0; i < num_threads-1; i++)
   {
      coarse_counter[i+1] += coarse_counter[i];
      jj_count[i+1] += jj_count[i];
      jj_count_offd[i+1] += jj_count_offd[i];
   }
   i = num_threads-1;
   jj_counter = jj_count[i];
   jj_counter_offd = jj_count_offd[i];
   P_diag_size = jj_counter;
   P_diag_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_diag_j    = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
   P_diag_i[n_fine] = jj_counter;
   P_offd_size = jj_counter_offd;
   P_offd_i    = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
   P_offd_j    = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
   P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
   /*-----------------------------------------------------------------------
    *  Intialize some stuff.
    *-----------------------------------------------------------------------*/
   jj_counter = start_indexing;
   jj_counter_offd = start_indexing;
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Internal work 1 =     %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   /*-----------------------------------------------------------------------
    *  Send and receive fine_to_coarse info.
    *-----------------------------------------------------------------------*/
   if (debug_flag==4) wall_time = time_getWallclockSeconds();
   /*fine_to_coarse_offd = hypre_CTAlloc(HYPRE_BigInt, num_cols_S_offd, HYPRE_MEMORY_HOST);
   big_buf_data = hypre_CTAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
                                                                              num_sends), HYPRE_MEMORY_HOST);*/
   /* shift each thread-slice's local coarse numbering by the number of
      C-points counted by the preceding threads */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,ns,ne,size,rest,coarse_shift) HYPRE_SMP_SCHEDULE
#endif
   for (j = 0; j < num_threads; j++)
   {
      coarse_shift = 0;
      if (j > 0) coarse_shift = coarse_counter[j-1];
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (j < rest)
      {
         ns = j*size+j;
         ne = (j+1)*size+j+1;
      }
      else
      {
         ns = j*size+rest;
         ne = (j+1)*size+rest;
      }
      for (i = ns; i < ne; i++)
         fine_to_coarse[i] += coarse_shift;
   }
   /*index = 0;
   for (i = 0; i < num_sends; i++)
   {
      start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
      for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         big_buf_data[index++]
            = my_first_cpt+(HYPRE_BigInt)fine_to_coarse[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
   }
   comm_handle = hypre_ParCSRCommHandleCreate( 21, comm_pkg, big_buf_data,
                                               fine_to_coarse_offd);
   hypre_ParCSRCommHandleDestroy(comm_handle);
   if (debug_flag==4)
   {
      wall_time = time_getWallclockSeconds() - wall_time;
      hypre_printf("Proc = %d     Interp: Comm 4 FineToCoarse = %f\n",
                   my_id, wall_time);
      fflush(NULL);
   }
   if (debug_flag==4) wall_time = time_getWallclockSeconds();*/
   /*-----------------------------------------------------------------------
    *  Loop over fine grid points.
    *-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,jj,ns,ne,size,rest,P_marker,jj_counter,jj_counter_offd) HYPRE_SMP_SCHEDULE
#endif
   for (jl = 0; jl < num_threads; jl++)
   {
      size = n_fine/num_threads;
      rest = n_fine - size*num_threads;
      if (jl < rest)
      {
         ns = jl*size+jl;
         ne = (jl+1)*size+jl+1;
      }
      else
      {
         ns = jl*size+rest;
         ne = (jl+1)*size+rest;
      }
      /* each thread resumes at the offsets computed in the first pass */
      jj_counter = 0;
      if (jl > 0) jj_counter = jj_count[jl-1];
      jj_counter_offd = 0;
      if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
      for (i = ns; i < ne; i++)
      {
         /*--------------------------------------------------------------------
          *  If i is a c-point, interpolation is the identity.
          *--------------------------------------------------------------------*/
         if (CF_marker[i] >= 0)
         {
            P_diag_i[i] = jj_counter;
            P_diag_j[jj_counter]    = fine_to_coarse[i];
            P_diag_data[jj_counter] = one;
            jj_counter++;
         }
         /*--------------------------------------------------------------------
          *  If i is an F-point, build interpolation.
          *--------------------------------------------------------------------*/
         else
         {
            HYPRE_Int kk;
            /* NOTE(review): fixed-size kludge -- overflows if a row has
               more than 1000 strong C-point neighbors */
            HYPRE_Int indices[1000]; /* kludge */
            /* Diagonal part of P */
            P_diag_i[i] = jj_counter;
            kk = 0;
            for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
            {
               i1 = S_diag_j[jj];
               /*--------------------------------------------------------------
                * If neighbor i1 is a C-point, set column number in P_diag_j
                * and initialize interpolation weight to zero.
                *--------------------------------------------------------------*/
               if (CF_marker[i1] >= 0)
               {
                  P_diag_j[jj_counter]    = fine_to_coarse[i1];
                  jj_counter++;
                  indices[kk] = i1;
                  kk++;
               }
            }
            /* least-squares fit gives the weights for this row */
            hypre_BoomerAMGFitVectors(i, n_fine, num_smooth, SmoothVecs,
                                      kk, indices, &P_diag_data[P_diag_i[i]]);
            /* Off-Diagonal part of P */
            /* undone */
         }
      }
   }
   /* NOTE(review): original author's concern retained below -- after the
      (possibly parallel) loop, i is the OpenMP-private loop variable and
      its value here is not well-defined; verify this closing assignment */
   P_diag_i[i] = jj_counter; /* check that this is in right place for threads */
   P = hypre_ParCSRMatrixCreate(comm,
                                hypre_ParCSRMatrixGlobalNumRows(S),
                                total_global_cpts,
                                hypre_ParCSRMatrixColStarts(S),
                                num_cpts_global,
                                0,
                                P_diag_i[n_fine],
                                P_offd_i[n_fine]);
   P_diag = hypre_ParCSRMatrixDiag(P);
   hypre_CSRMatrixData(P_diag) = P_diag_data;
   hypre_CSRMatrixI(P_diag) = P_diag_i;
   hypre_CSRMatrixJ(P_diag) = P_diag_j;
   P_offd = hypre_ParCSRMatrixOffd(P);
   hypre_CSRMatrixData(P_offd) = P_offd_data;
   hypre_CSRMatrixI(P_offd) = P_offd_i;
   hypre_CSRMatrixJ(P_offd) = P_offd_j;
   hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
   /* Compress P, removing coefficients smaller than trunc_factor * Max */
   if (trunc_factor != 0.0)
   {
      hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0);
      P_diag_data = hypre_CSRMatrixData(P_diag);
      P_diag_i = hypre_CSRMatrixI(P_diag);
      P_diag_j = hypre_CSRMatrixJ(P_diag);
      P_offd_data = hypre_CSRMatrixData(P_offd);
      P_offd_i = hypre_CSRMatrixI(P_offd);
      P_offd_j = hypre_CSRMatrixJ(P_offd);
      P_diag_size = P_diag_i[n_fine];
      P_offd_size = P_offd_i[n_fine];
   }
   /* Compress P_offd's column space: sort and deduplicate the columns
      actually referenced, then renumber P_offd_j into that local space. */
   num_cols_P_offd = 0;
   if (P_offd_size)
   {
      P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
         P_marker[i] = P_offd_j[i];
      hypre_qsort0(P_marker, 0, P_offd_size-1);
      num_cols_P_offd = 1;
      index = P_marker[0];
      for (i=1; i < P_offd_size; i++)
      {
         if (P_marker[i] > index)
         {
            index = P_marker[i];
            P_marker[num_cols_P_offd++] = index;
         }
      }
      col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
      tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
      for (i=0; i < num_cols_P_offd; i++)
         tmp_map_offd[i] = P_marker[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i=0; i < P_offd_size; i++)
         P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
                                          P_offd_j[i],
                                          num_cols_P_offd);
      hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
   }
   if (num_cols_P_offd)
   {
      hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
      hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
   }
   hypre_GetCommPkgRTFromCommPkgA(P,S,fine_to_coarse, tmp_map_offd);
   *P_ptr = P;
   hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
   hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
   //hypre_TFree(big_buf_data, HYPRE_MEMORY_HOST);
   hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
   hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
   hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
   if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext);
   return(0);
}
/*---------------------------------------------------------------------------
 * hypre_BoomerAMGBuildInterpGSMG
 *
 * Build the BoomerAMG classical interpolation operator P from the
 * strength matrix S. Difference with hypre_BoomerAMGBuildInterp is that
 * S contains values and is used to build interpolation weights.
 * Matrix A is not used (the parameter is never referenced in the body).
 *
 * CF_marker[i] >= 0 marks row i as a C-point (interpolation is the
 * identity there); CF_marker[i] < 0 marks an F-point, which interpolates
 * from the C-points that strongly influence it. On return *P_ptr holds
 * the newly created hypre_ParCSRMatrix; the function returns 0.
 *--------------------------------------------------------------------------*/
HYPRE_Int
hypre_BoomerAMGBuildInterpGSMG( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
hypre_ParCSRMatrix *S,
HYPRE_BigInt *num_cpts_global,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
HYPRE_Int debug_flag,
HYPRE_Real trunc_factor,
hypre_ParCSRMatrix **P_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(S);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(S);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *S_diag = hypre_ParCSRMatrixDiag(S);
HYPRE_Real *S_diag_data = hypre_CSRMatrixData(S_diag);
HYPRE_Int *S_diag_i = hypre_CSRMatrixI(S_diag);
HYPRE_Int *S_diag_j = hypre_CSRMatrixJ(S_diag);
hypre_CSRMatrix *S_offd = hypre_ParCSRMatrixOffd(S);
HYPRE_Real *S_offd_data = hypre_CSRMatrixData(S_offd);
HYPRE_Int *S_offd_i = hypre_CSRMatrixI(S_offd);
HYPRE_Int *S_offd_j = hypre_CSRMatrixJ(S_offd);
HYPRE_Int num_cols_S_offd = hypre_CSRMatrixNumCols(S_offd);
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(S);
HYPRE_Int *tmp_map_offd = NULL;
hypre_ParCSRMatrix *P;
HYPRE_BigInt *col_map_offd_P;
HYPRE_Int *CF_marker_offd;
HYPRE_Int *dof_func_offd = NULL;
hypre_CSRMatrix *S_ext;
HYPRE_Real *S_ext_data;
HYPRE_Int *S_ext_i;
HYPRE_BigInt *S_ext_j;
hypre_CSRMatrix *P_diag;
hypre_CSRMatrix *P_offd;
HYPRE_Real *P_diag_data;
HYPRE_Int *P_diag_i;
HYPRE_Int *P_diag_j;
HYPRE_Real *P_offd_data;
HYPRE_Int *P_offd_i;
HYPRE_Int *P_offd_j;
HYPRE_Int P_diag_size, P_offd_size;
HYPRE_Int *P_marker, *P_marker_offd;
HYPRE_Int jj_counter,jj_counter_offd;
HYPRE_Int *jj_count, *jj_count_offd;
HYPRE_Int jj_begin_row,jj_begin_row_offd;
HYPRE_Int jj_end_row,jj_end_row_offd;
HYPRE_Int start_indexing = 0; /* start indexing for P_data at 0 */
HYPRE_Int n_fine = hypre_CSRMatrixNumRows(S_diag);
HYPRE_Int strong_f_marker;
HYPRE_Int *fine_to_coarse;
HYPRE_Int *coarse_counter;
//HYPRE_Int coarse_shift;
HYPRE_BigInt total_global_cpts;
HYPRE_Int num_cols_P_offd;
//HYPRE_BigInt my_first_cpt;
HYPRE_BigInt big_i2;
HYPRE_Int i,i1,i2;
HYPRE_Int j,jl,jj,jj1;
HYPRE_Int start;
HYPRE_Int c_num;
HYPRE_Real sum;
HYPRE_Real distribute;
HYPRE_Real zero = 0.0;
HYPRE_Real one = 1.0;
HYPRE_Int my_id;
HYPRE_Int num_procs;
HYPRE_Int num_threads;
HYPRE_Int num_sends;
HYPRE_Int index;
HYPRE_Int ns, ne, size, rest;
HYPRE_Int *int_buf_data;
HYPRE_BigInt col_1 = hypre_ParCSRMatrixFirstRowIndex(S);
HYPRE_Int local_numrows = hypre_CSRMatrixNumRows(S_diag);
HYPRE_BigInt col_n = col_1 + (HYPRE_BigInt)local_numrows;
HYPRE_Real wall_time; /* for debugging instrumentation */
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
#ifdef HYPRE_NO_GLOBAL_PARTITION
//my_first_cpt = num_cpts_global[0];
total_global_cpts = 0; /* we will set this later for the matrix in the setup */
/* if (myid == (num_procs -1)) total_global_cpts = coarse_pts_global[1];
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_INT, num_procs-1, comm);*/
#else
//my_first_cpt = num_cpts_global[my_id];
total_global_cpts = num_cpts_global[num_procs];
#endif
/*-------------------------------------------------------------------
 * Get the CF_marker data for the off-processor columns
 *-------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
CF_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1 && num_cols_S_offd)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(S);
comm_pkg = hypre_ParCSRMatrixCommPkg(S);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
/* pack the local CF_marker entries each neighbor rank needs */
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= CF_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
CF_marker_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
/* same exchange for dof_func when the system has multiple functions */
if (num_functions > 1)
{
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++]
= dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 1 CF_marker = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*----------------------------------------------------------------------
 * Get the ghost rows of S
 *---------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
if (num_procs > 1)
{
S_ext = hypre_ParCSRMatrixExtractBExt(S,S,1);
S_ext_i = hypre_CSRMatrixI(S_ext);
S_ext_j = hypre_CSRMatrixBigJ(S_ext);
S_ext_data = hypre_CSRMatrixData(S_ext);
}
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Comm 2 Get S_ext = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
 * First Pass: Determine size of P and fill in fine_to_coarse mapping.
 *-----------------------------------------------------------------------*/
/*-----------------------------------------------------------------------
 * Intialize counters and allocate mapping vector.
 *-----------------------------------------------------------------------*/
coarse_counter = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
fine_to_coarse = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < n_fine; i++) fine_to_coarse[i] = -1;
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
/*-----------------------------------------------------------------------
 * Loop over fine grid.
 *-----------------------------------------------------------------------*/
/* RDF: this looks a little tricky, but doable */
/* Rows are statically partitioned into num_threads contiguous chunks;
 * each thread counts its own C-points and nonzeros into slot j of the
 * per-thread arrays, prefix-summed after the loop. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,i1,jj,ns,ne,size,rest) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < num_threads; j++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
 * If i is a C-point, interpolation is the identity. Also set up
 * mapping vector.
 *--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
jj_count[j]++;
fine_to_coarse[i] = coarse_counter[j];
coarse_counter[j]++;
}
/*--------------------------------------------------------------------
 * If i is an F-point, interpolation is from the C-points that
 * strongly influence i.
 *--------------------------------------------------------------------*/
else
{
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
if (CF_marker[i1] >= 0)
{
jj_count[j]++;
}
}
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
if (CF_marker_offd[i1] >= 0)
{
jj_count_offd[j]++;
}
}
}
}
}
}
/*-----------------------------------------------------------------------
 * Allocate arrays.
 *-----------------------------------------------------------------------*/
/* inclusive prefix sums over the per-thread counters; the last slot
 * then holds the global totals for this rank */
for (i=0; i < num_threads-1; i++)
{
coarse_counter[i+1] += coarse_counter[i];
jj_count[i+1] += jj_count[i];
jj_count_offd[i+1] += jj_count_offd[i];
}
i = num_threads-1;
jj_counter = jj_count[i];
jj_counter_offd = jj_count_offd[i];
P_diag_size = jj_counter;
P_diag_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_diag_j = hypre_CTAlloc(HYPRE_Int, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_data = hypre_CTAlloc(HYPRE_Real, P_diag_size, HYPRE_MEMORY_HOST);
P_diag_i[n_fine] = jj_counter;
P_offd_size = jj_counter_offd;
P_offd_i = hypre_CTAlloc(HYPRE_Int, n_fine+1, HYPRE_MEMORY_HOST);
P_offd_j = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
P_offd_data = hypre_CTAlloc(HYPRE_Real, P_offd_size, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
 * Intialize some stuff.
 *-----------------------------------------------------------------------*/
jj_counter = start_indexing;
jj_counter_offd = start_indexing;
if (debug_flag==4)
{
wall_time = time_getWallclockSeconds() - wall_time;
hypre_printf("Proc = %d Interp: Internal work 1 = %f\n",
my_id, wall_time);
fflush(NULL);
}
/*-----------------------------------------------------------------------
 * Send and receive fine_to_coarse info.
 *-----------------------------------------------------------------------*/
if (debug_flag==4) wall_time = time_getWallclockSeconds();
/*-----------------------------------------------------------------------
 * Loop over fine grid points.
 *-----------------------------------------------------------------------*/
/* Second pass: same static row partition as the counting pass; each
 * thread rewinds its jj counters to the prefix-sum start of its chunk
 * and fills P's column indices and weights. */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,jl,i1,i2,jj,jj1,ns,ne,size,rest,sum,distribute,P_marker,P_marker_offd,strong_f_marker,jj_counter,jj_counter_offd,c_num,jj_begin_row,jj_end_row,jj_begin_row_offd,jj_end_row_offd) HYPRE_SMP_SCHEDULE
#endif
for (jl = 0; jl < num_threads; jl++)
{
size = n_fine/num_threads;
rest = n_fine - size*num_threads;
if (jl < rest)
{
ns = jl*size+jl;
ne = (jl+1)*size+jl+1;
}
else
{
ns = jl*size+rest;
ne = (jl+1)*size+rest;
}
jj_counter = 0;
if (jl > 0) jj_counter = jj_count[jl-1];
jj_counter_offd = 0;
if (jl > 0) jj_counter_offd = jj_count_offd[jl-1];
P_marker = hypre_CTAlloc(HYPRE_Int, n_fine, HYPRE_MEMORY_HOST);
P_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_S_offd, HYPRE_MEMORY_HOST);
for (i = 0; i < n_fine; i++)
{
P_marker[i] = -1;
}
for (i = 0; i < num_cols_S_offd; i++)
{
P_marker_offd[i] = -1;
}
strong_f_marker = -2;
for (i = ns; i < ne; i++)
{
/*--------------------------------------------------------------------
 * If i is a c-point, interpolation is the identity.
 *--------------------------------------------------------------------*/
if (CF_marker[i] >= 0)
{
P_diag_i[i] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i];
P_diag_data[jj_counter] = one;
jj_counter++;
}
/*--------------------------------------------------------------------
 * If i is an F-point, build interpolation.
 *--------------------------------------------------------------------*/
else
{
/* Diagonal part of P */
P_diag_i[i] = jj_counter;
jj_begin_row = jj_counter;
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
 * If neighbor i1 is a C-point, set column number in P_diag_j
 * and initialize interpolation weight to zero.
 *--------------------------------------------------------------*/
if (CF_marker[i1] >= 0)
{
P_marker[i1] = jj_counter;
P_diag_j[jj_counter] = fine_to_coarse[i1];
P_diag_data[jj_counter] = zero;
jj_counter++;
}
/*--------------------------------------------------------------
 * If neighbor i1 is an F-point, mark it as a strong F-point
 * whose connection needs to be distributed.
 *--------------------------------------------------------------*/
else
{
P_marker[i1] = strong_f_marker;
}
}
jj_end_row = jj_counter;
/* Off-Diagonal part of P */
P_offd_i[i] = jj_counter_offd;
jj_begin_row_offd = jj_counter_offd;
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*-----------------------------------------------------------
 * If neighbor i1 is a C-point, set column number in P_offd_j
 * and initialize interpolation weight to zero.
 *-----------------------------------------------------------*/
if (CF_marker_offd[i1] >= 0)
{
P_marker_offd[i1] = jj_counter_offd;
P_offd_j[jj_counter_offd] = i1;
P_offd_data[jj_counter_offd] = zero;
jj_counter_offd++;
}
/*-----------------------------------------------------------
 * If neighbor i1 is an F-point, mark it as a strong F-point
 * whose connection needs to be distributed.
 *-----------------------------------------------------------*/
else
{
P_marker_offd[i1] = strong_f_marker;
}
}
}
jj_end_row_offd = jj_counter_offd;
/* Loop over ith row of S. First, the diagonal part of S */
for (jj = S_diag_i[i]; jj < S_diag_i[i+1]; jj++)
{
i1 = S_diag_j[jj];
/*--------------------------------------------------------------
 * Case 1: neighbor i1 is a C-point and strongly influences i,
 * accumulate a_{i,i1} into the interpolation weight.
 *--------------------------------------------------------------*/
if (P_marker[i1] >= jj_begin_row)
{
P_diag_data[P_marker[i1]] += S_diag_data[jj];
}
/*--------------------------------------------------------------
 * Case 2: neighbor i1 is an F-point and strongly influences i,
 * distribute a_{i,i1} to C-points that strongly infuence i.
 * Note: currently no distribution to the diagonal in this case.
 *--------------------------------------------------------------*/
else if (P_marker[i1] == strong_f_marker)
{
sum = zero;
/*-----------------------------------------------------------
 * Loop over row of S for point i1 and calculate the sum
 * of the connections to c-points that strongly influence i.
 *-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++)
{
i2 = S_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row)
sum += S_diag_data[jj1];
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++)
{
i2 = S_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd)
sum += S_offd_data[jj1];
}
}
if (sum != 0)
{
distribute = S_diag_data[jj] / sum;
/*-----------------------------------------------------------
 * Loop over row of S for point i1 and do the distribution.
 *-----------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = S_diag_i[i1]; jj1 < S_diag_i[i1+1]; jj1++)
{
i2 = S_diag_j[jj1];
if (P_marker[i2] >= jj_begin_row)
P_diag_data[P_marker[i2]]
+= distribute * S_diag_data[jj1];
}
/* Off-Diagonal block part of row i1 */
if (num_procs > 1)
{
for (jj1 = S_offd_i[i1]; jj1 < S_offd_i[i1+1]; jj1++)
{
i2 = S_offd_j[jj1];
if (P_marker_offd[i2] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[i2]]
+= distribute * S_offd_data[jj1];
}
}
}
else
{
/* do nothing */
}
}
/*--------------------------------------------------------------
 * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
 * into the diagonal.
 *--------------------------------------------------------------*/
else
{
/* do nothing */
}
}
/*----------------------------------------------------------------
 * Still looping over ith row of S. Next, loop over the
 * off-diagonal part of S
 *---------------------------------------------------------------*/
if (num_procs > 1)
{
for (jj = S_offd_i[i]; jj < S_offd_i[i+1]; jj++)
{
i1 = S_offd_j[jj];
/*--------------------------------------------------------------
 * Case 1: neighbor i1 is a C-point and strongly influences i,
 * accumulate a_{i,i1} into the interpolation weight.
 *--------------------------------------------------------------*/
if (P_marker_offd[i1] >= jj_begin_row_offd)
{
P_offd_data[P_marker_offd[i1]] += S_offd_data[jj];
}
/*------------------------------------------------------------
 * Case 2: neighbor i1 is an F-point and strongly influences i,
 * distribute a_{i,i1} to C-points that strongly infuence i.
 * Note: currently no distribution to the diagonal in this case.
 *-----------------------------------------------------------*/
else if (P_marker_offd[i1] == strong_f_marker)
{
sum = zero;
/*---------------------------------------------------------
 * Loop over row of S_ext for point i1 and calculate the sum
 * of the connections to c-points that strongly influence i.
 *---------------------------------------------------------*/
/* find row number */
c_num = S_offd_j[jj];
for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num+1]; jj1++)
{
big_i2 = S_ext_j[jj1];
if (big_i2 >= col_1 && big_i2 < col_n)
{
/* in the diagonal block */
if (P_marker[(HYPRE_Int)(big_i2-col_1)] >= jj_begin_row)
sum += S_ext_data[jj1];
}
else
{
/* in the off_diagonal block */
j = hypre_BigBinarySearch(col_map_offd,big_i2,num_cols_S_offd);
if (j != -1)
{
if (P_marker_offd[j] >= jj_begin_row_offd)
sum += S_ext_data[jj1];
}
}
}
if (sum != 0)
{
distribute = S_offd_data[jj] / sum;
/*---------------------------------------------------------
 * Loop over row of S_ext for point i1 and do
 * the distribution.
 *--------------------------------------------------------*/
/* Diagonal block part of row i1 */
for (jj1 = S_ext_i[c_num]; jj1 < S_ext_i[c_num+1]; jj1++)
{
big_i2 = S_ext_j[jj1];
if (big_i2 >= col_1 && big_i2 < col_n) /* in the diagonal block */
{
if (P_marker[(HYPRE_Int)(big_i2-col_1)] >= jj_begin_row)
P_diag_data[P_marker[(HYPRE_Int)(big_i2-col_1)]]
+= distribute * S_ext_data[jj1];
}
else
{
/* check to see if it is in the off_diagonal block */
j = hypre_BigBinarySearch(col_map_offd,big_i2,num_cols_S_offd);
if (j != -1)
{
if (P_marker_offd[j] >= jj_begin_row_offd)
P_offd_data[P_marker_offd[j]]
+= distribute * S_ext_data[jj1];
}
}
}
}
else
{
/* do nothing */
}
}
/*-----------------------------------------------------------
 * Case 3: neighbor i1 weakly influences i, accumulate a_{i,i1}
 * into the diagonal.
 *-----------------------------------------------------------*/
else
{
/* do nothing */
}
}
}
/*-----------------------------------------------------------------
 * Set interpolation weight by dividing by the diagonal.
 *-----------------------------------------------------------------*/
/* normalize this row of P so its entries sum to one */
sum = 0.;
for (jj = jj_begin_row; jj < jj_end_row; jj++)
sum += P_diag_data[jj];
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
sum += P_offd_data[jj];
for (jj = jj_begin_row; jj < jj_end_row; jj++)
P_diag_data[jj] /= sum;
for (jj = jj_begin_row_offd; jj < jj_end_row_offd; jj++)
P_offd_data[jj] /= sum;
}
/* decrementing the marker value invalidates this row's strong-F
 * marks without having to reset P_marker / P_marker_offd */
strong_f_marker--;
P_offd_i[i+1] = jj_counter_offd;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
hypre_TFree(P_marker_offd, HYPRE_MEMORY_HOST);
}
P = hypre_ParCSRMatrixCreate(comm,
hypre_ParCSRMatrixGlobalNumRows(S),
total_global_cpts,
hypre_ParCSRMatrixColStarts(S),
num_cpts_global,
0,
P_diag_i[n_fine],
P_offd_i[n_fine]);
P_diag = hypre_ParCSRMatrixDiag(P);
hypre_CSRMatrixData(P_diag) = P_diag_data;
hypre_CSRMatrixI(P_diag) = P_diag_i;
hypre_CSRMatrixJ(P_diag) = P_diag_j;
P_offd = hypre_ParCSRMatrixOffd(P);
hypre_CSRMatrixData(P_offd) = P_offd_data;
hypre_CSRMatrixI(P_offd) = P_offd_i;
hypre_CSRMatrixJ(P_offd) = P_offd_j;
/* row starts are shared with S, so P must not free them */
hypre_ParCSRMatrixOwnsRowStarts(P) = 0;
/* Compress P, removing coefficients smaller than trunc_factor * Max */
if (trunc_factor != 0.0)
{
hypre_BoomerAMGInterpTruncation(P, trunc_factor, 0);
P_diag_data = hypre_CSRMatrixData(P_diag);
P_diag_i = hypre_CSRMatrixI(P_diag);
P_diag_j = hypre_CSRMatrixJ(P_diag);
P_offd_data = hypre_CSRMatrixData(P_offd);
P_offd_i = hypre_CSRMatrixI(P_offd);
P_offd_j = hypre_CSRMatrixJ(P_offd);
P_diag_size = P_diag_i[n_fine];
P_offd_size = P_offd_i[n_fine];
}
/* compress P_offd's column space: collect the distinct local column
 * indices actually used, then remap P_offd_j onto 0..num_cols_P_offd-1 */
num_cols_P_offd = 0;
if (P_offd_size)
{
P_marker = hypre_CTAlloc(HYPRE_Int, P_offd_size, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_marker[i] = P_offd_j[i];
hypre_qsort0(P_marker, 0, P_offd_size-1);
num_cols_P_offd = 1;
index = P_marker[0];
for (i=1; i < P_offd_size; i++)
{
if (P_marker[i] > index)
{
index = P_marker[i];
P_marker[num_cols_P_offd++] = index;
}
}
col_map_offd_P = hypre_CTAlloc(HYPRE_BigInt, num_cols_P_offd, HYPRE_MEMORY_HOST);
tmp_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_P_offd, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_P_offd; i++)
tmp_map_offd[i] = P_marker[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
for (i=0; i < P_offd_size; i++)
P_offd_j[i] = hypre_BinarySearch(tmp_map_offd,
P_offd_j[i],
num_cols_P_offd);
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
if (num_cols_P_offd)
{
hypre_ParCSRMatrixColMapOffd(P) = col_map_offd_P;
hypre_CSRMatrixNumCols(P_offd) = num_cols_P_offd;
}
hypre_GetCommPkgRTFromCommPkgA(P,S,fine_to_coarse, tmp_map_offd);
*P_ptr = P;
hypre_TFree(CF_marker_offd, HYPRE_MEMORY_HOST);
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(fine_to_coarse, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_map_offd, HYPRE_MEMORY_HOST);
hypre_TFree(coarse_counter, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd, HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_CSRMatrixDestroy(S_ext);
return(0);
}
|
jump-openmp.c | /* { dg-do compile } */
/* { dg-options "-fcilkplus -fopenmp" } */
/* { dg-require-effective-target fopenmp } */
int *a, *b, c;
/* Branching OUT of a "#pragma simd" loop must be diagnosed:
 * the return below exits the Cilk Plus structured block. */
void foo()
{
#pragma simd
for (int i=0; i < 1000; ++i)
{
a[i] = b[i];
if (c == 5)
return; /* { dg-error "invalid branch to/from Cilk Plus structured block" } */
}
}
/* Jumping INTO a "#pragma simd" loop body from outside must be
 * diagnosed: "goto lab" would enter the structured block. */
void bar()
{
#pragma simd
for (int i=0; i < 1000; ++i)
{
lab:
a[i] = b[i];
}
if (c == 6)
goto lab; /* { dg-error "invalid entry to Cilk Plus structured block" } */
}
/* OpenMP structured blocks: a branch across the region boundary is
 * invalid in either direction (bad1 leaves, bad2 enters), while a
 * jump that stays entirely inside the region (ok1) is accepted. */
void baz()
{
bad1:
#pragma omp parallel
goto bad1; /* { dg-error "invalid branch to/from OpenMP structured block" } */
goto bad2; /* { dg-error "invalid entry to OpenMP structured block" } */
#pragma omp parallel
{
bad2: ;
}
#pragma omp parallel
{
int i;
goto ok1;
for (i = 0; i < 10; ++i)
{ ok1: break; }
}
}
|
ccv_bbf.c | #include "ccv.h"
#include "ccv_internal.h"
#include <sys/time.h>
#ifdef HAVE_GSL
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#endif
#ifdef USE_OPENMP
#include <omp.h>
#endif
/* Default parameters for the BBF detector (used by callers of
 * ccv_bbf_detect_objects): 5 scale intervals, at least 2 neighboring
 * hits required, accurate mode on, no extra flags, and a 24x24 base
 * window (presumably the trained classifier size -- confirm). */
const ccv_bbf_param_t ccv_bbf_default_params = {
.interval = 5,
.min_neighbors = 2,
.accurate = 1,
.flags = 0,
.size = {
24,
24,
},
};
#define _ccv_width_padding(x) (((x) + 3) & -4)
/* Evaluate one BBF feature on a 3-scale image pyramid.
 * u8[z] points at the pixel data of scale z and step[z] is its row
 * stride. Returns 1 when every "positive" sample point is strictly
 * brighter than every "negative" sample point, 0 otherwise. The scan
 * keeps a running min over positive points and max over negative
 * points and bails out as soon as the invariant min > max breaks. */
static inline int _ccv_run_bbf_feature(ccv_bbf_feature_t* feature, int* step, unsigned char** u8)
{
#define pf_at(i) (*(u8[feature->pz[i]] + feature->px[i] + feature->py[i] * step[feature->pz[i]]))
#define nf_at(i) (*(u8[feature->nz[i]] + feature->nx[i] + feature->ny[i] * step[feature->nz[i]]))
	unsigned char min_positive = pf_at(0);
	unsigned char max_negative = nf_at(0);
	/* shortcut: the first pair already decides most rejections */
	if (min_positive <= max_negative)
		return 0;
	int k;
	for (k = 1; k < feature->size; k++)
	{
		if (feature->pz[k] >= 0)
		{
			int p = pf_at(k);
			if (p < min_positive)
			{
				if (p <= max_negative)
					return 0;
				min_positive = p;
			}
		}
		if (feature->nz[k] >= 0)
		{
			int n = nf_at(k);
			if (n > max_negative)
			{
				if (min_positive <= n)
					return 0;
				max_negative = n;
			}
		}
	}
#undef pf_at
#undef nf_at
	return 1;
}
/* Load one BBF stage classifier from the text file at `file`.
 * On-disk format: count, threshold (a float stored as its raw int bit
 * pattern), then per feature: size, `size` (px,py,pz) triples,
 * `size` (nx,ny,nz) triples, and finally the two alpha values (also
 * raw int bit patterns). Returns 0 on success, -1 if the file cannot
 * be opened.
 * NOTE(review): fscanf results are OR-ed into `stat` but never
 * inspected, and the ccmalloc results are used unchecked -- a short
 * or malformed file silently yields partially-initialized data. */
static int _ccv_read_bbf_stage_classifier(const char* file, ccv_bbf_stage_classifier_t* classifier)
{
FILE* r = fopen(file, "r");
if (r == 0) return -1;
int stat = 0;
stat |= fscanf(r, "%d", &classifier->count);
/* the union reinterprets the scanned int bits as a float without
 * pointer-cast type punning */
union { float fl; int i; } fli;
stat |= fscanf(r, "%d", &fli.i);
classifier->threshold = fli.fl;
classifier->feature = (ccv_bbf_feature_t*)ccmalloc(classifier->count * sizeof(ccv_bbf_feature_t));
classifier->alpha = (float*)ccmalloc(classifier->count * 2 * sizeof(float));
int i, j;
for (i = 0; i < classifier->count; i++)
{
stat |= fscanf(r, "%d", &classifier->feature[i].size);
for (j = 0; j < classifier->feature[i].size; j++)
{
stat |= fscanf(r, "%d %d %d", &classifier->feature[i].px[j], &classifier->feature[i].py[j], &classifier->feature[i].pz[j]);
stat |= fscanf(r, "%d %d %d", &classifier->feature[i].nx[j], &classifier->feature[i].ny[j], &classifier->feature[i].nz[j]);
}
union { float fl; int i; } flia, flib;
stat |= fscanf(r, "%d %d", &flia.i, &flib.i);
classifier->alpha[i * 2] = flia.fl;
classifier->alpha[i * 2 + 1] = flib.fl;
}
fclose(r);
return 0;
}
#ifdef HAVE_GSL
/* Wall-clock timestamp in microseconds, used to time training phases.
 * Truncates into unsigned int, so only differences between nearby
 * calls are meaningful. */
static unsigned int _ccv_bbf_time_measure()
{
	struct timeval now;
	gettimeofday(&now, 0);
	return now.tv_sec * 1000000 + now.tv_usec;
}
#define less_than(a, b, aux) ((a) < (b))
CCV_IMPLEMENT_QSORT(_ccv_sort_32f, float, less_than)
#undef less_than
/* Score every positive sample into peval and every negative sample
 * into neval with the given stage classifier. Each sample is a packed
 * 3-scale pyramid (full, half, quarter resolution, each row-padded to
 * a multiple of 4 bytes). Either set may be empty (count 0 with NULL
 * data/output), as _ccv_prune_positive_data relies on. */
static void _ccv_bbf_eval_data(ccv_bbf_stage_classifier_t* classifier, unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_size_t size, float* peval, float* neval)
{
	int steps[] = { _ccv_width_padding(size.width),
		_ccv_width_padding(size.width >> 1),
		_ccv_width_padding(size.width >> 2) };
	/* byte offsets of the half- and quarter-scale planes in a sample */
	int scale1_off = steps[0] * size.height;
	int scale2_off = scale1_off + steps[1] * (size.height >> 1);
	int t, i, j;
	/* t == 0 scores the positive set, t == 1 the negative set */
	for (t = 0; t < 2; t++)
	{
		unsigned char** data = (t == 0) ? posdata : negdata;
		float* out = (t == 0) ? peval : neval;
		int num = (t == 0) ? posnum : negnum;
		for (i = 0; i < num; i++)
		{
			unsigned char* u8[] = { data[i], data[i] + scale1_off, data[i] + scale2_off };
			float sum = 0;
			float* alpha = classifier->alpha;
			ccv_bbf_feature_t* feature = classifier->feature;
			for (j = 0; j < classifier->count; ++j, alpha += 2, ++feature)
				sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
			out[i] = sum;
		}
	}
}
/* Run the existing cascade over the positive samples and drop (and
 * free) every sample some stage rejects. The survivors are compacted
 * to the front of posdata; returns how many remain. */
static int _ccv_prune_positive_data(ccv_bbf_classifier_cascade_t* cascade, unsigned char** posdata, int posnum, ccv_size_t size)
{
	float* scores = (float*)ccmalloc(posnum * sizeof(float));
	int stage, src, dst;
	int remaining = posnum;
	for (stage = 0; stage < cascade->count; stage++)
	{
		_ccv_bbf_eval_data(cascade->stage_classifier + stage, posdata, remaining, 0, 0, size, scores, 0);
		dst = 0;
		for (src = 0; src < remaining; src++)
		{
			if (scores[src] >= cascade->stage_classifier[stage].threshold)
				posdata[dst++] = posdata[src];
			else
				ccfree(posdata[src]);
		}
		remaining = dst;
	}
	ccfree(scores);
	return remaining;
}
/* Mine hard-negative training samples ("bootstrapping"): run the
 * current cascade over the background images, randomly pick detected
 * (false-positive) windows, and keep those the full cascade still
 * accepts, packed as 3-scale pyramids into negdata. Later rounds flip
 * the images to squeeze out more negatives; the loop ends when negnum
 * samples were collected or a round yields nothing new.
 * Returns the number of negatives actually collected (<= negnum).
 * Caller owns the negdata[.] buffers (ccfree). */
static int _ccv_prepare_background_data(ccv_bbf_classifier_cascade_t* cascade, char** bgfiles, int bgnum, unsigned char** negdata, int negnum)
{
	int t, i, j, k, q;
	int negperbg;
	int negtotal = 0;
	int steps[] = { _ccv_width_padding(cascade->size.width),
		_ccv_width_padding(cascade->size.width >> 1),
		_ccv_width_padding(cascade->size.width >> 2) };
	int isizs0 = steps[0] * cascade->size.height;
	int isizs1 = steps[1] * (cascade->size.height >> 1);
	int isizs2 = steps[2] * (cascade->size.height >> 2);
	int* idcheck = (int*)ccmalloc(negnum * sizeof(int));
	gsl_rng_env_setup();
	gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
	/* seed from the heap address: cheap per-run entropy (intentional) */
	gsl_rng_set(rng, (unsigned long int)idcheck);
	ccv_size_t imgsz = cascade->size;
	int rneg = negtotal;
	for (t = 0; negtotal < negnum; t++)
	{
		printf("preparing negative data ... 0%%");
		for (i = 0; i < bgnum; i++)
		{
			/* early rounds spread the quota over the remaining files */
			negperbg = (t < 2) ? (negnum - negtotal) / (bgnum - i) + 1 : negnum - negtotal;
			ccv_dense_matrix_t* image = 0;
			ccv_read(bgfiles[i], &image, CCV_IO_GRAY | CCV_IO_ANY_FILE);
			/* BUGFIX: check for a failed read BEFORE touching image; the
			 * original asserted on image->type first, dereferencing NULL
			 * whenever ccv_read could not decode the file. */
			if (image == 0)
			{
				printf("\n%s file corrupted\n", bgfiles[i]);
				continue;
			}
			assert((image->type & CCV_C1) && (image->type & CCV_8U));
			/* vary the data across rounds by mirroring */
			if (t % 2 != 0)
				ccv_flip(image, 0, 0, CCV_FLIP_X);
			if (t % 4 >= 2)
				ccv_flip(image, 0, 0, CCV_FLIP_Y);
			ccv_bbf_param_t params = { .interval = 3, .min_neighbors = 0, .accurate = 1, .flags = 0, .size = cascade->size };
			ccv_array_t* detected = ccv_bbf_detect_objects(image, &cascade, 1, params);
			memset(idcheck, 0, ccv_min(detected->rnum, negperbg) * sizeof(int));
			for (j = 0; j < ccv_min(detected->rnum, negperbg); j++)
			{
				/* draw a detection index not used before and fully inside
				 * the image; retry until both conditions hold */
				int r = gsl_rng_uniform_int(rng, detected->rnum);
				int flag = 1;
				ccv_rect_t* rect = (ccv_rect_t*)ccv_array_get(detected, r);
				while (flag) {
					flag = 0;
					for (k = 0; k < j; k++)
						if (r == idcheck[k])
						{
							flag = 1;
							r = gsl_rng_uniform_int(rng, detected->rnum);
							break;
						}
					rect = (ccv_rect_t*)ccv_array_get(detected, r);
					if ((rect->x < 0) || (rect->y < 0) || (rect->width + rect->x >= image->cols) || (rect->height + rect->y >= image->rows))
					{
						flag = 1;
						r = gsl_rng_uniform_int(rng, detected->rnum);
					}
				}
				idcheck[j] = r;
				/* build the packed 3-scale pyramid for this window */
				ccv_dense_matrix_t* temp = 0;
				ccv_dense_matrix_t* imgs0 = 0;
				ccv_dense_matrix_t* imgs1 = 0;
				ccv_dense_matrix_t* imgs2 = 0;
				ccv_slice(image, (ccv_matrix_t**)&temp, 0, rect->y, rect->x, rect->height, rect->width);
				ccv_resample(temp, &imgs0, 0, imgsz.height, imgsz.width, CCV_INTER_AREA);
				assert(imgs0->step == steps[0]);
				ccv_matrix_free(temp);
				ccv_sample_down(imgs0, &imgs1, 0, 0, 0);
				assert(imgs1->step == steps[1]);
				ccv_sample_down(imgs1, &imgs2, 0, 0, 0);
				assert(imgs2->step == steps[2]);
				negdata[negtotal] = (unsigned char*)ccmalloc(isizs0 + isizs1 + isizs2);
				unsigned char* u8s0 = negdata[negtotal];
				unsigned char* u8s1 = negdata[negtotal] + isizs0;
				unsigned char* u8s2 = negdata[negtotal] + isizs0 + isizs1;
				unsigned char* u8[] = { u8s0, u8s1, u8s2 };
				memcpy(u8s0, imgs0->data.u8, imgs0->rows * imgs0->step);
				ccv_matrix_free(imgs0);
				memcpy(u8s1, imgs1->data.u8, imgs1->rows * imgs1->step);
				ccv_matrix_free(imgs1);
				memcpy(u8s2, imgs2->data.u8, imgs2->rows * imgs2->step);
				ccv_matrix_free(imgs2);
				/* keep the window only if every cascade stage accepts it
				 * (i.e. it is a genuine hard negative) */
				flag = 1;
				ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier;
				for (k = 0; k < cascade->count; ++k, ++classifier)
				{
					float sum = 0;
					float* alpha = classifier->alpha;
					ccv_bbf_feature_t* feature = classifier->feature;
					for (q = 0; q < classifier->count; ++q, alpha += 2, ++feature)
						sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
					if (sum < classifier->threshold)
					{
						flag = 0;
						break;
					}
				}
				if (!flag)
					ccfree(negdata[negtotal]);
				else {
					++negtotal;
					if (negtotal >= negnum)
						break;
				}
			}
			ccv_array_free(detected);
			ccv_matrix_free(image);
			ccv_drain_cache();
			printf("\rpreparing negative data ... %2d%%", 100 * negtotal / negnum);
			fflush(0);
			if (negtotal >= negnum)
				break;
		}
		/* a full round with no new negatives means we are saturated */
		if (rneg == negtotal)
			break;
		rneg = negtotal;
		printf("\nentering additional round %d\n", t + 1);
	}
	gsl_rng_free(rng);
	ccfree(idcheck);
	ccv_drain_cache();
	printf("\n");
	return negtotal;
}
/* Pack each positive training image into the 3-scale pyramid layout
 * the feature evaluator expects: the full-size image, its 2x
 * downsample and its 4x downsample, stored back to back in one
 * ccmalloc'd buffer per sample (ownership passes to the caller).
 * Every input must already be a single-channel 8-bit image of exactly
 * the classifier window size. */
static void _ccv_prepare_positive_data(ccv_dense_matrix_t** posimg, unsigned char** posdata, ccv_size_t size, int posnum)
{
	printf("preparing positive data ... 0%%");
	int idx;
	for (idx = 0; idx < posnum; idx++)
	{
		ccv_dense_matrix_t* scale0 = posimg[idx];
		ccv_dense_matrix_t* scale1 = 0;
		ccv_dense_matrix_t* scale2 = 0;
		assert((scale0->type & CCV_C1) && (scale0->type & CCV_8U) && scale0->rows == size.height && scale0->cols == size.width);
		ccv_sample_down(scale0, &scale1, 0, 0, 0);
		ccv_sample_down(scale1, &scale2, 0, 0, 0);
		int bytes0 = scale0->rows * scale0->step;
		int bytes1 = scale1->rows * scale1->step;
		int bytes2 = scale2->rows * scale2->step;
		unsigned char* packed = (unsigned char*)ccmalloc(bytes0 + bytes1 + bytes2);
		posdata[idx] = packed;
		memcpy(packed, scale0->data.u8, bytes0);
		memcpy(packed + bytes0, scale1->data.u8, bytes1);
		memcpy(packed + bytes0 + bytes1, scale2->data.u8, bytes2);
		printf("\rpreparing positive data ... %2d%%", 100 * (idx + 1) / posnum);
		fflush(0);
		ccv_matrix_free(scale1);
		ccv_matrix_free(scale2);
	}
	ccv_drain_cache();
	printf("\n");
}
/* A candidate BBF feature ("gene") tracked by the genetic / float-search
 * optimizers, together with its bookkeeping state. */
typedef struct {
	double fitness; /* combined score of accuracy, age decay and point-count bonus (see _ccv_bbf_genetic_fitness) */
	int pk, nk; /* number of positive / negative comparison points in use */
	int age; /* rounds survived unchanged; older genes get their fitness decayed */
	double error; /* weighted classification error under the current boosting distribution */
	ccv_bbf_feature_t feature; /* the actual point-comparison feature */
} ccv_bbf_gene_t;
/* Score a gene for selection: reward low error, decay with age, and give a
 * mild exponential bonus (1.015 per point) for features with more
 * comparison points. */
static inline void _ccv_bbf_genetic_fitness(ccv_bbf_gene_t* gene)
{
	double accuracy = 1 - gene->error;
	double age_decay = exp(-0.01 * gene->age);
	double size_bonus = exp((gene->pk + gene->nk) * log(1.015));
	gene->fitness = accuracy * age_decay * size_bonus;
}
/* Return 1 if the point (x, y) at pyramid level z already appears among the
 * gene's positive or negative comparison points, 0 otherwise. Used to keep
 * randomly sampled points distinct within one feature. */
static inline int _ccv_bbf_exist_gene_feature(ccv_bbf_gene_t* gene, int x, int y, int z)
{
	int found = 0;
	int t;
	for (t = 0; t < gene->pk && !found; t++)
		found = (gene->feature.pz[t] == z && gene->feature.px[t] == x && gene->feature.py[t] == y);
	for (t = 0; t < gene->nk && !found; t++)
		found = (gene->feature.nz[t] == z && gene->feature.nx[t] == x && gene->feature.ny[t] == y);
	return found;
}
/* Fill a gene with a fresh random feature: random positive/negative point
 * counts (at least CCV_BBF_POINT_MIN in total) and distinct random points
 * drawn across the three pyramid levels. rows/cols give the per-level
 * dimensions. The order of RNG draws matches the original implementation
 * exactly. */
static inline void _ccv_bbf_randomize_gene(gsl_rng* rng, ccv_bbf_gene_t* gene, int* rows, int* cols)
{
	int t;
	/* re-draw the counts until at least CCV_BBF_POINT_MIN points total */
	do {
		gene->pk = gsl_rng_uniform_int(rng, CCV_BBF_POINT_MAX - 1) + 1;
		gene->nk = gsl_rng_uniform_int(rng, CCV_BBF_POINT_MAX - 1) + 1;
	} while (gene->pk + gene->nk < CCV_BBF_POINT_MIN);
	gene->feature.size = ccv_max(gene->pk, gene->nk);
	gene->age = 0;
	/* -1 marks an unused point slot */
	for (t = 0; t < CCV_BBF_POINT_MAX; t++)
		gene->feature.pz[t] = gene->feature.nz[t] = -1;
	int px, py, pz;
	for (t = 0; t < gene->pk; t++)
	{
		/* keep sampling until the point is not already used by this gene */
		do {
			pz = gsl_rng_uniform_int(rng, 3);
			px = gsl_rng_uniform_int(rng, cols[pz]);
			py = gsl_rng_uniform_int(rng, rows[pz]);
		} while (_ccv_bbf_exist_gene_feature(gene, px, py, pz));
		gene->feature.pz[t] = pz;
		gene->feature.px[t] = px;
		gene->feature.py[t] = py;
	}
	for (t = 0; t < gene->nk; t++)
	{
		do {
			pz = gsl_rng_uniform_int(rng, 3);
			px = gsl_rng_uniform_int(rng, cols[pz]);
			py = gsl_rng_uniform_int(rng, rows[pz]);
		} while (_ccv_bbf_exist_gene_feature(gene, px, py, pz));
		gene->feature.nz[t] = pz;
		gene->feature.nx[t] = px;
		gene->feature.ny[t] = py;
	}
}
/* Weighted error of a feature over the boosting distribution: the sum of pw
 * over rejected positives plus nw over accepted negatives. Each sample
 * buffer packs the three pyramid levels back-to-back (see
 * _ccv_prepare_positive_data). */
static inline double _ccv_bbf_error_rate(ccv_bbf_feature_t* feature, unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_size_t size, double* pw, double* nw)
{
	int steps[] = { _ccv_width_padding(size.width),
					_ccv_width_padding(size.width >> 1),
					_ccv_width_padding(size.width >> 2) };
	/* byte offsets of levels 1 and 2 inside each packed sample buffer */
	int off1 = steps[0] * size.height;
	int off2 = off1 + steps[1] * (size.height >> 1);
	double error = 0;
	int i;
	/* a positive sample the feature rejects contributes its weight */
	for (i = 0; i < posnum; i++)
	{
		unsigned char* u8[] = { posdata[i], posdata[i] + off1, posdata[i] + off2 };
		if (!_ccv_run_bbf_feature(feature, steps, u8))
			error += pw[i];
	}
	/* a negative sample the feature accepts contributes its weight */
	for (i = 0; i < negnum; i++)
	{
		unsigned char* u8[] = { negdata[i], negdata[i] + off1, negdata[i] + off2 };
		if (_ccv_run_bbf_feature(feature, steps, u8))
			error += nw[i];
	}
	return error;
}
/* Instantiate a qsort over genes ordered by DESCENDING fitness (the macro
 * plays the role of "less than", so >= yields fittest-first order).
 * NOTE(review): a non-strict comparator (true on equality) is normally
 * unsafe for quicksort partitioning — presumably CCV_IMPLEMENT_QSORT
 * tolerates it; confirm against its definition. */
#define less_than(fit1, fit2, aux) ((fit1).fitness >= (fit2).fitness)
static CCV_IMPLEMENT_QSORT(_ccv_bbf_genetic_qsort, ccv_bbf_gene_t, less_than)
#undef less_than
/* Select a strong BBF feature by genetic search over a population of
 * pnum = ftnum * 100 genes. Each round: the ftnum fittest survive, mnum
 * mutants are bred from them (add / remove / refine a point), hnum hybrids
 * take positive points from one parent and negative points from another,
 * and rnum genes are redrawn at random. Stops when the best error has not
 * improved for 40 consecutive rounds.
 * NOTE(review): `best` is only assigned when some round achieves
 * error < 1 (best_err's initial value); if that never happens the
 * uninitialized struct is returned — confirm callers guarantee at least
 * one improving round. */
static ccv_bbf_feature_t _ccv_bbf_genetic_optimize(unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, int ftnum, ccv_size_t size, double* pw, double* nw)
{
	ccv_bbf_feature_t best;
	/* seed (random method): fold the current weights' bit pattern into the
	 * seed so different boosting rounds explore different sequences */
	gsl_rng_env_setup();
	gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
	union { unsigned long int li; double db; } dbli;
	dbli.db = pw[0] + nw[0];
	gsl_rng_set(rng, dbli.li);
	int i, j;
	int pnum = ftnum * 100;
	assert(pnum > 0);
	ccv_bbf_gene_t* gene = (ccv_bbf_gene_t*)ccmalloc(pnum * sizeof(ccv_bbf_gene_t));
	int rows[] = { size.height, size.height >> 1, size.height >> 2 };
	int cols[] = { size.width, size.width >> 1, size.width >> 2 };
	for (i = 0; i < pnum; i++)
		_ccv_bbf_randomize_gene(rng, &gene[i], rows, cols);
	unsigned int timer = _ccv_bbf_time_measure();
#ifdef USE_OPENMP
#pragma omp parallel for private(i) schedule(dynamic)
#endif
	for (i = 0; i < pnum; i++)
		gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw);
	timer = _ccv_bbf_time_measure() - timer;
	for (i = 0; i < pnum; i++)
		_ccv_bbf_genetic_fitness(&gene[i]);
	double best_err = 1;
	int rnum = ftnum * 39; /* number of randomize */
	int mnum = ftnum * 40; /* number of mutation */
	int hnum = ftnum * 20; /* number of hybrid */
	/* iteration stop crit : best no change in 40 iterations */
	int it = 0, t;
	for (t = 0 ; it < 40; ++it, ++t)
	{
		/* find the current error minimizer and recompute its error, which
		 * becomes the authoritative value for this round */
		int min_id = 0;
		double min_err = gene[0].error;
		for (i = 1; i < pnum; i++)
			if (gene[i].error < min_err)
			{
				min_id = i;
				min_err = gene[i].error;
			}
		min_err = gene[min_id].error = _ccv_bbf_error_rate(&gene[min_id].feature, posdata, posnum, negdata, negnum, size, pw, nw);
		if (min_err < best_err)
		{
			best_err = min_err;
			memcpy(&best, &gene[min_id].feature, sizeof(best));
			printf("best bbf feature with error %f\n|-size: %d\n|-positive point: ", best_err, best.size);
			for (i = 0; i < best.size; i++)
				printf("(%d %d %d), ", best.px[i], best.py[i], best.pz[i]);
			printf("\n|-negative point: ");
			for (i = 0; i < best.size; i++)
				printf("(%d %d %d), ", best.nx[i], best.ny[i], best.nz[i]);
			printf("\n");
			it = 0; /* progress made: restart the stagnation counter */
		}
		printf("minimum error achieved in round %d(%d) : %f with %d ms\n", t, it, min_err, timer / 1000);
		_ccv_bbf_genetic_qsort(gene, pnum, 0);
		/* the ftnum fittest survive unchanged, aging by one round */
		for (i = 0; i < ftnum; i++)
			++gene[i].age;
		for (i = ftnum; i < ftnum + mnum; i++)
		{
			int parent = gsl_rng_uniform_int(rng, ftnum);
			memcpy(gene + i, gene + parent, sizeof(ccv_bbf_gene_t));
			/* three mutation strategy : 1. add, 2. remove, 3. refine */
			int pnm, pn = gsl_rng_uniform_int(rng, 2);
			/* pn selects positive (0) or negative (1) side via these views */
			int* pnk[] = { &gene[i].pk, &gene[i].nk };
			int* pnx[] = { gene[i].feature.px, gene[i].feature.nx };
			int* pny[] = { gene[i].feature.py, gene[i].feature.ny };
			int* pnz[] = { gene[i].feature.pz, gene[i].feature.nz };
			int x, y, z;
			int victim, decay = 1;
			do {
				switch (gsl_rng_uniform_int(rng, 3))
				{
				case 0: /* add */
					if (gene[i].pk == CCV_BBF_POINT_MAX && gene[i].nk == CCV_BBF_POINT_MAX)
						break;
					while (*pnk[pn] + 1 > CCV_BBF_POINT_MAX)
						pn = gsl_rng_uniform_int(rng, 2);
					do {
						z = gsl_rng_uniform_int(rng, 3);
						x = gsl_rng_uniform_int(rng, cols[z]);
						y = gsl_rng_uniform_int(rng, rows[z]);
					} while (_ccv_bbf_exist_gene_feature(&gene[i], x, y, z));
					pnz[pn][*pnk[pn]] = z;
					pnx[pn][*pnk[pn]] = x;
					pny[pn][*pnk[pn]] = y;
					++(*pnk[pn]);
					gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk);
					decay = gene[i].age = 0;
					break;
				case 1: /* remove */
					if (gene[i].pk + gene[i].nk <= CCV_BBF_POINT_MIN) /* at least 3 points have to be examed */
						break;
					while (*pnk[pn] - 1 <= 0) // || *pnk[pn] + *pnk[!pn] - 1 < CCV_BBF_POINT_MIN)
						pn = gsl_rng_uniform_int(rng, 2);
					victim = gsl_rng_uniform_int(rng, *pnk[pn]);
					/* shift the tail down over the removed point */
					for (j = victim; j < *pnk[pn] - 1; j++)
					{
						pnz[pn][j] = pnz[pn][j + 1];
						pnx[pn][j] = pnx[pn][j + 1];
						pny[pn][j] = pny[pn][j + 1];
					}
					pnz[pn][*pnk[pn] - 1] = -1;
					--(*pnk[pn]);
					gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk);
					decay = gene[i].age = 0;
					break;
				case 2: /* refine */
					pnm = gsl_rng_uniform_int(rng, *pnk[pn]);
					do {
						z = gsl_rng_uniform_int(rng, 3);
						x = gsl_rng_uniform_int(rng, cols[z]);
						y = gsl_rng_uniform_int(rng, rows[z]);
					} while (_ccv_bbf_exist_gene_feature(&gene[i], x, y, z));
					pnz[pn][pnm] = z;
					pnx[pn][pnm] = x;
					pny[pn][pnm] = y;
					decay = gene[i].age = 0;
					break;
				}
			} while (decay);
		}
		for (i = ftnum + mnum; i < ftnum + mnum + hnum; i++)
		{
			/* hybrid strategy: taking positive points from dad, negative points from mum */
			int dad, mum;
			do {
				dad = gsl_rng_uniform_int(rng, ftnum);
				mum = gsl_rng_uniform_int(rng, ftnum);
			} while (dad == mum || gene[dad].pk + gene[mum].nk < CCV_BBF_POINT_MIN); /* at least 3 points have to be examed */
			for (j = 0; j < CCV_BBF_POINT_MAX; j++)
			{
				gene[i].feature.pz[j] = -1;
				gene[i].feature.nz[j] = -1;
			}
			gene[i].pk = gene[dad].pk;
			for (j = 0; j < gene[i].pk; j++)
			{
				gene[i].feature.pz[j] = gene[dad].feature.pz[j];
				gene[i].feature.px[j] = gene[dad].feature.px[j];
				gene[i].feature.py[j] = gene[dad].feature.py[j];
			}
			gene[i].nk = gene[mum].nk;
			for (j = 0; j < gene[i].nk; j++)
			{
				gene[i].feature.nz[j] = gene[mum].feature.nz[j];
				gene[i].feature.nx[j] = gene[mum].feature.nx[j];
				gene[i].feature.ny[j] = gene[mum].feature.ny[j];
			}
			gene[i].feature.size = ccv_max(gene[i].pk, gene[i].nk);
			gene[i].age = 0;
		}
		/* the remaining rnum slots are re-seeded completely at random */
		for (i = ftnum + mnum + hnum; i < ftnum + mnum + hnum + rnum; i++)
			_ccv_bbf_randomize_gene(rng, &gene[i], rows, cols);
		timer = _ccv_bbf_time_measure();
#ifdef USE_OPENMP
#pragma omp parallel for private(i) schedule(dynamic)
#endif
		for (i = 0; i < pnum; i++)
			gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw);
		timer = _ccv_bbf_time_measure() - timer;
		for (i = 0; i < pnum; i++)
			_ccv_bbf_genetic_fitness(&gene[i]);
	}
	ccfree(gene);
	gsl_rng_free(rng);
	return best;
}
/* Instantiate a qsort over genes ordered by ASCENDING error (best first). */
#define less_than(fit1, fit2, aux) ((fit1).error < (fit2).error)
static CCV_IMPLEMENT_QSORT(_ccv_bbf_best_qsort, ccv_bbf_gene_t, less_than)
#undef less_than
/* Evaluate every candidate gene's weighted error, sort ascending by error,
 * and return the lowest-error gene that uses at least point_min comparison
 * points in total.
 * NOTE(review): if no gene satisfies point_min, min_id stays 0 and the
 * overall error minimizer is returned regardless of the constraint —
 * presumably acceptable to callers; confirm. */
static ccv_bbf_gene_t _ccv_bbf_best_gene(ccv_bbf_gene_t* gene, int pnum, int point_min, unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_size_t size, double* pw, double* nw)
{
	int i;
	unsigned int timer = _ccv_bbf_time_measure();
#ifdef USE_OPENMP
#pragma omp parallel for private(i) schedule(dynamic)
#endif
	for (i = 0; i < pnum; i++)
		gene[i].error = _ccv_bbf_error_rate(&gene[i].feature, posdata, posnum, negdata, negnum, size, pw, nw);
	timer = _ccv_bbf_time_measure() - timer;
	_ccv_bbf_best_qsort(gene, pnum, 0);
	int min_id = 0;
	double min_err = gene[0].error;
	/* genes are sorted by error, so the first one meeting the size
	 * constraint is the winner */
	for (i = 0; i < pnum; i++)
		if (gene[i].nk + gene[i].pk >= point_min)
		{
			min_id = i;
			min_err = gene[i].error;
			break;
		}
	printf("local best bbf feature with error %f\n|-size: %d\n|-positive point: ", min_err, gene[min_id].feature.size);
	for (i = 0; i < gene[min_id].feature.size; i++)
		printf("(%d %d %d), ", gene[min_id].feature.px[i], gene[min_id].feature.py[i], gene[min_id].feature.pz[i]);
	printf("\n|-negative point: ");
	for (i = 0; i < gene[min_id].feature.size; i++)
		printf("(%d %d %d), ", gene[min_id].feature.nx[i], gene[min_id].feature.ny[i], gene[min_id].feature.nz[i]);
	printf("\nthe computation takes %d ms\n", timer / 1000);
	return gene[min_id];
}
/* Greedy "float search" optimization of a BBF feature. When best_feature is
 * NULL, it bootstraps a 1-positive/1-negative pixel pair by alternately
 * enumerating every pixel for one side while holding the other fixed;
 * otherwise it resumes from the supplied feature. It then repeatedly
 * enumerates every single-point add / remove / refine / swap move, keeps the
 * best-scoring neighbor, and stops when no move improves the error by more
 * than 1e-10.
 * NOTE(review): best_gene.error is zeroed by the bootstrap memset and never
 * recomputed before the first `local_gene.error >= best_gene.error - 1e-10`
 * comparison, and in the resume (best_feature != 0) branch it is not
 * initialized at all before the float-search comparison — looks like the
 * first comparison can read a stale/uninitialized error; confirm against
 * upstream ccv. */
static ccv_bbf_feature_t _ccv_bbf_convex_optimize(unsigned char** posdata, int posnum, unsigned char** negdata, int negnum, ccv_bbf_feature_t* best_feature, ccv_size_t size, double* pw, double* nw)
{
	ccv_bbf_gene_t best_gene;
	/* seed (random method) */
	gsl_rng_env_setup();
	gsl_rng* rng = gsl_rng_alloc(gsl_rng_default);
	union { unsigned long int li; double db; } dbli;
	dbli.db = pw[0] + nw[0];
	gsl_rng_set(rng, dbli.li);
	int i, j, k, q, p, g, t;
	int rows[] = { size.height, size.height >> 1, size.height >> 2 };
	int cols[] = { size.width, size.width >> 1, size.width >> 2 };
	/* pnum = total number of pixel positions across the three levels; the
	 * gene buffer is sized for the worst-case count of enumerated moves */
	int pnum = rows[0] * cols[0] + rows[1] * cols[1] + rows[2] * cols[2];
	ccv_bbf_gene_t* gene = (ccv_bbf_gene_t*)ccmalloc((pnum * (CCV_BBF_POINT_MAX * 2 + 1) * 2 + CCV_BBF_POINT_MAX * 2 + 1) * sizeof(ccv_bbf_gene_t));
	if (best_feature == 0)
	{
		/* bootstrapping the best feature, start from two pixels, one for positive, one for negative
		 * the bootstrapping process go like this: first, it will assign a random pixel as positive
		 * and enumerate every possible pixel as negative, and pick the best one. Then, enumerate every
		 * possible pixel as positive, and pick the best one, until it converges */
		memset(&best_gene, 0, sizeof(ccv_bbf_gene_t));
		for (i = 0; i < CCV_BBF_POINT_MAX; i++)
			best_gene.feature.pz[i] = best_gene.feature.nz[i] = -1;
		best_gene.pk = 1;
		best_gene.nk = 0;
		best_gene.feature.size = 1;
		best_gene.feature.pz[0] = gsl_rng_uniform_int(rng, 3);
		best_gene.feature.px[0] = gsl_rng_uniform_int(rng, cols[best_gene.feature.pz[0]]);
		best_gene.feature.py[0] = gsl_rng_uniform_int(rng, rows[best_gene.feature.pz[0]]);
		for (t = 0; ; ++t)
		{
			g = 0;
			if (t % 2 == 0)
			{
				/* even rounds: hold the positive pixel, enumerate negatives */
				for (i = 0; i < 3; i++)
					for (j = 0; j < cols[i]; j++)
						for (k = 0; k < rows[i]; k++)
							if (i != best_gene.feature.pz[0] || j != best_gene.feature.px[0] || k != best_gene.feature.py[0])
							{
								gene[g] = best_gene;
								gene[g].pk = gene[g].nk = 1;
								gene[g].feature.nz[0] = i;
								gene[g].feature.nx[0] = j;
								gene[g].feature.ny[0] = k;
								g++;
							}
			} else {
				/* odd rounds: hold the negative pixel, enumerate positives */
				for (i = 0; i < 3; i++)
					for (j = 0; j < cols[i]; j++)
						for (k = 0; k < rows[i]; k++)
							if (i != best_gene.feature.nz[0] || j != best_gene.feature.nx[0] || k != best_gene.feature.ny[0])
							{
								gene[g] = best_gene;
								gene[g].pk = gene[g].nk = 1;
								gene[g].feature.pz[0] = i;
								gene[g].feature.px[0] = j;
								gene[g].feature.py[0] = k;
								g++;
							}
			}
			printf("bootstrapping round : %d\n", t);
			ccv_bbf_gene_t local_gene = _ccv_bbf_best_gene(gene, g, 2, posdata, posnum, negdata, negnum, size, pw, nw);
			if (local_gene.error >= best_gene.error - 1e-10)
				break;
			best_gene = local_gene;
		}
	} else {
		/* resume from the provided feature; recover pk/nk by scanning for
		 * the first unused (-1) slot on each side */
		best_gene.feature = *best_feature;
		best_gene.pk = best_gene.nk = best_gene.feature.size;
		for (i = 0; i < CCV_BBF_POINT_MAX; i++)
			if (best_feature->pz[i] == -1)
			{
				best_gene.pk = i;
				break;
			}
		for (i = 0; i < CCV_BBF_POINT_MAX; i++)
			if (best_feature->nz[i] == -1)
			{
				best_gene.nk = i;
				break;
			}
	}
	/* after bootstrapping, the float search technique will do the following permutations:
	 * a). add a new point to positive or negative
	 * b). remove a point from positive or negative
	 * c). move an existing point in positive or negative to another position
	 * the three rules applied exhaustively, no heuristic used. */
	for (t = 0; ; ++t)
	{
		g = 0;
		for (i = 0; i < 3; i++)
			for (j = 0; j < cols[i]; j++)
				for (k = 0; k < rows[i]; k++)
					if (!_ccv_bbf_exist_gene_feature(&best_gene, j, k, i))
					{
						/* add positive point */
						if (best_gene.pk < CCV_BBF_POINT_MAX - 1)
						{
							gene[g] = best_gene;
							gene[g].feature.pz[gene[g].pk] = i;
							gene[g].feature.px[gene[g].pk] = j;
							gene[g].feature.py[gene[g].pk] = k;
							gene[g].pk++;
							gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
							g++;
						}
						/* add negative point */
						if (best_gene.nk < CCV_BBF_POINT_MAX - 1)
						{
							gene[g] = best_gene;
							gene[g].feature.nz[gene[g].nk] = i;
							gene[g].feature.nx[gene[g].nk] = j;
							gene[g].feature.ny[gene[g].nk] = k;
							gene[g].nk++;
							gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
							g++;
						}
						/* refine positive point */
						for (q = 0; q < best_gene.pk; q++)
						{
							gene[g] = best_gene;
							gene[g].feature.pz[q] = i;
							gene[g].feature.px[q] = j;
							gene[g].feature.py[q] = k;
							g++;
						}
						/* add positive point, remove negative point */
						if (best_gene.pk < CCV_BBF_POINT_MAX - 1 && best_gene.nk > 1)
						{
							for (q = 0; q < best_gene.nk; q++)
							{
								gene[g] = best_gene;
								gene[g].feature.pz[gene[g].pk] = i;
								gene[g].feature.px[gene[g].pk] = j;
								gene[g].feature.py[gene[g].pk] = k;
								gene[g].pk++;
								for (p = q; p < best_gene.nk - 1; p++)
								{
									gene[g].feature.nz[p] = gene[g].feature.nz[p + 1];
									gene[g].feature.nx[p] = gene[g].feature.nx[p + 1];
									gene[g].feature.ny[p] = gene[g].feature.ny[p + 1];
								}
								gene[g].feature.nz[gene[g].nk - 1] = -1;
								gene[g].nk--;
								gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
								g++;
							}
						}
						/* refine negative point */
						for (q = 0; q < best_gene.nk; q++)
						{
							gene[g] = best_gene;
							gene[g].feature.nz[q] = i;
							gene[g].feature.nx[q] = j;
							gene[g].feature.ny[q] = k;
							g++;
						}
						/* add negative point, remove positive point */
						if (best_gene.pk > 1 && best_gene.nk < CCV_BBF_POINT_MAX - 1)
						{
							for (q = 0; q < best_gene.pk; q++)
							{
								gene[g] = best_gene;
								gene[g].feature.nz[gene[g].nk] = i;
								gene[g].feature.nx[gene[g].nk] = j;
								gene[g].feature.ny[gene[g].nk] = k;
								gene[g].nk++;
								for (p = q; p < best_gene.pk - 1; p++)
								{
									gene[g].feature.pz[p] = gene[g].feature.pz[p + 1];
									gene[g].feature.px[p] = gene[g].feature.px[p + 1];
									gene[g].feature.py[p] = gene[g].feature.py[p + 1];
								}
								gene[g].feature.pz[gene[g].pk - 1] = -1;
								gene[g].pk--;
								gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
								g++;
							}
						}
					}
		/* pure removals (no added point) for each side */
		if (best_gene.pk > 1)
			for (q = 0; q < best_gene.pk; q++)
			{
				gene[g] = best_gene;
				for (i = q; i < best_gene.pk - 1; i++)
				{
					gene[g].feature.pz[i] = gene[g].feature.pz[i + 1];
					gene[g].feature.px[i] = gene[g].feature.px[i + 1];
					gene[g].feature.py[i] = gene[g].feature.py[i + 1];
				}
				gene[g].feature.pz[gene[g].pk - 1] = -1;
				gene[g].pk--;
				gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
				g++;
			}
		if (best_gene.nk > 1)
			for (q = 0; q < best_gene.nk; q++)
			{
				gene[g] = best_gene;
				for (i = q; i < best_gene.nk - 1; i++)
				{
					gene[g].feature.nz[i] = gene[g].feature.nz[i + 1];
					gene[g].feature.nx[i] = gene[g].feature.nx[i + 1];
					gene[g].feature.ny[i] = gene[g].feature.ny[i + 1];
				}
				gene[g].feature.nz[gene[g].nk - 1] = -1;
				gene[g].nk--;
				gene[g].feature.size = ccv_max(gene[g].pk, gene[g].nk);
				g++;
			}
		/* include the unmodified current best as a candidate */
		gene[g] = best_gene;
		g++;
		printf("float search round : %d\n", t);
		ccv_bbf_gene_t local_gene = _ccv_bbf_best_gene(gene, g, CCV_BBF_POINT_MIN, posdata, posnum, negdata, negnum, size, pw, nw);
		if (local_gene.error >= best_gene.error - 1e-10)
			break;
		best_gene = local_gene;
	}
	ccfree(gene);
	gsl_rng_free(rng);
	return best_gene.feature;
}
/* Serialize one stage classifier to a text file. Floats (threshold and the
 * two alphas per feature) are written as the decimal value of their IEEE-754
 * bit pattern through a float/int union so they round-trip exactly.
 * Returns 0 on success, -1 if the file cannot be opened. */
static int _ccv_write_bbf_stage_classifier(const char* file, ccv_bbf_stage_classifier_t* classifier)
{
	FILE* out = fopen(file, "wb");
	if (out == 0)
		return -1;
	fprintf(out, "%d\n", classifier->count);
	union { float fl; int i; } fli;
	fli.fl = classifier->threshold;
	fprintf(out, "%d\n", fli.i);
	int fi, pj;
	for (fi = 0; fi < classifier->count; fi++)
	{
		ccv_bbf_feature_t* feature = &classifier->feature[fi];
		fprintf(out, "%d\n", feature->size);
		/* one positive line and one negative line per point slot */
		for (pj = 0; pj < feature->size; pj++)
		{
			fprintf(out, "%d %d %d\n", feature->px[pj], feature->py[pj], feature->pz[pj]);
			fprintf(out, "%d %d %d\n", feature->nx[pj], feature->ny[pj], feature->nz[pj]);
		}
		union { float fl; int i; } flia, flib;
		flia.fl = classifier->alpha[fi * 2];
		flib.fl = classifier->alpha[fi * 2 + 1];
		fprintf(out, "%d %d\n", flia.i, flib.i);
	}
	fclose(out);
	return 0;
}
/* Load the packed negative-sample buffers written by
 * _ccv_write_background_data. On success fills negdata[0..*negnum-1] (each a
 * freshly ccmalloc'ed 3-level pyramid buffer) and returns 0.
 * Fix over the original: the old code accumulated fread() results into a
 * `stat` variable that was never examined and returned 0 even when the file
 * was truncated, leaving garbage sample buffers. Now every read is checked;
 * on a short or failed read the partial sample is freed, *negnum is clamped
 * to the number of samples fully read, and -1 is returned.
 * NOTE(review): the caller must guarantee negdata[] has capacity for the
 * count stored in the file — confirm at call sites. */
static int _ccv_read_background_data(const char* file, unsigned char** negdata, int* negnum, ccv_size_t size)
{
	FILE* r = fopen(file, "rb");
	if (r == 0) return -1;
	if (fread(negnum, sizeof(int), 1, r) != 1 || *negnum < 0)
	{
		*negnum = 0;
		fclose(r);
		return -1;
	}
	int i;
	/* total bytes of one sample: full + half + quarter resolution planes */
	int isizs012 = _ccv_width_padding(size.width) * size.height +
				   _ccv_width_padding(size.width >> 1) * (size.height >> 1) +
				   _ccv_width_padding(size.width >> 2) * (size.height >> 2);
	for (i = 0; i < *negnum; i++)
	{
		negdata[i] = (unsigned char*)ccmalloc(isizs012);
		if (fread(negdata[i], 1, isizs012, r) != (size_t)isizs012)
		{
			/* truncated file: drop the partial sample and report failure */
			ccfree(negdata[i]);
			*negnum = i;
			fclose(r);
			return -1;
		}
	}
	fclose(r);
	return 0;
}
/* Persist the negative-sample buffers so training can resume later. The
 * payload is raw bytes, so the stream must be opened in BINARY mode ("wb")
 * to match _ccv_read_background_data's "rb" — the original "w" corrupts the
 * data on platforms with text-mode newline translation. Every fwrite and the
 * final fclose (which flushes) are now checked.
 * Returns 0 on success, -1 on open or write failure. */
static int _ccv_write_background_data(const char* file, unsigned char** negdata, int negnum, ccv_size_t size)
{
	FILE* w = fopen(file, "wb");
	if (w == 0) return -1;
	int ok = (fwrite(&negnum, sizeof(int), 1, w) == 1);
	int i;
	/* total bytes of one sample: full + half + quarter resolution planes */
	int isizs012 = _ccv_width_padding(size.width) * size.height +
				   _ccv_width_padding(size.width >> 1) * (size.height >> 1) +
				   _ccv_width_padding(size.width >> 2) * (size.height >> 2);
	for (i = 0; ok && i < negnum; i++)
		ok = (fwrite(negdata[i], 1, isizs012, w) == (size_t)isizs012);
	/* fclose flushes buffered data; a failed flush is a write failure too */
	if (fclose(w) != 0)
		ok = 0;
	return ok ? 0 : -1;
}
/* Restore training progress (stage index i, weak-classifier index k, the
 * background-data-ready flag bg, and both per-sample boosting weight
 * vectors) from the stat file written by
 * _ccv_save_bbf_cacade_training_state. Doubles are stored as two ints
 * carrying the raw bit pattern and rebuilt through a union so weights
 * round-trip exactly.
 * NOTE(review): assumes 2 * sizeof(int) == sizeof(double) — true on common
 * ILP32/LP64 targets, but worth a static assert.
 * NOTE(review): `stat` collects fscanf results but is never examined; a
 * truncated file silently yields partially-initialized weights.
 * Returns 0 if the file was opened, -1 otherwise. */
static int _ccv_resume_bbf_cascade_training_state(const char* file, int* i, int* k, int* bg, double* pw, double* nw, int posnum, int negnum)
{
	int stat = 0;
	FILE* r = fopen(file, "r");
	if (r == 0) return -1;
	stat |= fscanf(r, "%d %d %d", i, k, bg);
	int j;
	union { double db; int i[2]; } dbi;
	/* positive-sample weights */
	for (j = 0; j < posnum; j++)
	{
		stat |= fscanf(r, "%d %d", &dbi.i[0], &dbi.i[1]);
		pw[j] = dbi.db;
	}
	/* negative-sample weights */
	for (j = 0; j < negnum; j++)
	{
		stat |= fscanf(r, "%d %d", &dbi.i[0], &dbi.i[1]);
		nw[j] = dbi.db;
	}
	fclose(r);
	return 0;
}
/* Counterpart of _ccv_resume_bbf_cascade_training_state: dump the stage
 * index, weak-classifier index, background flag and both weight vectors to a
 * text file. Each double is emitted as the two ints of its raw bit pattern
 * (via a union) so it can be restored bit-exactly. Returns 0 on success, -1
 * if the file cannot be opened. (The "cacade" typo is preserved: it is this
 * function's established name.) */
static int _ccv_save_bbf_cacade_training_state(const char* file, int i, int k, int bg, double* pw, double* nw, int posnum, int negnum)
{
	FILE* w = fopen(file, "w");
	if (w == 0) return -1;
	fprintf(w, "%d %d %d\n", i, k, bg);
	union { double db; int i[2]; } bits;
	int j;
	for (j = 0; j < posnum; ++j)
	{
		bits.db = pw[j];
		fprintf(w, "%d %d ", bits.i[0], bits.i[1]);
	}
	fprintf(w, "\n");
	for (j = 0; j < negnum; ++j)
	{
		bits.db = nw[j];
		fprintf(w, "%d %d ", bits.i[0], bits.i[1]);
	}
	fprintf(w, "\n");
	fclose(w);
	return 0;
}
/* Train a BBF cascade: for each of params.layer stages, gather hard negative
 * samples, then boost BBF features (genetic and/or float-search optimizer)
 * until the stage meets the TP/FP criteria. Training state (weights, stage
 * files, negatives) is checkpointed under `dir` so a killed run can resume.
 * NOTE(review): stage/stat paths are built with sprintf into a fixed 1024-
 * byte buffer from caller-supplied `dir` — no bounds check; snprintf would
 * be safer.
 * NOTE(review): at the end only the top-level arrays and `cascade` itself
 * are freed; cascade->stage_classifier and each stage's feature/alpha
 * buffers are not — presumably acceptable since everything is persisted to
 * disk, but confirm this leak is intentional. */
void ccv_bbf_classifier_cascade_new(ccv_dense_matrix_t** posimg, int posnum, char** bgfiles, int bgnum, int negnum, ccv_size_t size, const char* dir, ccv_bbf_new_param_t params)
{
	int i, j, k;
	/* allocate memory for usage */
	ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t));
	cascade->count = 0;
	cascade->size = size;
	cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(sizeof(ccv_bbf_stage_classifier_t));
	unsigned char** posdata = (unsigned char**)ccmalloc(posnum * sizeof(unsigned char*));
	unsigned char** negdata = (unsigned char**)ccmalloc(negnum * sizeof(unsigned char*));
	double* pw = (double*)ccmalloc(posnum * sizeof(double));
	double* nw = (double*)ccmalloc(negnum * sizeof(double));
	float* peval = (float*)ccmalloc(posnum * sizeof(float));
	float* neval = (float*)ccmalloc(negnum * sizeof(float));
	double inv_balance_k = 1. / params.balance_k;
	/* balance factor k, and weighted with 0.01 */
	params.balance_k *= 0.01;
	inv_balance_k *= 0.01;
	int steps[] = { _ccv_width_padding(cascade->size.width),
					_ccv_width_padding(cascade->size.width >> 1),
					_ccv_width_padding(cascade->size.width >> 2) };
	/* byte offsets of pyramid levels 1 and 2 inside each packed sample */
	int isizs0 = steps[0] * cascade->size.height;
	int isizs01 = isizs0 + steps[1] * (cascade->size.height >> 1);
	i = 0;
	k = 0;
	int bg = 0;
	int cacheK = 10; /* current capacity of classifier.feature / .alpha */
	/* state resume code */
	char buf[1024];
	sprintf(buf, "%s/stat.txt", dir);
	_ccv_resume_bbf_cascade_training_state(buf, &i, &k, &bg, pw, nw, posnum, negnum);
	if (i > 0)
	{
		/* resuming: reload the i already-trained stages from disk */
		cascade->count = i;
		ccfree(cascade->stage_classifier);
		cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(i * sizeof(ccv_bbf_stage_classifier_t));
		for (j = 0; j < i; j++)
		{
			sprintf(buf, "%s/stage-%d.txt", dir, j);
			_ccv_read_bbf_stage_classifier(buf, &cascade->stage_classifier[j]);
		}
	}
	if (k > 0)
		cacheK = k;
	int rpos, rneg = 0;
	if (bg)
	{
		sprintf(buf, "%s/negs.txt", dir);
		_ccv_read_background_data(buf, negdata, &rneg, cascade->size);
	}
	for (; i < params.layer; i++)
	{
		if (!bg)
		{
			/* mine hard negatives with the cascade trained so far */
			rneg = _ccv_prepare_background_data(cascade, bgfiles, bgnum, negdata, negnum);
			/* save state of background data */
			sprintf(buf, "%s/negs.txt", dir);
			_ccv_write_background_data(buf, negdata, rneg, cascade->size);
			bg = 1;
		}
		double totalw;
		/* save state of cascade : level, weight etc. */
		sprintf(buf, "%s/stat.txt", dir);
		_ccv_save_bbf_cacade_training_state(buf, i, k, bg, pw, nw, posnum, negnum);
		ccv_bbf_stage_classifier_t classifier;
		if (k > 0)
		{
			/* resume state of classifier */
			sprintf( buf, "%s/stage-%d.txt", dir, i );
			_ccv_read_bbf_stage_classifier(buf, &classifier);
		} else {
			/* initialize classifier */
			for (j = 0; j < posnum; j++)
				pw[j] = params.balance_k;
			for (j = 0; j < rneg; j++)
				nw[j] = inv_balance_k;
			classifier.count = k;
			classifier.threshold = 0;
			classifier.feature = (ccv_bbf_feature_t*)ccmalloc(cacheK * sizeof(ccv_bbf_feature_t));
			classifier.alpha = (float*)ccmalloc(cacheK * 2 * sizeof(float));
		}
		_ccv_prepare_positive_data(posimg, posdata, cascade->size, posnum);
		rpos = _ccv_prune_positive_data(cascade, posdata, posnum, cascade->size);
		/* NOTE(review): "postivie" typo below is a runtime log string; left
		 * unchanged in this doc-only pass */
		printf("%d postivie data and %d negative data in training\n", rpos, rneg);
		/* reweight to 1.00 */
		totalw = 0;
		for (j = 0; j < rpos; j++)
			totalw += pw[j];
		for (j = 0; j < rneg; j++)
			totalw += nw[j];
		for (j = 0; j < rpos; j++)
			pw[j] = pw[j] / totalw;
		for (j = 0; j < rneg; j++)
			nw[j] = nw[j] / totalw;
		for (; ; k++)
		{
			/* get overall true-positive, false-positive rate and threshold */
			double tp = 0, fp = 0, etp = 0, efp = 0;
			_ccv_bbf_eval_data(&classifier, posdata, rpos, negdata, rneg, cascade->size, peval, neval);
			_ccv_sort_32f(peval, rpos, 0);
			/* pick the threshold that keeps pos_crit of positives */
			classifier.threshold = peval[(int)((1. - params.pos_crit) * rpos)] - 1e-6;
			for (j = 0; j < rpos; j++)
			{
				if (peval[j] >= 0)
					++tp;
				if (peval[j] >= classifier.threshold)
					++etp;
			}
			tp /= rpos; etp /= rpos;
			for (j = 0; j < rneg; j++)
			{
				if (neval[j] >= 0)
					++fp;
				if (neval[j] >= classifier.threshold)
					++efp;
			}
			fp /= rneg; efp /= rneg;
			printf("stage classifier real TP rate : %f, FP rate : %f\n", tp, fp);
			printf("stage classifier TP rate : %f, FP rate : %f at threshold : %f\n", etp, efp, classifier.threshold);
			if (k > 0)
			{
				/* save classifier state */
				sprintf(buf, "%s/stage-%d.txt", dir, i);
				_ccv_write_bbf_stage_classifier(buf, &classifier);
				sprintf(buf, "%s/stat.txt", dir);
				_ccv_save_bbf_cacade_training_state(buf, i, k, bg, pw, nw, posnum, negnum);
			}
			if (etp > params.pos_crit && efp < params.neg_crit)
				break;
			/* TODO: more post-process is needed in here */
			/* select the best feature in current distribution through genetic algorithm optimization */
			ccv_bbf_feature_t best;
			if (params.optimizer == CCV_BBF_GENETIC_OPT)
			{
				best = _ccv_bbf_genetic_optimize(posdata, rpos, negdata, rneg, params.feature_number, cascade->size, pw, nw);
			} else if (params.optimizer == CCV_BBF_FLOAT_OPT) {
				best = _ccv_bbf_convex_optimize(posdata, rpos, negdata, rneg, 0, cascade->size, pw, nw);
			} else {
				/* hybrid: genetic search seeds the float-search refinement */
				best = _ccv_bbf_genetic_optimize(posdata, rpos, negdata, rneg, params.feature_number, cascade->size, pw, nw);
				best = _ccv_bbf_convex_optimize(posdata, rpos, negdata, rneg, &best, cascade->size, pw, nw);
			}
			double err = _ccv_bbf_error_rate(&best, posdata, rpos, negdata, rneg, cascade->size, pw, nw);
			double rw = (1 - err) / err; /* AdaBoost-style weight multiplier */
			totalw = 0;
			/* reweight */
			for (j = 0; j < rpos; j++)
			{
				unsigned char* u8[] = { posdata[j], posdata[j] + isizs0, posdata[j] + isizs01 };
				if (!_ccv_run_bbf_feature(&best, steps, u8))
					pw[j] *= rw;
				pw[j] *= params.balance_k;
				totalw += pw[j];
			}
			for (j = 0; j < rneg; j++)
			{
				unsigned char* u8[] = { negdata[j], negdata[j] + isizs0, negdata[j] + isizs01 };
				if (_ccv_run_bbf_feature(&best, steps, u8))
					nw[j] *= rw;
				nw[j] *= inv_balance_k;
				totalw += nw[j];
			}
			for (j = 0; j < rpos; j++)
				pw[j] = pw[j] / totalw;
			for (j = 0; j < rneg; j++)
				nw[j] = nw[j] / totalw;
			double c = log(rw);
			printf("coefficient of feature %d: %f\n", k + 1, c);
			classifier.count = k + 1;
			/* resizing classifier */
			if (k >= cacheK)
			{
				/* grow feature/alpha storage by doubling */
				ccv_bbf_feature_t* feature = (ccv_bbf_feature_t*)ccmalloc(cacheK * 2 * sizeof(ccv_bbf_feature_t));
				memcpy(feature, classifier.feature, cacheK * sizeof(ccv_bbf_feature_t));
				ccfree(classifier.feature);
				float* alpha = (float*)ccmalloc(cacheK * 4 * sizeof(float));
				memcpy(alpha, classifier.alpha, cacheK * 2 * sizeof(float));
				ccfree(classifier.alpha);
				classifier.feature = feature;
				classifier.alpha = alpha;
				cacheK *= 2;
			}
			/* setup new feature */
			classifier.feature[k] = best;
			classifier.alpha[k * 2] = -c;
			classifier.alpha[k * 2 + 1] = c;
		}
		/* append the finished stage to the cascade */
		cascade->count = i + 1;
		ccv_bbf_stage_classifier_t* stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t));
		memcpy(stage_classifier, cascade->stage_classifier, i * sizeof(ccv_bbf_stage_classifier_t));
		ccfree(cascade->stage_classifier);
		stage_classifier[i] = classifier;
		cascade->stage_classifier = stage_classifier;
		k = 0;
		bg = 0; /* force fresh hard-negative mining for the next stage */
		for (j = 0; j < rpos; j++)
			ccfree(posdata[j]);
		for (j = 0; j < rneg; j++)
			ccfree(negdata[j]);
	}
	ccfree(neval);
	ccfree(peval);
	ccfree(nw);
	ccfree(pw);
	ccfree(negdata);
	ccfree(posdata);
	ccfree(cascade);
}
#else
/* GSL-free build: BBF cascade training is unavailable; emit a diagnostic
 * instead of training. */
void ccv_bbf_classifier_cascade_new(ccv_dense_matrix_t** posimg, int posnum, char** bgfiles, int bgnum, int negnum, ccv_size_t size, const char* dir, ccv_bbf_new_param_t params)
{
	fprintf(stderr, " ccv_bbf_classifier_cascade_new requires libgsl support, please compile ccv with libgsl.\n");
}
#endif
/* Grouping predicate for ccv_array_group: two detections belong together
 * when their top-left corners lie within 25% of r1's width of each other and
 * their widths agree within a factor of roughly 1.5 in both directions. */
static int _ccv_is_equal(const void* _r1, const void* _r2, void* data)
{
	const ccv_comp_t* r1 = (const ccv_comp_t*)_r1;
	const ccv_comp_t* r2 = (const ccv_comp_t*)_r2;
	int distance = (int)(r1->rect.width * 0.25 + 0.5);
	if (r2->rect.x > r1->rect.x + distance || r2->rect.x < r1->rect.x - distance)
		return 0;
	if (r2->rect.y > r1->rect.y + distance || r2->rect.y < r1->rect.y - distance)
		return 0;
	if (r2->rect.width > (int)(r1->rect.width * 1.5 + 0.5))
		return 0;
	return (int)(r2->rect.width * 1.5 + 0.5) >= r1->rect.width;
}
/* Like _ccv_is_equal, but detections additionally must share the same class
 * id to be grouped together. */
static int _ccv_is_equal_same_class(const void* _r1, const void* _r2, void* data)
{
	const ccv_comp_t* r1 = (const ccv_comp_t*)_r1;
	const ccv_comp_t* r2 = (const ccv_comp_t*)_r2;
	if (r2->id != r1->id)
		return 0;
	int distance = (int)(r1->rect.width * 0.25 + 0.5);
	if (r2->rect.x > r1->rect.x + distance || r2->rect.x < r1->rect.x - distance)
		return 0;
	if (r2->rect.y > r1->rect.y + distance || r2->rect.y < r1->rect.y - distance)
		return 0;
	if (r2->rect.width > (int)(r1->rect.width * 1.5 + 0.5))
		return 0;
	return (int)(r2->rect.width * 1.5 + 0.5) >= r1->rect.width;
}
ccv_array_t* ccv_bbf_detect_objects(ccv_dense_matrix_t* a, ccv_bbf_classifier_cascade_t** _cascade, int count, ccv_bbf_param_t params)
{
int hr = a->rows / params.size.height;
int wr = a->cols / params.size.width;
double scale = pow(2., 1. / (params.interval + 1.));
int next = params.interval + 1;
int scale_upto = (int)(log((double)ccv_min(hr, wr)) / log(scale));
ccv_dense_matrix_t** pyr = (ccv_dense_matrix_t**)alloca((scale_upto + next * 2) * 4 * sizeof(ccv_dense_matrix_t*));
memset(pyr, 0, (scale_upto + next * 2) * 4 * sizeof(ccv_dense_matrix_t*));
if (params.size.height != _cascade[0]->size.height || params.size.width != _cascade[0]->size.width)
ccv_resample(a, &pyr[0], 0, a->rows * _cascade[0]->size.height / params.size.height, a->cols * _cascade[0]->size.width / params.size.width, CCV_INTER_AREA);
else
pyr[0] = a;
int i, j, k, t, x, y, q;
for (i = 1; i < ccv_min(params.interval + 1, scale_upto + next * 2); i++)
ccv_resample(pyr[0], &pyr[i * 4], 0, (int)(pyr[0]->rows / pow(scale, i)), (int)(pyr[0]->cols / pow(scale, i)), CCV_INTER_AREA);
for (i = next; i < scale_upto + next * 2; i++)
ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4], 0, 0, 0);
if (params.accurate)
for (i = next * 2; i < scale_upto + next * 2; i++)
{
ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 1], 0, 1, 0);
ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 2], 0, 0, 1);
ccv_sample_down(pyr[i * 4 - next * 4], &pyr[i * 4 + 3], 0, 1, 1);
}
ccv_array_t* idx_seq;
ccv_array_t* seq = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
ccv_array_t* seq2 = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
ccv_array_t* result_seq = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
/* detect in multi scale */
for (t = 0; t < count; t++)
{
ccv_bbf_classifier_cascade_t* cascade = _cascade[t];
float scale_x = (float) params.size.width / (float) cascade->size.width;
float scale_y = (float) params.size.height / (float) cascade->size.height;
ccv_array_clear(seq);
for (i = 0; i < scale_upto; i++)
{
int dx[] = {0, 1, 0, 1};
int dy[] = {0, 0, 1, 1};
int i_rows = pyr[i * 4 + next * 8]->rows - (cascade->size.height >> 2);
int steps[] = { pyr[i * 4]->step, pyr[i * 4 + next * 4]->step, pyr[i * 4 + next * 8]->step };
int i_cols = pyr[i * 4 + next * 8]->cols - (cascade->size.width >> 2);
int paddings[] = { pyr[i * 4]->step * 4 - i_cols * 4,
pyr[i * 4 + next * 4]->step * 2 - i_cols * 2,
pyr[i * 4 + next * 8]->step - i_cols };
for (q = 0; q < (params.accurate ? 4 : 1); q++)
{
unsigned char* u8[] = { pyr[i * 4]->data.u8 + dx[q] * 2 + dy[q] * pyr[i * 4]->step * 2, pyr[i * 4 + next * 4]->data.u8 + dx[q] + dy[q] * pyr[i * 4 + next * 4]->step, pyr[i * 4 + next * 8 + q]->data.u8 };
for (y = 0; y < i_rows; y++)
{
for (x = 0; x < i_cols; x++)
{
float sum;
int flag = 1;
ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier;
for (j = 0; j < cascade->count; ++j, ++classifier)
{
sum = 0;
float* alpha = classifier->alpha;
ccv_bbf_feature_t* feature = classifier->feature;
for (k = 0; k < classifier->count; ++k, alpha += 2, ++feature)
sum += alpha[_ccv_run_bbf_feature(feature, steps, u8)];
if (sum < classifier->threshold)
{
flag = 0;
break;
}
}
if (flag)
{
ccv_comp_t comp;
comp.rect = ccv_rect((int)((x * 4 + dx[q] * 2) * scale_x + 0.5), (int)((y * 4 + dy[q] * 2) * scale_y + 0.5), (int)(cascade->size.width * scale_x + 0.5), (int)(cascade->size.height * scale_y + 0.5));
comp.id = t;
comp.neighbors = 1;
comp.confidence = sum;
ccv_array_push(seq, &comp);
}
u8[0] += 4;
u8[1] += 2;
u8[2] += 1;
}
u8[0] += paddings[0];
u8[1] += paddings[1];
u8[2] += paddings[2];
}
}
scale_x *= scale;
scale_y *= scale;
}
/* the following code from OpenCV's haar feature implementation */
if(params.min_neighbors == 0)
{
for (i = 0; i < seq->rnum; i++)
{
ccv_comp_t* comp = (ccv_comp_t*)ccv_array_get(seq, i);
ccv_array_push(result_seq, comp);
}
} else {
idx_seq = 0;
ccv_array_clear(seq2);
// group retrieved rectangles in order to filter out noise
int ncomp = ccv_array_group(seq, &idx_seq, _ccv_is_equal_same_class, 0);
ccv_comp_t* comps = (ccv_comp_t*)ccmalloc((ncomp + 1) * sizeof(ccv_comp_t));
memset(comps, 0, (ncomp + 1) * sizeof(ccv_comp_t));
// count number of neighbors
for(i = 0; i < seq->rnum; i++)
{
ccv_comp_t r1 = *(ccv_comp_t*)ccv_array_get(seq, i);
int idx = *(int*)ccv_array_get(idx_seq, i);
if (comps[idx].neighbors == 0)
comps[idx].confidence = r1.confidence;
++comps[idx].neighbors;
comps[idx].rect.x += r1.rect.x;
comps[idx].rect.y += r1.rect.y;
comps[idx].rect.width += r1.rect.width;
comps[idx].rect.height += r1.rect.height;
comps[idx].id = r1.id;
comps[idx].confidence = ccv_max(comps[idx].confidence, r1.confidence);
}
// calculate average bounding box
for(i = 0; i < ncomp; i++)
{
int n = comps[i].neighbors;
if(n >= params.min_neighbors)
{
ccv_comp_t comp;
comp.rect.x = (comps[i].rect.x * 2 + n) / (2 * n);
comp.rect.y = (comps[i].rect.y * 2 + n) / (2 * n);
comp.rect.width = (comps[i].rect.width * 2 + n) / (2 * n);
comp.rect.height = (comps[i].rect.height * 2 + n) / (2 * n);
comp.neighbors = comps[i].neighbors;
comp.id = comps[i].id;
comp.confidence = comps[i].confidence;
ccv_array_push(seq2, &comp);
}
}
// filter out small face rectangles inside large face rectangles
for(i = 0; i < seq2->rnum; i++)
{
ccv_comp_t r1 = *(ccv_comp_t*)ccv_array_get(seq2, i);
int flag = 1;
for(j = 0; j < seq2->rnum; j++)
{
ccv_comp_t r2 = *(ccv_comp_t*)ccv_array_get(seq2, j);
int distance = (int)(r2.rect.width * 0.25 + 0.5);
if(i != j &&
r1.id == r2.id &&
r1.rect.x >= r2.rect.x - distance &&
r1.rect.y >= r2.rect.y - distance &&
r1.rect.x + r1.rect.width <= r2.rect.x + r2.rect.width + distance &&
r1.rect.y + r1.rect.height <= r2.rect.y + r2.rect.height + distance &&
(r2.neighbors > ccv_max(3, r1.neighbors) || r1.neighbors < 3))
{
flag = 0;
break;
}
}
if(flag)
ccv_array_push(result_seq, &r1);
}
ccv_array_free(idx_seq);
ccfree(comps);
}
}
ccv_array_free(seq);
ccv_array_free(seq2);
ccv_array_t* result_seq2;
/* the following code from OpenCV's haar feature implementation */
if (params.flags & CCV_BBF_NO_NESTED)
{
result_seq2 = ccv_array_new(sizeof(ccv_comp_t), 64, 0);
idx_seq = 0;
// group retrieved rectangles in order to filter out noise
int ncomp = ccv_array_group(result_seq, &idx_seq, _ccv_is_equal, 0);
ccv_comp_t* comps = (ccv_comp_t*)ccmalloc((ncomp + 1) * sizeof(ccv_comp_t));
memset(comps, 0, (ncomp + 1) * sizeof(ccv_comp_t));
// count number of neighbors
for(i = 0; i < result_seq->rnum; i++)
{
ccv_comp_t r1 = *(ccv_comp_t*)ccv_array_get(result_seq, i);
int idx = *(int*)ccv_array_get(idx_seq, i);
if (comps[idx].neighbors == 0 || comps[idx].confidence < r1.confidence)
{
comps[idx].confidence = r1.confidence;
comps[idx].neighbors = 1;
comps[idx].rect = r1.rect;
comps[idx].id = r1.id;
}
}
// calculate average bounding box
for(i = 0; i < ncomp; i++)
if(comps[i].neighbors)
ccv_array_push(result_seq2, &comps[i]);
ccv_array_free(result_seq);
ccfree(comps);
} else {
result_seq2 = result_seq;
}
for (i = 1; i < scale_upto + next * 2; i++)
ccv_matrix_free(pyr[i * 4]);
if (params.accurate)
for (i = next * 2; i < scale_upto + next * 2; i++)
{
ccv_matrix_free(pyr[i * 4 + 1]);
ccv_matrix_free(pyr[i * 4 + 2]);
ccv_matrix_free(pyr[i * 4 + 3]);
}
if (params.size.height != _cascade[0]->size.height || params.size.width != _cascade[0]->size.width)
ccv_matrix_free(pyr[0]);
return result_seq2;
}
/* Load a BBF classifier cascade from a directory containing cascade.txt
 * plus one stage-%d.txt file per stage classifier.
 * Returns a newly allocated cascade (caller releases it with
 * ccv_bbf_classifier_cascade_free), or 0 if cascade.txt cannot be opened.
 * If a stage file fails to load, cascade->count is truncated to the number
 * of stages read successfully. */
ccv_bbf_classifier_cascade_t* ccv_bbf_read_classifier_cascade(const char* directory)
{
	char buf[1024];
	/* snprintf instead of sprintf: a long directory path must not overflow buf */
	snprintf(buf, sizeof(buf), "%s/cascade.txt", directory);
	int s, i;
	FILE* r = fopen(buf, "r");
	if (r == 0)
		return 0;
	ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t));
	/* header: stage count, then detection window width / height */
	s = fscanf(r, "%d %d %d", &cascade->count, &cascade->size.width, &cascade->size.height);
	assert(s > 0);
	cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t));
	for (i = 0; i < cascade->count; i++)
	{
		snprintf(buf, sizeof(buf), "%s/stage-%d.txt", directory, i);
		if (_ccv_read_bbf_stage_classifier(buf, &cascade->stage_classifier[i]) < 0)
		{
			/* keep whatever stages loaded cleanly */
			cascade->count = i;
			break;
		}
	}
	fclose(r);
	return cascade;
}
ccv_bbf_classifier_cascade_t* ccv_bbf_classifier_cascade_read_binary(char* s)
{
int i;
ccv_bbf_classifier_cascade_t* cascade = (ccv_bbf_classifier_cascade_t*)ccmalloc(sizeof(ccv_bbf_classifier_cascade_t));
memcpy(&cascade->count, s, sizeof(cascade->count)); s += sizeof(cascade->count);
memcpy(&cascade->size.width, s, sizeof(cascade->size.width)); s += sizeof(cascade->size.width);
memcpy(&cascade->size.height, s, sizeof(cascade->size.height)); s += sizeof(cascade->size.height);
ccv_bbf_stage_classifier_t* classifier = cascade->stage_classifier = (ccv_bbf_stage_classifier_t*)ccmalloc(cascade->count * sizeof(ccv_bbf_stage_classifier_t));
for (i = 0; i < cascade->count; i++, classifier++)
{
memcpy(&classifier->count, s, sizeof(classifier->count)); s += sizeof(classifier->count);
memcpy(&classifier->threshold, s, sizeof(classifier->threshold)); s += sizeof(classifier->threshold);
classifier->feature = (ccv_bbf_feature_t*)ccmalloc(classifier->count * sizeof(ccv_bbf_feature_t));
classifier->alpha = (float*)ccmalloc(classifier->count * 2 * sizeof(float));
memcpy(classifier->feature, s, classifier->count * sizeof(ccv_bbf_feature_t)); s += classifier->count * sizeof(ccv_bbf_feature_t);
memcpy(classifier->alpha, s, classifier->count * 2 * sizeof(float)); s += classifier->count * 2 * sizeof(float);
}
return cascade;
}
/* Serialize cascade into buffer s of capacity slen, using the flat layout
 * that ccv_bbf_classifier_cascade_read_binary expects.
 * Always returns the number of bytes required; the buffer is only written
 * when slen is large enough to hold the whole serialization. */
int ccv_bbf_classifier_cascade_write_binary(ccv_bbf_classifier_cascade_t* cascade, char* s, int slen)
{
	int i;
	/* first pass: measure the total serialized size */
	int len = sizeof(cascade->count) + sizeof(cascade->size.width) + sizeof(cascade->size.height);
	ccv_bbf_stage_classifier_t* stage = cascade->stage_classifier;
	for (i = 0; i < cascade->count; i++, stage++)
		len += sizeof(stage->count) + sizeof(stage->threshold) + stage->count * sizeof(ccv_bbf_feature_t) + stage->count * 2 * sizeof(float);
	if (slen < len)
		return len; /* not enough room: report required size, write nothing */
	/* second pass: emit the header, then every stage in order */
	memcpy(s, &cascade->count, sizeof(cascade->count));
	s += sizeof(cascade->count);
	memcpy(s, &cascade->size.width, sizeof(cascade->size.width));
	s += sizeof(cascade->size.width);
	memcpy(s, &cascade->size.height, sizeof(cascade->size.height));
	s += sizeof(cascade->size.height);
	for (stage = cascade->stage_classifier, i = 0; i < cascade->count; i++, stage++)
	{
		memcpy(s, &stage->count, sizeof(stage->count));
		s += sizeof(stage->count);
		memcpy(s, &stage->threshold, sizeof(stage->threshold));
		s += sizeof(stage->threshold);
		memcpy(s, stage->feature, stage->count * sizeof(ccv_bbf_feature_t));
		s += stage->count * sizeof(ccv_bbf_feature_t);
		memcpy(s, stage->alpha, stage->count * 2 * sizeof(float));
		s += stage->count * 2 * sizeof(float);
	}
	return len;
}
/* Release a cascade: each stage's feature and alpha arrays first,
 * then the stage array, then the cascade struct itself. */
void ccv_bbf_classifier_cascade_free(ccv_bbf_classifier_cascade_t* cascade)
{
	ccv_bbf_stage_classifier_t* stage = cascade->stage_classifier;
	ccv_bbf_stage_classifier_t* const end = stage + cascade->count;
	for (; stage < end; stage++)
	{
		ccfree(stage->feature);
		ccfree(stage->alpha);
	}
	ccfree(cascade->stage_classifier);
	ccfree(cascade);
}
|
expected_output.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
//---------------------------------------------------------------------
// program IS
//---------------------------------------------------------------------
//----------
// Class S:
//----------
//----------
// Class W:
//----------
//----------
// Class A:
//----------
//----------
// Class B:
//----------
//----------
// Class C:
//----------
//----------
// Class D:
//----------
/*************************************/
/*Typedef: if necessary, change the*/
/*size of int here by changing the*/
/*int type to, say, long*/
/*************************************/
typedef int INT_TYPE;
struct anon_NAS_IS_c_97 {
double real;
double imag;
};
typedef struct anon_NAS_IS_c_97 dcomplex;
/********************/
/*Some global info*/
/********************/
INT_TYPE *key_buff_ptr_global;
/*used by full_verify to get*/
/*copies of rank info*/
int passed_verification;
/************************************/
/*These are the three main arrays.*/
/*See SIZE_OF_BUFFERS def above*/
/************************************/
INT_TYPE key_array[1048576];
INT_TYPE key_buff1[65536];
INT_TYPE key_buff2[1048576];
INT_TYPE partial_verify_vals[5];
/**********************/
/*Partial verif info*/
/**********************/
INT_TYPE test_index_array[5];
INT_TYPE test_rank_array[5];
INT_TYPE S_test_index_array[5] = {48427, 17148, 23627, 62548, 4431};
INT_TYPE S_test_rank_array[5] = {0, 18, 346, 64917, 65463};
INT_TYPE W_test_index_array[5] = {357773, 934767, 875723, 898999, 404505};
INT_TYPE W_test_rank_array[5] = {1249, 11698, 1039987, 1043896, 1048018};
INT_TYPE A_test_index_array[5] = {2112377, 662041, 5336171, 3642833, 4250760};
INT_TYPE A_test_rank_array[5] = {104, 17523, 123928, 8288932, 8388264};
INT_TYPE B_test_index_array[5] = {41869, 812306, 5102857, 18232239, 26860214};
INT_TYPE B_test_rank_array[5] = {33422937, 10244, 59149, 33135281, 99};
INT_TYPE C_test_index_array[5] = {44172927, 72999161, 74326391, 129606274, 21736814};
INT_TYPE C_test_rank_array[5] = {61147, 882988, 266290, 133997595, 133525895};
INT_TYPE D_test_index_array[5] = {1317351170, 995930646, 1157283250, 1503301535, 1453734525};
INT_TYPE D_test_rank_array[5] = {1, 36538729, 1978098519, 2145192618, 2147425337};
/***********************/
/*function prototypes*/
/***********************/
double randlc(double *X, double *A);
void full_verify();
void c_print_results(char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int passed_verification);
double start[64];
double elapsed[64];
double elapsed_time();
void timer_clear(int n);
void timer_start(int n);
void timer_stop(int n);
double timer_read(int n);
void wtime(double *t);
/*****************************************************************/
/************* R A N D L C ************/
/************* ************/
/************* portable random number generator ************/
/*****************************************************************/
/* Portable 46-bit linear congruential pseudorandom generator:
 * X <- A*X mod 2^46, returning X * 2^-46 in [0,1).
 * X and A are carried in doubles split into 23-bit halves so that every
 * intermediate product is exactly representable in double precision. */
double randlc(double *X, double *A) {
   /* The constant table (2^-23, 2^-46, 2^23, 2^46) only needs to be built
      once.  The guard flag and constants must be `static` (as in the
      reference NPB source): with automatic storage the KS==0 test is
      always true and the constants are silently rebuilt on every call. */
   static int KS = 0;
   static double R23, R46, T23, T46;
   double T1, T2, T3, T4;
   double A1;
   double A2;
   double X1;
   double X2;
   double Z;
   int i, j;
   if(KS == 0) {
      R23 = 1.0;
      R46 = 1.0;
      T23 = 1.0;
      T46 = 1.0;
      for(i = 1; i <= 23; i++) {
         R23 = 0.50 * R23; /* R23 = 2^-23 */
         T23 = 2.0 * T23;  /* T23 = 2^23  */
      }
      for(i = 1; i <= 46; i++) {
         R46 = 0.50 * R46; /* R46 = 2^-46 */
         T46 = 2.0 * T46;  /* T46 = 2^46  */
      }
      KS = 1;
   }
   /* Break A into two parts such that A = 2^23 * A1 + A2. */
   T1 = R23 * *A;
   j = T1;
   A1 = j;
   A2 = *A - T23 * A1;
   /* Break X into two parts such that X = 2^23 * X1 + X2, compute
      Z = A1 * X2 + A2 * X1 (mod 2^23), and then
      X = 2^23 * Z + A2 * X2 (mod 2^46). */
   T1 = R23 * *X;
   j = T1;
   X1 = j;
   X2 = *X - T23 * X1;
   T1 = A1 * X2 + A2 * X1;
   j = R23 * T1;
   T2 = j;
   Z = T1 - T23 * T2;
   T3 = T23 * Z + A2 * X2;
   j = R46 * T3;
   T4 = j;
   *X = T3 - T46 * T4;
   return (R46 * *X);
}
/*****************************************************************/
/************* C R E A T E _ S E Q ************/
/*****************************************************************/
/* Fill key_array with (1 << 20) pseudorandom keys in [0, 2^16):
 * each key is the scaled sum of four consecutive randlc() draws
 * (the sum lies in [0,4), hence the 2^16/4 scale factor). */
void create_seq(double seed, double a) {
   INT_TYPE idx;
   const INT_TYPE scale = (1 << 16) / 4;
   for(idx = 0; idx < (1 << 20); idx++) {
      double accum = 0.0;
      int draw;
      /* accumulate four draws in order, same as the reference code */
      for(draw = 0; draw < 4; draw++)
         accum += randlc(&seed, &a);
      key_array[idx] = scale * accum;
   }
}
/*****************************************************************/
/************* F U L L _ V E R I F Y ************/
/*****************************************************************/
/* Final verification pass: scatter the keys into fully sorted order using
 * the rank counts saved by rank() in key_buff_ptr_global, then count any
 * out-of-order adjacent pairs.  Increments the global passed_verification
 * counter when the sequence is fully sorted. */
void full_verify() {
INT_TYPE i, j;
/*Now, finally, sort the keys:*/
/*key_buff2[] already has the proper information, so do nothing*/
/*Copy keys into work array; keys in key_array will be reassigned.*/
/* NOTE(review): firstprivate(key_array) gives every thread a private copy
of the whole 2^20-entry global array even though it is only read here -
presumably auto-parallelizer output; shared access would suffice. */
#pragma omp parallel for default(shared) private(i) firstprivate(key_array)
for(i = 0; i < (1 << 20); i++) key_buff2[i] = key_array[i];
/*************** Clava msgError **************
Array access key_array[--key_buff_ptr_global[key_buff2[i]]] which is used for writing has subscript of arrayType --key_buff_ptr_global[key_buff2[i]]
****************************************/
/* sequential scatter: each key's rank counter is pre-decremented to give
its final slot; this mutates the shared rank table, so it cannot be
parallelized as written */
for(i = 0; i < (1 << 20); i++) key_array[--key_buff_ptr_global[key_buff2[i]]] = key_buff2[i];
/*Confirm keys correctly sorted: count incorrectly sorted keys, if any*/
j = 0;
#pragma omp parallel for default(shared) private(i) firstprivate(key_array) reduction(+ : j)
for(i = 1; i < (1 << 20); i++) if(key_array[i - 1] > key_array[i]) j++;
if(j != 0) {
printf("Full_verify: number of keys out of sort: %ld\n", (long) j);
}
else passed_verification++;
}
/*****************************************************************/
/************* R A N K ****************/
/*****************************************************************/
/* One ranking iteration of the IS benchmark: bucket-count every key,
 * prefix-sum the counts into ranks, and partially verify five probe keys
 * against the expected ranks for the compiled-in class ('W').
 * On the final iteration (10) the rank table is published through
 * key_buff_ptr_global for use by full_verify(). */
void rank(int iteration) {
INT_TYPE i, k;
INT_TYPE *key_buff_ptr, *key_buff_ptr2;
/* perturb two keys so each iteration ranks slightly different data */
key_array[iteration] = iteration;
key_array[iteration + 10] = (1 << 16) - iteration;
/*Determine where the partial verify test keys are, load into*/
/*top of array bucket_size*/
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(i = 0; i < 5; i++) partial_verify_vals[i] = key_array[test_index_array[i]];
/*Initialize*/
/*Determine the number of keys in each bucket*/
/*Accumulative bucket sizes are the bucket pointers*/
/*Sort into appropriate bucket*/
key_buff_ptr2 = key_array;
/*Clear the work array*/
#pragma omp parallel for default(shared) private(i)
for(i = 0; i < (1 << 16); i++) key_buff1[i] = 0;
/*Ranking of all keys occurs in this section:*/
key_buff_ptr = key_buff1;
/*In this section, the keys themselves are used as their
own indexes to determine how many of each there are: their
individual population*/
/*************** Clava msgError **************
Array access key_buff_ptr[key_buff_ptr2[i]] which is used for writing has subscript of arrayType key_buff_ptr2[i]
****************************************/
for(i = 0; i < (1 << 20); i++) key_buff_ptr[key_buff_ptr2[i]]++;
/*Now they have individual key*/
/*population*/
/*To obtain ranks of each key, successively add the individual key
population*/
/*************** Clava msgError **************
unsolved dependency for arrayAccess key_buff_ptr use : RWR
****************************************/
for(i = 0; i < (1 << 16) - 1; i++) key_buff_ptr[i + 1] += key_buff_ptr[i];
/*This is the partial verify test section*/
/*Observe that test_rank_array vals are*/
/*shifted differently for different cases*/
/*************** Clava msgError **************
Loop contains Invalid Statement -> BreakStmt#486
****************************************/
/* NOTE(review): the switch tests the literal 'W', so only the 'W' branch
is live; the other class branches are dead code kept by the generator. */
for(i = 0; i < 5; i++) {
k = partial_verify_vals[i];
/*test vals were put here*/
if(0 < k && k <= (1 << 20) - 1) {
INT_TYPE key_rank = key_buff_ptr[k - 1];
int failed = 0;
switch ('W') {
case 'S':
if(i <= 2) {
if(key_rank != test_rank_array[i] + iteration) failed = 1;
else passed_verification++;
}
else {
if(key_rank != test_rank_array[i] - iteration) failed = 1;
else passed_verification++;
}
break;
case 'W':
if(i < 2) {
if(key_rank != test_rank_array[i] + (iteration - 2)) failed = 1;
else passed_verification++;
}
else {
if(key_rank != test_rank_array[i] - iteration) failed = 1;
else passed_verification++;
}
break;
case 'A':
if(i <= 2) {
if(key_rank != test_rank_array[i] + (iteration - 1)) failed = 1;
else passed_verification++;
}
else {
if(key_rank != test_rank_array[i] - (iteration - 1)) failed = 1;
else passed_verification++;
}
break;
case 'B':
if(i == 1 || i == 2 || i == 4) {
if(key_rank != test_rank_array[i] + iteration) failed = 1;
else passed_verification++;
}
else {
if(key_rank != test_rank_array[i] - iteration) failed = 1;
else passed_verification++;
}
break;
case 'C':
if(i <= 2) {
if(key_rank != test_rank_array[i] + iteration) failed = 1;
else passed_verification++;
}
else {
if(key_rank != test_rank_array[i] - iteration) failed = 1;
else passed_verification++;
}
break;
case 'D':
if(i < 2) {
if(key_rank != test_rank_array[i] + iteration) failed = 1;
else passed_verification++;
}
else {
if(key_rank != test_rank_array[i] - iteration) failed = 1;
else passed_verification++;
}
break;
}
if(failed == 1) printf("Failed partial verification: iteration %d, test key %d\n", iteration, (int) i);
}
}
/*Make copies of rank info for use by full_verify: these variables
in rank are local; making them global slows down the code, probably
since they cannot be made register by compiler*/
if(iteration == 10) key_buff_ptr_global = key_buff_ptr;
}
/*****************************************************************/
/************* M A I N ****************/
/*****************************************************************/
/* Driver for the serial NPB IS (Integer Sort) benchmark, class 'W':
 * builds the verification tables, generates 2^20 random keys, runs one
 * untimed warm-up rank(), times 10 ranking iterations, then fully sorts
 * and verifies and prints the standard results banner.
 * Exit status: 0 when all verifications passed, 1 otherwise. */
int main(int argc, char **argv) {
int i, iteration;
double timecounter;
FILE *fp; /* NOTE(review): unused - leftover from a file-reading variant */
/*Initialize timers*/
timer_clear(0);
/*Initialize the verification arrays if a valid class*/
/*************** Clava msgError **************
Loop contains Invalid Statement -> BreakStmt#608
****************************************/
for(i = 0; i < 5; i++) switch ('W') {
case 'S':
test_index_array[i] = S_test_index_array[i];
test_rank_array[i] = S_test_rank_array[i];
break;
case 'A':
test_index_array[i] = A_test_index_array[i];
test_rank_array[i] = A_test_rank_array[i];
break;
case 'W':
test_index_array[i] = W_test_index_array[i];
test_rank_array[i] = W_test_rank_array[i];
break;
case 'B':
test_index_array[i] = B_test_index_array[i];
test_rank_array[i] = B_test_rank_array[i];
break;
case 'C':
test_index_array[i] = C_test_index_array[i];
test_rank_array[i] = C_test_rank_array[i];
break;
case 'D':
test_index_array[i] = D_test_index_array[i];
test_rank_array[i] = D_test_rank_array[i];
break;
}
;
/*Printout initial NPB info*/
printf("\n\n NAS Parallel Benchmarks (NPB3.3-SER) - IS Benchmark\n\n");
printf(" Size: %ld (class %c)\n", (long) (1 << 20), 'W');
printf(" Iterations: %d\n", 10);
/*Generate random number sequence and subsequent keys on all procs*/
create_seq(314159265.00, 1220703125.00);
/*Random number gen seed*/
/*Random number gen mult*/
/*Do one interation for free (i.e., untimed) to guarantee initialization of
all data and code pages and respective tables*/
rank(1);
/*Start verification counter*/
passed_verification = 0;
if('W' != 'S') printf("\n iteration\n");
/*Start timer*/
timer_start(0);
/*This is the main iteration*/
/*************** Clava msgError **************
Loop Iteration number is too low
****************************************/
for(iteration = 1; iteration <= 10; iteration++) {
if('W' != 'S') printf(" %d\n", iteration);
rank(iteration);
}
/*End of timing, obtain maximum time of all processors*/
timer_stop(0);
timecounter = timer_read(0);
/*This tests that keys are in sequence: sorting of last ranked key seq
occurs here, but is an untimed operation*/
full_verify();
/*The final printout*/
/* expect 5 probe checks per timed iteration plus 1 from full_verify */
if(passed_verification != 5 * 10 + 1) passed_verification = 0;
c_print_results("IS", 'W', (int) ((1 << 20) / 64), 64, 0, 10, timecounter, ((double) (10 * (1 << 20))) / timecounter / 1000000., "keys ranked", passed_verification);
int exitValue = passed_verification ? 0 : 1;
return exitValue;
}
/**************************/
/*E N D P R O G R A M*/
/**************************/
/* Print the standard NPB results banner: benchmark name, class, problem
 * size, iteration count, wall time, Mop/s rate, operation type and
 * verification status.  n3 == 0 selects the linear size format (n1,
 * optionally times n2); otherwise size prints as n1 x n2 x n3.
 * passed_verification: <0 = not performed, 0 = failed, >0 = successful.
 * NOTE: `class` is a legal identifier in C but would break C++ builds. */
void c_print_results(char *name, char class, int n1, int n2, int n3, int niter, double t, double mops, char *optype, int passed_verification) {
printf("\n\n %s Benchmark Completed\n", name);
printf(" Class = %c\n", class);
if(n3 == 0) {
long nn = n1;
if(n2 != 0) nn *= n2;
printf(" Size = %12ld\n", nn);
}
else printf(" Size = %4dx%4dx%4d\n", n1, n2, n3);
printf(" Iterations = %12d\n", niter);
printf(" Time in seconds = %12.2f\n", t);
printf(" Mop/s total = %12.2f\n", mops);
printf(" Operation type = %24s\n", optype);
if(passed_verification < 0) printf(" Verification = NOT PERFORMED\n");
else if(passed_verification) printf(" Verification = SUCCESSFUL\n");
else printf(" Verification = UNSUCCESSFUL\n");
}
/* Store in *t the wall-clock time in seconds (microsecond resolution),
 * measured relative to the whole second of the first call.
 * NOTE(review): the static `sec` baseline makes this non-reentrant and the
 * first call always returns a sub-second value - adequate for this
 * benchmark's interval timing, not a general-purpose clock. */
void wtime(double *t) {
static int sec = -1;
struct timeval tv;
gettimeofday(&tv, (void *) 0);
if(sec < 0) sec = tv.tv_sec;
*t = (tv.tv_sec - sec) + 1.0e-6 * tv.tv_usec;
}
/*****************************************************************/
/****** E L A P S E D _ T I M E ******/
/*****************************************************************/
/* Convenience wrapper: return the current wall-clock reading from wtime(). */
double elapsed_time() {
double t;
wtime(&t);
return (t);
}
/*****************************************************************/
/****** T I M E R _ C L E A R ******/
/*****************************************************************/
/* Reset the accumulated elapsed time of timer n to zero. */
void timer_clear(int n) {
elapsed[n] = 0.0;
}
/*****************************************************************/
/****** T I M E R _ S T A R T ******/
/*****************************************************************/
/* Record the current wall-clock time as the start point of timer n. */
void timer_start(int n) {
start[n] = elapsed_time();
}
/*****************************************************************/
/****** T I M E R _ S T O P ******/
/*****************************************************************/
/* Stop timer n: add the interval since the matching timer_start(n)
 * to its accumulated elapsed time. */
void timer_stop(int n) {
double now = elapsed_time();
elapsed[n] += now - start[n];
}
/*****************************************************************/
/****** T I M E R _ R E A D ******/
/*****************************************************************/
/* Return the total wall-clock time accumulated on timer n, in seconds. */
double timer_read(int n) {
return (elapsed[n]);
}
|
171. Qr Decompose.h | /**
* @file
* \brief Library functions to compute [QR
* decomposition] of a given matrix.
*
*/
#ifndef QR_DECOMPOSE_H
#define QR_DECOMPOSE_H
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/**
* function to display matrix on stdout
*/
/**
 * Display an M x N matrix on stdout, one row per line, followed by a
 * blank separator line.
 */
void print_matrix(double **A, /**< matrix to print */
                  int M,      /**< number of rows of matrix */
                  int N)      /**< number of columns of matrix */
{
    int row, col;
    for (row = 0; row < M; row++)
    {
        for (col = 0; col < N; col++)
        {
            printf("% 9.3g\t", A[row][col]);
        }
        putchar('\n');
    }
    putchar('\n');
}
/**
* Compute dot product of two vectors of equal lengths
*
* If \f$\vec{a}=\left[a_0,a_1,a_2,...,a_L\right]\f$ and
* \f$\vec{b}=\left[b_0,b_1,b_1,...,b_L\right]\f$ then
* \f$\vec{a}\cdot\vec{b}=\displaystyle\sum_{i=0}^L a_i\times b_i\f$
*
* \returns \f$\vec{a}\cdot\vec{b}\f$
*/
/**
 * Dot product of two vectors of equal length L:
 * returns sum over i of a[i] * b[i].
 */
double vector_dot(double *a, double *b, int L)
{
    double acc = 0.f;
    int idx;
#ifdef _OPENMP
// parallelize on threads
#pragma omp parallel for reduction(+ : acc)
#endif
    for (idx = 0; idx < L; idx++)
    {
        acc += a[idx] * b[idx];
    }
    return acc;
}
/**
* Compute magnitude of vector.
*
* If \f$\vec{a}=\left[a_0,a_1,a_2,...,a_L\right]\f$ then
* \f$\left|\vec{a}\right|=\sqrt{\displaystyle\sum_{i=0}^L a_i^2}\f$
*
* \returns \f$\left|\vec{a}\right|\f$
*/
/**
 * Euclidean magnitude of a vector of length L:
 * the square root of its dot product with itself.
 */
double vector_mag(double *vector, int L)
{
    return sqrt(vector_dot(vector, vector, L));
}
/**
* Compute projection of vector \f$\vec{a}\f$ on \f$\vec{b}\f$ defined as
* \f[\text{proj}_\vec{b}\vec{a}=\frac{\vec{a}\cdot\vec{b}}{\left|\vec{b}\right|^2}\vec{b}\f]
*
* \returns NULL if error, otherwise pointer to output
*/
/**
 * Projection of vector a onto b, stored in out:
 * out = ((a.b) / (b.b)) * b
 *
 * \returns NULL when b.b == 0 (division by zero), otherwise out
 */
double *vector_proj(double *a, double *b, double *out, int L)
{
    const double ab = vector_dot(a, b, L);
    const double bb = vector_dot(b, b, L);
    if (bb == 0) /*! check for division by zero */
        return NULL;
    const double ratio = ab / bb;
    int idx;
#ifdef _OPENMP
// parallelize on threads
#pragma omp for
#endif
    for (idx = 0; idx < L; idx++)
    {
        out[idx] = ratio * b[idx];
    }
    return out;
}
/**
* Compute vector subtraction
*
* \f$\vec{c}=\vec{a}-\vec{b}\f$
*
* \returns pointer to output vector
*/
/**
 * Element-wise vector subtraction: out = a - b.
 *
 * \returns pointer to the resultant vector out
 */
double *vector_sub(double *a,   /**< minuend */
                   double *b,   /**< subtrahend */
                   double *out, /**< resultant vector */
                   int L        /**< length of vectors */
)
{
    int idx;
#ifdef _OPENMP
// parallelize on threads
#pragma omp for
#endif
    for (idx = 0; idx < L; idx++)
    {
        out[idx] = a[idx] - b[idx];
    }
    return out;
}
/**
* Decompose matrix \f$A\f$ using [Gram-Schmidt
*process](https://en.wikipedia.org/wiki/QR_decomposition).
*
* \f{eqnarray*}{
* \text{given that}\quad A &=&
*\left[\mathbf{a}_1,\mathbf{a}_2,\ldots,\mathbf{a}_{N-1},\right]\\
* \text{where}\quad\mathbf{a}_i &=&
*\left[a_{0i},a_{1i},a_{2i},\ldots,a_{(M-1)i}\right]^T\quad\ldots\mbox{(column
*vectors)}\\
* \text{then}\quad\mathbf{u}_i &=& \mathbf{a}_i
*-\sum_{j=0}^{i-1}\text{proj}_{\mathbf{u}_j}\mathbf{a}_i\\
* \mathbf{e}_i &=&\frac{\mathbf{u}_i}{\left|\mathbf{u}_i\right|}\\
* Q &=& \begin{bmatrix}\mathbf{e}_0 & \mathbf{e}_1 & \mathbf{e}_2 & \dots &
*\mathbf{e}_{N-1}\end{bmatrix}\\
* R &=& \begin{bmatrix}\langle\mathbf{e}_0\,,\mathbf{a}_0\rangle &
*\langle\mathbf{e}_1\,,\mathbf{a}_1\rangle &
*\langle\mathbf{e}_2\,,\mathbf{a}_2\rangle & \dots \\
* 0 & \langle\mathbf{e}_1\,,\mathbf{a}_1\rangle &
*\langle\mathbf{e}_2\,,\mathbf{a}_2\rangle & \dots\\
* 0 & 0 & \langle\mathbf{e}_2\,,\mathbf{a}_2\rangle & \dots\\
* \vdots & \vdots & \vdots & \ddots
* \end{bmatrix}\\
* \f}
*/
/* Gram-Schmidt QR decomposition of the MxN matrix A into an orthonormal Q
 * (MxN) and an upper-triangular R (NxN): column i of Q is column i of A
 * minus its projections onto all previously computed Q columns, normalized.
 * NOTE(review): mag is not checked against zero before the division below -
 * a rank-deficient A would produce inf/NaN columns; confirm inputs are
 * full column rank.
 * NOTE(review): the `#pragma omp for` directives are orphaned (there is no
 * enclosing parallel region here), so they run sequentially unless the
 * caller wraps this function in `#pragma omp parallel`. */
void qr_decompose(double **A, /**< input matrix to decompose */
double **Q, /**< output decomposed matrix */
double **R, /**< output decomposed matrix */
int M, /**< number of rows of matrix A */
int N /**< number of columns of matrix A */
)
{
/* scratch vectors of length M, reused across columns */
double *col_vector = (double *)malloc(M * sizeof(double));
double *col_vector2 = (double *)malloc(M * sizeof(double));
double *tmp_vector = (double *)malloc(M * sizeof(double));
for (int i = 0; i < N;
i++) /* for each column => R is a square matrix of NxN */
{
int j;
#ifdef _OPENMP
// parallelize on threads
#pragma omp for
#endif
for (j = 0; j < i; j++) /* second dimension of column */
R[i][j] = 0.; /* make R upper triangular */
/* get corresponding Q vector */
#ifdef _OPENMP
// parallelize on threads
#pragma omp for
#endif
for (j = 0; j < M; j++)
{
tmp_vector[j] = A[j][i]; /* accumulator for uk */
col_vector[j] = A[j][i];
}
/* subtract projections of a_i onto each previously computed q_j */
for (j = 0; j < i; j++)
{
for (int k = 0; k < M; k++) col_vector2[k] = Q[k][j];
vector_proj(col_vector, col_vector2, col_vector2, M);
vector_sub(tmp_vector, col_vector2, tmp_vector, M);
}
double mag = vector_mag(tmp_vector, M);
#ifdef _OPENMP
// parallelize on threads
#pragma omp for
#endif
for (j = 0; j < M; j++) Q[j][i] = tmp_vector[j] / mag;
/* compute upper triangular values of R */
for (int kk = 0; kk < M; kk++) col_vector[kk] = Q[kk][i];
for (int k = i; k < N; k++)
{
for (int kk = 0; kk < M; kk++) col_vector2[kk] = A[kk][k];
R[i][k] = vector_dot(col_vector, col_vector2, M);
}
}
free(col_vector);
free(col_vector2);
free(tmp_vector);
}
#endif // QR_DECOMPOSE_H
|
par_relax_more.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision: 2.4 $
***********************************************************************EHEADER*/
/******************************************************************************
*
* a few more relaxation schemes: Chebychev, FCF-Jacobi, CG and Steepest
* Descent
*
*****************************************************************************/
#include "headers.h"
#include "float.h"
int hypre_LINPACKcgtql1(int*,double *,double *,int *);
/******************************************************************************
*
*use max norm to estimate largest eigenvalue
*
*****************************************************************************/
/* Estimate the largest eigenvalue of A (or of D^{-1}A when scale != 0)
 * using the infinity norm of A, which bounds the spectral radius and is
 * adequate for SPD matrices.  The sign of the estimate is flipped when the
 * diagonal is entirely non-positive.  Result is stored in *max_eig. */
int hypre_ParCSRMaxEigEstimate(hypre_ParCSRMatrix *A, /* matrix to relax with */
                               int scale, /* scale by diagonal?*/
                               double *max_eig)
{
   double e_max;
   double row_sum, max_norm;
   double *col_val;
   double temp;
   double diag_value;
   int pos_diag, neg_diag;
   HYPRE_BigInt start_row, end_row;
   int row_length;
   HYPRE_BigInt *col_ind;
   int j;
   HYPRE_BigInt i;
   /* estimate with the inf-norm of A - should be ok for SPD matrices */
   start_row = hypre_ParCSRMatrixFirstRowIndex(A);
   end_row = hypre_ParCSRMatrixLastRowIndex(A);
   max_norm = 0.0;
   pos_diag = neg_diag = 0;
   for ( i = start_row; i <= end_row; i++ )
   {
      HYPRE_ParCSRMatrixGetRow((HYPRE_ParCSRMatrix) A, i, &row_length, &col_ind, &col_val);
      row_sum = 0.0;
      /* Fix: reset per row.  Previously diag_value was only assigned when
         row_length > 0, so a zero-length row read an uninitialized value
         (first row) or the previous row's stale diagonal. */
      diag_value = 0.0;
      for (j = 0; j < row_length; j++)
      {
         /* NOTE(review): treats the first stored entry as the diagonal -
            TODO confirm this matches the CSR ordering used by this matrix */
         if (j==0) diag_value = fabs(col_val[j]);
         row_sum += fabs(col_val[j]);
         if ( col_ind[j] == i && col_val[j] > 0.0 ) pos_diag++;
         if ( col_ind[j] == i && col_val[j] < 0.0 ) neg_diag++;
      }
      if (scale)
      {
         /* row sum of D^{-1}A, guarded against a zero diagonal */
         if (diag_value != 0.0)
            row_sum = row_sum/diag_value;
      }
      if ( row_sum > max_norm ) max_norm = row_sum;
      HYPRE_ParCSRMatrixRestoreRow((HYPRE_ParCSRMatrix) A, i, &row_length, &col_ind, &col_val);
   }
   /* get max across procs */
   MPI_Allreduce(&max_norm, &temp, 1, MPI_DOUBLE, MPI_MAX, hypre_ParCSRMatrixComm(A));
   max_norm = temp;
   /* from Charles: if the diagonal is all negative, negate the estimate */
   if ( pos_diag == 0 && neg_diag > 0 ) max_norm = - max_norm;
   /* eig estimates */
   e_max = max_norm;
   /* return */
   *max_eig = e_max;
   return hypre_error_flag;
}
/******************************************************************************
use CG to get the eigenvalue estimate
scale means get eig est of (D^{-1/2} A D^{-1/2}
******************************************************************************/
/* Estimate extreme eigenvalues of A (or of D^{-1/2} A D^{-1/2} when scale
 * is set) by running up to max_iter CG/Lanczos steps on a random residual
 * and solving the resulting tridiagonal eigenproblem with the LINPACK
 * tql1 routine.  Results are returned in *max_eig and *min_eig.
 * NOTE(review): the preconditioner C is currently the identity (see the
 * "TO DO: C = diag scale" below), so s = C*r is a plain copy. */
int hypre_ParCSRMaxEigEstimateCG(hypre_ParCSRMatrix *A, /* matrix to relax with */
int scale, /* scale by diagonal?*/
int max_iter,
double *max_eig,
double *min_eig)
{
int i, j, err;
hypre_ParVector *p;
hypre_ParVector *s;
hypre_ParVector *r;
hypre_ParVector *ds;
hypre_ParVector *u;
double *tridiag; /* diagonal of the Lanczos tridiagonal matrix */
double *trioffd; /* off-diagonal of the Lanczos tridiagonal matrix */
double lambda_max, max_row_sum; /* NOTE(review): max_row_sum is set but never used */
double beta, gamma = 0.0, alpha, sdotp, gamma_old, alphainv;
double diag;
double lambda_min;
double *s_data, *p_data, *ds_data, *u_data;
int local_size = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
double *A_diag_data = hypre_CSRMatrixData(A_diag);
int *A_diag_i = hypre_CSRMatrixI(A_diag);
/* check the size of A - don't iterate more than the size */
HYPRE_BigInt size = hypre_ParCSRMatrixGlobalNumRows(A);
if (size < (HYPRE_BigInt) max_iter)
max_iter = (int) size;
/* create some temp vectors: p, s, r , ds, u*/
r = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(r);
hypre_ParVectorSetPartitioningOwner(r,0);
p = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(p);
hypre_ParVectorSetPartitioningOwner(p,0);
s = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(s);
hypre_ParVectorSetPartitioningOwner(s,0);
ds = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(ds);
hypre_ParVectorSetPartitioningOwner(ds,0);
u = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
hypre_ParCSRMatrixGlobalNumRows(A),
hypre_ParCSRMatrixRowStarts(A));
hypre_ParVectorInitialize(u);
hypre_ParVectorSetPartitioningOwner(u,0);
/* point to local data */
s_data = hypre_VectorData(hypre_ParVectorLocalVector(s));
p_data = hypre_VectorData(hypre_ParVectorLocalVector(p));
ds_data = hypre_VectorData(hypre_ParVectorLocalVector(ds));
u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
/* make room for tri-diag matrix */
tridiag = hypre_CTAlloc(double, max_iter+1);
trioffd = hypre_CTAlloc(double, max_iter+1);
for (i=0; i < max_iter + 1; i++)
{
tridiag[i] = 0;
trioffd[i] = 0;
}
/* set residual to random */
hypre_ParVectorSetRandomValues(r,1);
if (scale)
{
/* ds = D^{-1/2}: inverse square roots of the first stored entry of each
local row - NOTE(review): assumes that entry is the (positive) diagonal */
for (i = 0; i < local_size; i++)
{
diag = A_diag_data[A_diag_i[i]];
ds_data[i] = 1/sqrt(diag);
}
}
else
{
/* set ds to 1 */
hypre_ParVectorSetConstantValues(ds,1.0);
}
/* gamma = <r,Cr> */
/* NOTE(review): p is still zero here so this inner product is 0; gamma is
recomputed from <r,s> inside the loop before its first real use. */
gamma = hypre_ParVectorInnerProd(r,p);
/* for the initial filling of the tridiag matrix */
beta = 1.0;
max_row_sum = 0.0;
i = 0;
while (i < max_iter)
{
/* s = C*r */
/* TO DO: C = diag scale */
hypre_ParVectorCopy(r, s);
/*gamma = <r,Cr> */
gamma_old = gamma;
gamma = hypre_ParVectorInnerProd(r,s);
if (i==0)
{
beta = 1.0;
/* p_0 = C*r */
hypre_ParVectorCopy(s, p);
}
else
{
/* beta = gamma / gamma_old */
beta = gamma / gamma_old;
/* p = s + beta p */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) schedule(static)
#endif
for (j=0; j < local_size; j++)
{
p_data[j] = s_data[j] + beta*p_data[j];
}
}
if (scale)
{
/* s = D^{-1/2}A*D^{-1/2}*p */
for (j = 0; j < local_size; j++)
{
u_data[j] = ds_data[j] * p_data[j];
}
hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, s);
for (j = 0; j < local_size; j++)
{
s_data[j] = ds_data[j] * s_data[j];
}
}
else
{
/* s = A*p */
hypre_ParCSRMatrixMatvec(1.0, A, p, 0.0, s);
}
/* <s,p> */
sdotp = hypre_ParVectorInnerProd(s,p);
/* alpha = gamma / <s,p> */
alpha = gamma/sdotp;
/* accumulate the Lanczos tridiagonal entries from the CG coefficients */
alphainv = 1.0/alpha;
tridiag[i+1] = alphainv;
tridiag[i] *= beta;
tridiag[i] += alphainv;
trioffd[i+1] = alphainv;
trioffd[i] *= sqrt(beta);
/* x = x + alpha*p */
/* don't need - only the coefficients matter, not the CG iterate */
/* r = r - alpha*s */
hypre_ParVectorAxpy( -alpha, s, r);
i++;
}
/* eispack routine - eigenvalues return in tridiag and ordered*/
hypre_LINPACKcgtql1(&i,tridiag,trioffd,&err);
lambda_max = tridiag[i-1];
lambda_min = tridiag[0];
/* printf("linpack max eig est = %g\n", lambda_max);*/
/* printf("linpack min eig est = %g\n", lambda_min);*/
hypre_ParVectorDestroy(r);
hypre_ParVectorDestroy(s);
hypre_ParVectorDestroy(p);
hypre_ParVectorDestroy(ds);
hypre_ParVectorDestroy(u);
/* return */
*max_eig = lambda_max;
*min_eig = lambda_min;
return hypre_error_flag;
}
/******************************************************************************
Chebyshev relaxation - iterative implementation
(See Saad, "Iterative Methods for Sparse Linear Systems", Alg. 12.1;
in addition we can scale the residual by inv(M) = 1/diag(A), which gives
Chebyshev-accelerated Jacobi)
NOT USED CURRENTLY
******************************************************************************/
int hypre_ParCSRRelax_Cheby3(hypre_ParCSRMatrix *A, /* matrix to relax with */
                             hypre_ParVector *f, /* right-hand side */
                             double max_eig, /* u.b = max. e-val est.*1.1 */
                             double eig_ratio, /* l.b = max_eig/eig ratio */
                             int order, /* polynomial order */
                             hypre_ParVector *u, /* initial/updated approximation */
                             hypre_ParVector *v /* temporary vector */,
                             hypre_ParVector *v2 /*another temp vector */ )
{
   /* Chebyshev-accelerated Jacobi smoother, iterative (three-term
      recurrence) form.  See Saad, "Iterative Methods for Sparse Linear
      Systems", Alg. 12.1; the residual is scaled by inv(M) = 1/diag(A)
      so this is Chebyshev-accelerated Jacobi.
      Returns hypre_error_flag; updates u in place, clobbers v and v2. */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   double *A_diag_data = hypre_CSRMatrixData(A_diag);
   int *A_diag_i = hypre_CSRMatrixI(A_diag);
   double *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
   double *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v));
   double *dk = hypre_VectorData(hypre_ParVectorLocalVector(v2)); /* search direction d_k */
   double theta, delta, sigma;
   double p_k, p_kp1, temp1, temp2, diag, scale;
   double upper_bound, lower_bound;
   int i, j;
   int num_rows = hypre_CSRMatrixNumRows(A_diag);
   /* make sure we are large enough - Adams et al. 2003 */
   upper_bound = max_eig * 1.1;
   lower_bound = max_eig/eig_ratio;
   /* Chebyshev parameters: center (theta) and half-width (delta) of the
      targeted eigenvalue interval */
   theta = (upper_bound + lower_bound)/2;
   delta = (upper_bound - lower_bound)/2;
   sigma = theta/delta;
   /* set v = f */
   hypre_ParVectorCopy(f, v);
   /* get residual: v = f-A*u */
   hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, v);
   /* p_0 */
   p_k = 1/sigma;
   /* first order */
   temp1 = 1/theta;
   /* d_0 = 1/theta * inv(M)*r_0  - M is Jacobi (the diagonal of A) */
   /* x_1 = x_0 + d_0 */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,diag,scale) schedule(static)
#endif
   for (i = 0; i < num_rows; i++)
   {
      /* first entry of each diag row is the diagonal (hypre convention) */
      diag = A_diag_data[A_diag_i[i]];
      scale = temp1/diag;
      dk[i] = scale*v_data[i];
      u_data[i] += dk[i];
   }
   /* higher order: three-term Chebyshev recurrence for d_k */
   for (j = 1; j < order; j++)
   {
      /* get residual: v = f-A*u */
      hypre_ParVectorCopy(f, v);
      hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, v);
      p_kp1 = 1.0/(2.0*sigma - p_k);
      temp1 = p_kp1*p_k;
      temp2 = 2.0*p_kp1/delta;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,diag,scale) schedule(static)
#endif
      for (i = 0; i < num_rows; i++)
      {
         diag = A_diag_data[A_diag_i[i]];
         scale = temp2/diag;
         /* d_k = (p_{k+1} p_k) d_{k-1} + (2 p_{k+1}/delta) inv(M) r_k */
         dk[i] = temp1*dk[i] + scale*v_data[i];
         u_data[i] += dk[i];
      }
      p_k = p_kp1;
   }
   return hypre_error_flag;
}
/******************************************************************************
Chebyshev relaxation

Can specify order 1-4 (this is the order of the residual polynomial) - here
we code the coefficients explicitly (instead of determining them iteratively)

variant 0: standard Chebyshev
           (this is relaxation type 11 if scale = 0, and 16 if scale == 1)
variant 1: modified Chebyshev: T(t)*f(t), where f(t) = (1 - b/t)
           (this is relaxation type 15 if scale = 0, and 17 if scale == 1)

eig_ratio indicates the fraction of the whole spectrum to use (so .5
means half, and .1 means 10 percent)
*******************************************************************************/
int hypre_ParCSRRelax_Cheby(hypre_ParCSRMatrix *A, /* matrix to relax with */
                            hypre_ParVector *f, /* right-hand side */
                            double max_eig,      /* estimated largest eigenvalue */
                            double min_eig,      /* estimated smallest eigenvalue */
                            double eig_ratio,    /* fraction of spectrum to target */
                            int order, /* polynomial order (clamped to 1..4) */
                            int scale, /* scale by diagonal?*/
                            int variant, /* 0 = standard cheby, 1 = modified */
                            hypre_ParVector *u, /* initial/updated approximation */
                            hypre_ParVector *v /* temporary vector */,
                            hypre_ParVector *r /*another temp vector */ )
{
   /* Polynomial smoother: u <- u + s(A) r_0 with r_0 = f - A u, where the
      coefficients of s are hard-coded per order/variant below.  If scale
      is set, the polynomial is applied to D^(-1/2) A D^(-1/2) instead of A.
      Returns hypre_error_flag; clobbers v and r. */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   double *A_diag_data = hypre_CSRMatrixData(A_diag);
   int *A_diag_i = hypre_CSRMatrixI(A_diag);
   double *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
   double *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f));
   double *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v));
   double *r_data = hypre_VectorData(hypre_ParVectorLocalVector(r));
   double theta, delta;
   double den;
   double upper_bound, lower_bound;
   int i, j;
   int num_rows = hypre_CSRMatrixNumRows(A_diag);
   double coefs[5];   /* coefficients of s(A); coefs[k] multiplies A^k */
   double mult;
   double *orig_u;    /* copy of the incoming u (the update is u += s(A)r) */
   double tmp_d;
   int cheby_order;
   double *ds_data, *tmp_data;
   double diag;
   hypre_ParVector *ds;      /* only used when scale != 0: D^(-1/2) */
   hypre_ParVector *tmp_vec; /* only used when scale != 0 */
   /* u = u + p(A)r */
   if (order > 4)
      order = 4;
   if (order < 1)
      order = 1;
   /* we are using the order of p(A) */
   cheby_order = order -1;
   /* make sure we are large enough - Adams et al. 2003 */
   upper_bound = max_eig * 1.1;
   /* lower_bound = max_eig/eig_ratio; */
   lower_bound = (upper_bound - min_eig)* eig_ratio + min_eig;
   /* theta and delta: center and half-width of the target interval */
   theta = (upper_bound + lower_bound)/2;
   delta = (upper_bound - lower_bound)/2;
   if (variant == 1 )
   {
      switch ( cheby_order ) /* these are the corresponding cheby polynomials: u = u_o + s(A)r_0  - so order is
                                one less than resid poly: r(t) = 1 - t*s(t) */
      {
         case 0:
            coefs[0] = 1.0/theta;
            break;
         case 1: /* (del - t + 2*th)/(th^2 + del*th) */
            den = (theta*theta + delta*theta);
            coefs[0] = (delta + 2*theta)/den;
            coefs[1] = -1.0/den;
            break;
         case 2: /* (4*del*th - del^2 - t*(2*del + 6*th) + 2*t^2 + 6*th^2)/(2*del*th^2 - del^2*th - del^3 + 2*th^3)*/
            den = 2*delta*theta*theta - delta*delta*theta - pow(delta,3) + 2*pow(theta,3);
            coefs[0] = (4*delta*theta - pow(delta,2) + 6*pow(theta,2))/den;
            coefs[1] = -(2*delta + 6*theta)/den;
            coefs[2] = 2/den;
            break;
         case 3: /* -(6*del^2*th - 12*del*th^2 - t^2*(4*del + 16*th) + t*(12*del*th - 3*del^2 + 24*th^2) + 3*del^3 + 4*t^3 - 16*th^3)/(4*del*th^3 - 3*del^2*th^2 - 3*del^3*th + 4*th^4)*/
            den = - (4*delta*pow(theta,3) - 3*pow(delta,2)*pow(theta,2) - 3*pow(delta,3)*theta + 4*pow(theta,4) );
            coefs[0] = (6*pow(delta,2)*theta - 12*delta*pow(theta,2) + 3*pow(delta,3) - 16*pow(theta,3) )/den;
            coefs[1] = (12*delta*theta - 3*pow(delta,2) + 24*pow(theta,2))/den;
            coefs[2] = -( 4*delta + 16*theta)/den;
            coefs[3] = 4/den;
            break;
      }
   }
   else /* standard chebyshev */
   {
      switch ( cheby_order ) /* these are the corresponding cheby polynomials: u = u_o + s(A)r_0  - so order is
                                one less than resid poly: r(t) = 1 - t*s(t) */
      {
         case 0:
            coefs[0] = 1.0/theta;
            break;
         case 1: /* ( 2*t - 4*th)/(del^2 - 2*th^2) */
            den = delta*delta - 2*theta*theta;
            coefs[0] = -4*theta/den;
            coefs[1] = 2/den;
            break;
         case 2: /* (3*del^2 - 4*t^2 + 12*t*th - 12*th^2)/(3*del^2*th - 4*th^3)*/
            den = 3*(delta*delta)*theta - 4*(theta*theta*theta);
            coefs[0] = (3*delta*delta - 12 *theta*theta)/den;
            coefs[1] = 12*theta/den;
            coefs[2] = -4/den;
            break;
         case 3: /*(t*(8*del^2 - 48*th^2) - 16*del^2*th + 32*t^2*th - 8*t^3 + 32*th^3)/(del^4 - 8*del^2*th^2 + 8*th^4)*/
            den = pow(delta,4) - 8*delta*delta*theta*theta + 8*pow(theta,4);
            coefs[0] = (32*pow(theta,3)- 16*delta*delta*theta)/den;
            coefs[1] = (8*delta*delta - 48*theta*theta)/den;
            coefs[2] = 32*theta/den;
            coefs[3] = -8/den;
            break;
      }
   }
   orig_u = hypre_CTAlloc(double, num_rows);
   if (!scale)
   {
      /* get residual: r = f - A*u */
      hypre_ParVectorCopy(f, r);
      hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, r);
      /* Horner-style evaluation of s(A)r: seed with the highest
         coefficient, then fold in the lower ones below */
      for ( i = 0; i < num_rows; i++ )
      {
         orig_u[i] = u_data[i];
         u_data[i] = r_data[i] * coefs[cheby_order];
      }
      for (i = cheby_order - 1; i >= 0; i-- )
      {
         hypre_ParCSRMatrixMatvec(1.0, A, u, 0.0, v);
         mult = coefs[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) schedule(static)
#endif
         for ( j = 0; j < num_rows; j++ )
         {
            u_data[j] = mult * r_data[j] + v_data[j];
         }
      }
      /* u now holds s(A)r; add the saved original iterate back in */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) schedule(static)
#endif
      for ( i = 0; i < num_rows; i++ )
      {
         u_data[i] = orig_u[i] + u_data[i];
      }
   }
   else /* scaling! */
   {
      /*grab 1/sqrt(diagonal) */
      ds = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                 hypre_ParCSRMatrixGlobalNumRows(A),
                                 hypre_ParCSRMatrixRowStarts(A));
      hypre_ParVectorInitialize(ds);
      hypre_ParVectorSetPartitioningOwner(ds,0);
      ds_data = hypre_VectorData(hypre_ParVectorLocalVector(ds));
      tmp_vec = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                      hypre_ParCSRMatrixGlobalNumRows(A),
                                      hypre_ParCSRMatrixRowStarts(A));
      hypre_ParVectorInitialize(tmp_vec);
      hypre_ParVectorSetPartitioningOwner(tmp_vec,0);
      tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(tmp_vec));
      /* get ds_data and get scaled residual: r = D^(-1/2)f -
       * D^(-1/2)A*u */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j,diag) schedule(static)
#endif
      for (j = 0; j < num_rows; j++)
      {
         /* first entry of each diag row is the diagonal (hypre convention) */
         diag = A_diag_data[A_diag_i[j]];
         ds_data[j] = 1/sqrt(diag);
         r_data[j] = ds_data[j] * f_data[j];
      }
      hypre_ParCSRMatrixMatvec(-1.0, A, u, 0.0, tmp_vec);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) schedule(static)
#endif
      for ( j = 0; j < num_rows; j++ )
      {
         r_data[j] += ds_data[j] * tmp_data[j];
      }
      /* save original u, then start
         the iteration by multiplying r by the cheby coef.*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) schedule(static)
#endif
      for ( j = 0; j < num_rows; j++ )
      {
         orig_u[j] = u_data[j]; /* orig, unscaled u */
         u_data[j] = r_data[j] * coefs[cheby_order];
      }
      /* now do the other coefficients */
      for (i = cheby_order - 1; i >= 0; i-- )
      {
         /* v = D^(-1/2)AD^(-1/2)u */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) schedule(static)
#endif
         for ( j = 0; j < num_rows; j++ )
         {
            tmp_data[j] = ds_data[j] * u_data[j];
         }
         hypre_ParCSRMatrixMatvec(1.0, A, tmp_vec, 0.0, v);
         /* u_new = coef*r + v*/
         mult = coefs[i];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j,tmp_d) schedule(static)
#endif
         for ( j = 0; j < num_rows; j++ )
         {
            tmp_d = ds_data[j]* v_data[j];
            u_data[j] = mult * r_data[j] + tmp_d;
         }
      } /* end of cheby_order loop */
      /* now we have to scale u_data before adding it to u_orig*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) schedule(static)
#endif
      for ( j = 0; j < num_rows; j++ )
      {
         u_data[j] = orig_u[j] + ds_data[j]*u_data[j];
      }
      hypre_ParVectorDestroy(ds);
      hypre_ParVectorDestroy(tmp_vec);
   }/* end of scaling code */
   hypre_TFree(orig_u);
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_BoomerAMGRelax_FCFJacobi
*--------------------------------------------------------------------------*/
int hypre_BoomerAMGRelax_FCFJacobi( hypre_ParCSRMatrix *A,
                                    hypre_ParVector *f,
                                    int *cf_marker,
                                    double relax_weight,
                                    hypre_ParVector *u,
                                    hypre_ParVector *Vtemp)
{
   /* F-C-F Jacobi smoothing: one weighted-Jacobi sweep over the F-points,
      one over the C-points, then one more over the F-points.  On the
      coarsest level cf_marker is NULL and a single sweep over all points
      is performed instead.  Returns hypre_error_flag. */
   const int relax_type = 0;                  /* weighted Jacobi */
   const int sweep_points[3] = { -1, 1, -1 }; /* F, C, F */
   hypre_ParVector *Ztemp = NULL;
   int sweep;

   if (cf_marker == NULL)
   {
      /* coarsest level: one plain Jacobi sweep (relax_points = 0) */
      hypre_BoomerAMGRelax(A, f, cf_marker, relax_type, 0,
                           relax_weight, 0.0, NULL, u, Vtemp, Ztemp);
   }
   else
   {
      for (sweep = 0; sweep < 3; sweep++)
      {
         hypre_BoomerAMGRelax(A, f, cf_marker, relax_type,
                              sweep_points[sweep],
                              relax_weight, 0.0, NULL, u, Vtemp, Ztemp);
      }
   }
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* CG Smoother - if the CG setup is cheap, we can just do it here - for
* now we are doing it in the setup, so this function is a
* bit unnecessary ...
*
*--------------------------------------------------------------------------*/
int hypre_ParCSRRelax_CG( HYPRE_Solver solver,
                          hypre_ParCSRMatrix *A,
                          hypre_ParVector *f,
                          hypre_ParVector *u,
                          int num_its)
{
   /* Run a fixed number of PCG iterations as a smoother.  The PCG solver
      object is assumed to be set up already; only its iteration cap is
      adjusted here.  Returns hypre_error_flag. */
   int iters;
   double resnorm;

   HYPRE_PCGSetMaxIter(solver, num_its); /* max iterations */
   HYPRE_ParCSRPCGSolve(solver, (HYPRE_ParCSRMatrix)A, (HYPRE_ParVector)f, (HYPRE_ParVector)u);

   /* gather statistics (only printed when the debug block is enabled) */
   HYPRE_PCGGetNumIterations(solver, &iters);
   HYPRE_PCGGetFinalRelativeResidualNorm(solver, &resnorm);
#if 0
   {
      int myid;
      MPI_Comm_rank(MPI_COMM_WORLD, &myid);
      if (myid == 0)
      {
         printf(" -----CG PCG Iterations = %d\n", iters);
         printf(" -----CG PCG Final Relative Residual Norm = %e\n", resnorm);
      }
   }
#endif
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* Steepest Descent (Smoother) (Not used)
*
* We don't check for convergence - just do a fixed number of iterations
*--------------------------------------------------------------------------*/
int hypre_ParCSRRelax_SD( hypre_ParCSRMatrix *A,/* matrix to relax with */
                          hypre_ParVector *f, /* right-hand side */
                          hypre_ParVector *u,/* initial/updated approximation */
                          hypre_ParVector *r, /* temporary vector */
                          hypre_ParVector *p, /*another temp vector */
                          int num_its)
{
   /* Steepest-descent smoother: a fixed number of iterations with no
      convergence test.  (Not used currently.)  Returns hypre_error_flag;
      clobbers r and p. */
   int it;
   double step, rr, pr;

   /* initial residual: r = f - A*u */
   hypre_ParVectorCopy(f, r);
   hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, r);

   for (it = 0; it < num_its; it++)
   {
      /* p = A*r */
      hypre_ParCSRMatrixMatvec(1.0, A, r, 0.0, p);

      /* step = (r,r)/(Ar,r); stop early if the denominator vanishes */
      rr = hypre_ParVectorInnerProd( r, r);
      pr = hypre_ParVectorInnerProd( p, r);
      if (pr == 0.0)
      {
         break;
      }
      step = rr/pr;

      /* u = u + step*r,  r = r - step*A*r */
      hypre_ParVectorAxpy( step, r, u);
      hypre_ParVectorAxpy( -step, p, r);
   }
   return hypre_error_flag;
}
/* tql1.f --
   this is the EISPACK tql1 translation - from Barry Smith in PETSc
   (despite the hypre_LINPACKcg* naming, the routine is from EISPACK).
   Note that this routine always uses real numbers (not complex) even
   if the underlying matrix is Hermitian. This is because the Lanczos
   process applied to Hermitian matrices always produces a real,
   symmetric tridiagonal matrix.
*/
double hypre_LINPACKcgpthy(double*,double*);

/* Eigenvalues of a symmetric tridiagonal matrix by the QL method
   (f2c translation of EISPACK tql1); see the embedded original
   documentation below for the ON INPUT / ON OUTPUT contract. */
int hypre_LINPACKcgtql1(int *n,double *d,double *e,int *ierr)
{
   /* System generated locals */
   int i__1,i__2;
   double d__1,d__2,c_b10 = 1.0;
   /* Local variables */
   double c,f,g,h;
   int i,j,l,m;
   double p,r,s,c2,c3 = 0.0;
   int l1,l2;
   double s2 = 0.0;
   int ii;
   double dl1,el1;
   int mml;
   double tst1,tst2;
/*     THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TQL1, */
/*     NUM. MATH. 11, 293-306(1968) BY BOWDLER, MARTIN, REINSCH, AND */
/*     WILKINSON. */
/*     HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 227-240(1971). */
/*     THIS SUBROUTINE FINDS THE EIGENVALUES OF A SYMMETRIC */
/*     TRIDIAGONAL MATRIX BY THE QL METHOD. */
/*     ON INPUT */
/*        N IS THE ORDER OF THE MATRIX. */
/*        D CONTAINS THE DIAGONAL ELEMENTS OF THE INPUT MATRIX. */
/*        E CONTAINS THE SUBDIAGONAL ELEMENTS OF THE INPUT MATRIX */
/*          IN ITS LAST N-1 POSITIONS.  E(1) IS ARBITRARY. */
/*     ON OUTPUT */
/*        D CONTAINS THE EIGENVALUES IN ASCENDING ORDER.  IF AN */
/*          ERROR EXIT IS MADE, THE EIGENVALUES ARE CORRECT AND */
/*          ORDERED FOR INDICES 1,2,...IERR-1, BUT MAY NOT BE */
/*          THE SMALLEST EIGENVALUES. */
/*        E HAS BEEN DESTROYED. */
/*        IERR IS SET TO */
/*          ZERO       FOR NORMAL RETURN, */
/*          J          IF THE J-TH EIGENVALUE HAS NOT BEEN */
/*                     DETERMINED AFTER 30 ITERATIONS. */
/*     CALLS CGPTHY FOR  DSQRT(A*A + B*B) . */
/*     QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, */
/*     MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY
*/
/*     THIS VERSION DATED AUGUST 1983. */
/*     ------------------------------------------------------------------
*/
   double ds;
   /* shift to 1-based (Fortran) indexing for d[] and e[] */
   --e;
   --d;
   *ierr = 0;
   if (*n == 1) {
      goto L1001;
   }
   /* shift subdiagonal entries down one slot; e[n] becomes scratch */
   i__1 = *n;
   for (i = 2; i <= i__1; ++i) {
      e[i - 1] = e[i];
   }
   f = 0.;
   tst1 = 0.;
   e[*n] = 0.;
   i__1 = *n;
   for (l = 1; l <= i__1; ++l) {
      j = 0;
      h = (d__1 = d[l],fabs(d__1)) + (d__2 = e[l],fabs(d__2));
      if (tst1 < h) {
         tst1 = h;
      }
/*     .......... LOOK FOR SMALL SUB-DIAGONAL ELEMENT .......... */
      i__2 = *n;
      for (m = l; m <= i__2; ++m) {
         tst2 = tst1 + (d__1 = e[m],fabs(d__1));
         if (tst2 == tst1) {
            goto L120;
         }
/*     .......... E(N) IS ALWAYS ZERO,SO THERE IS NO EXIT */
/*                THROUGH THE BOTTOM OF THE LOOP .......... */
      }
L120:
      if (m == l) {
         goto L210;
      }
L130:
      if (j == 30) {
         goto L1000;
      }
      ++j;
/*     .......... FORM SHIFT .......... */
      l1 = l + 1;
      l2 = l1 + 1;
      g = d[l];
      p = (d[l1] - g) / (e[l] * 2.);
      r = hypre_LINPACKcgpthy(&p,&c_b10);
      /* ds carries the sign of p (f2c d_sign equivalent) */
      ds = 1.0; if (p < 0.0) ds = -1.0;
      d[l] = e[l] / (p + ds*r);
      d[l1] = e[l] * (p + ds*r);
      dl1 = d[l1];
      h = g - d[l];
      if (l2 > *n) {
         goto L145;
      }
      i__2 = *n;
      for (i = l2; i <= i__2; ++i) {
         d[i] -= h;
      }
L145:
      f += h;
/*     .......... QL TRANSFORMATION .......... */
      p = d[m];
      c = 1.;
      c2 = c;
      el1 = e[l1];
      s = 0.;
      mml = m - l;
/*     .......... FOR I=M-1 STEP -1 UNTIL L DO -- .......... */
      i__2 = mml;
      for (ii = 1; ii <= i__2; ++ii) {
         c3 = c2;
         c2 = c;
         s2 = s;
         i = m - ii;
         g = c * e[i];
         h = c * p;
         r = hypre_LINPACKcgpthy(&p,&e[i]);
         e[i + 1] = s * r;
         s = e[i] / r;
         c = p / r;
         p = c * d[i] - s * g;
         d[i + 1] = h + s * (c * g + s * d[i]);
      }
      p = -s * s2 * c3 * el1 * e[l] / dl1;
      e[l] = s * p;
      d[l] = c * p;
      tst2 = tst1 + (d__1 = e[l],fabs(d__1));
      if (tst2 > tst1) {
         goto L130;
      }
L210:
      p = d[l] + f;
/*     .......... ORDER EIGENVALUES .......... */
      if (l == 1) {
         goto L250;
      }
/*     .......... FOR I=L STEP -1 UNTIL 2 DO -- .......... */
      i__2 = l;
      for (ii = 2; ii <= i__2; ++ii) {
         i = l + 2 - ii;
         if (p >= d[i - 1]) {
            goto L270;
         }
         d[i] = d[i - 1];
      }
L250:
      i = 1;
L270:
      d[i] = p;
   }
   goto L1001;
/*     .......... SET ERROR -- NO CONVERGENCE TO AN */
/*                EIGENVALUE AFTER 30 ITERATIONS .......... */
L1000:
   *ierr = l;
L1001:
   return 0;
} /* cgtql1_ */
double hypre_LINPACKcgpthy(double *a,double *b)
{
/* System generated locals */
double ret_val,d__1,d__2,d__3;
/* Local variables */
double p,r,s,t,u;
/* FINDS DSQRT(A**2+B**2) WITHOUT OVERFLOW OR DESTRUCTIVE UNDERFLOW */
/* Computing MAX */
d__1 = fabs(*a),d__2 = fabs(*b);
p = hypre_max(d__1,d__2);
if (!p) {
goto L20;
}
/* Computing MIN */
d__2 = fabs(*a),d__3 = fabs(*b);
/* Computing 2nd power */
d__1 = hypre_min(d__2,d__3) / p;
r = d__1 * d__1;
L10:
t = r + 4.;
if (t == 4.) {
goto L20;
}
s = r / t;
u = s * 2. + 1.;
p = u * p;
/* Computing 2nd power */
d__1 = s / u;
r = d__1 * d__1 * r;
goto L10;
L20:
ret_val = p;
return ret_val;
} /* cgpthy_ */
#if 0
/* NOTE: dead code (compiled out).  Experimental Chebyshev variant where
   the residual is scaled by (I - A/upper_bound) instead of inv(diag(A));
   kept for reference only. */
int hypre_ParCSRRelax_Cheby2(hypre_ParCSRMatrix *A, /* matrix to relax with */
                             hypre_ParVector *f, /* right-hand side */
                             double max_eig, /* u.b = max. e-val est.*1.1 */
                             double eig_ratio, /* l.b = max_eig/eig ratio */
                             int order, /* polynomial order */
                             hypre_ParVector *u, /* initial/updated approximation */
                             hypre_ParVector *v /* temporary vector */,
                             hypre_ParVector *v2 /*another temp vector */ )
{
   /* See Saad "Iterative Methods for Sparse Linear Systems", Alg. 12.1 */
   /* r_m = Tm(r_0) - plus we scale residual by SCALE = (1-A/u.b.) */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   double *A_diag_data = hypre_CSRMatrixData(A_diag);
   int *A_diag_i = hypre_CSRMatrixI(A_diag);
   double *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
   double *f_data = hypre_VectorData(hypre_ParVectorLocalVector(f));
   double *v_data = hypre_VectorData(hypre_ParVectorLocalVector(v));
   double *dk = hypre_VectorData(hypre_ParVectorLocalVector(v2));
   double theta, delta, sigma;
   double p_k, p_kp1, temp1, temp2, diag, scale;
   double zero = 0.0; /* unused */
   double upper_bound, lower_bound;
   int i, j;
   int num_rows = hypre_CSRMatrixNumRows(A_diag);
   hypre_ParVector *Ztemp;
   Ztemp = hypre_ParVectorCreate(hypre_ParCSRMatrixComm(A),
                                 hypre_ParCSRMatrixGlobalNumRows(A),
                                 hypre_ParCSRMatrixRowStarts(A));
   hypre_ParVectorInitialize(Ztemp);
   hypre_ParVectorSetPartitioningOwner(Ztemp,0);
   /* make sure we are large enough - Adams et al. 2003 */
   upper_bound = max_eig * 1.1;
   lower_bound = max_eig/eig_ratio;
   /* parameters */
   theta = (upper_bound + lower_bound)/2;
   delta = (upper_bound - lower_bound)/2;
   sigma = theta/delta;
   /* set v = f */
   hypre_ParVectorCopy(f, v);
   /* get residual: v = f-A*u */
   hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, v);
   /* p_0*/
   p_k = 1/sigma;
   /*first order */
   temp1 = 1/theta;
   /*d_0* = 1/theta * SCALE*r_0 */
   /* x_1 = x_0 + d_0 */
   /* NEW PART*/
   /* z = A*v */
   hypre_ParCSRMatrixMatvec(1.0, A, v, 0.0, Ztemp);
   /* v = v - Ztemp/u.b. */
   scale = -1.0/upper_bound;
   hypre_ParVectorAxpy(scale, Ztemp, v);
   /* END NEW */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,diag,scale) schedule(static)
#endif
   for (i = 0; i < num_rows; i++)
   {
      diag = 1; /* no Jacobi scaling in this variant */
      scale = temp1/diag;
      dk[i] = scale*v_data[i];
      u_data[i] += dk[i];
   }
   /* higher order */
   for (j = 1; j < order; j++)
   {
      /* get residual: v = f-A*u */
      hypre_ParVectorCopy(f, v);
      hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, v);
      p_kp1 = 1.0/(2.0*sigma - p_k);
      temp1 = p_kp1*p_k;
      temp2 = 2.0*p_kp1/delta;
      /* NEW PART*/
      /* still do jacobi */
      /* z = A*v */
      hypre_ParCSRMatrixMatvec(1.0, A, v, 0.0, Ztemp);
      /* v = v - Ztemp/u.b. */
      scale = -1.0/upper_bound;
      hypre_ParVectorAxpy(scale, Ztemp, v);
      /* END NEW */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,diag,scale) schedule(static)
#endif
      for (i = 0; i < num_rows; i++)
      {
         diag = 1;
         scale = temp2/diag;
         dk[i] = temp1*dk[i] + scale*v_data[i];
         u_data[i] += dk[i];
      }
      p_k = p_kp1;
   }
   hypre_ParVectorDestroy(Ztemp);
   return hypre_error_flag;
}
#endif
/*------------------------------------------------------------------------
   For each row compute theta_i = a_ii / sum_j |a_ij| over the
   off-processor (offd) part of the row; we want the minimum over
   all rows.
*--------------------------------------------------------------------------*/
/* Estimate theta = min_i ( a_ii / sum_j |a_offd_ij| ), the minimum ratio of
 * the diagonal to the l1 norm of the off-processor part of each row.
 * Debug/diagnostic routine: it also prints any positive off-diagonal
 * entries it encounters and the row attaining the minimum.
 *
 * Fixes relative to the original:
 *  - 'diag' is now initialized per row (it was read uninitialized for any
 *    row with no stored diagonal entry);
 *  - the rank is taken from the matrix communicator rather than
 *    MPI_COMM_WORLD, so the routine also works on subcommunicators.
 */
int hypre_ParCSRComputeTheta(hypre_ParCSRMatrix *A,
                             double *theta_est)
{
   int i, j;
   int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   int *A_diag_I = hypre_CSRMatrixI(A_diag);
   int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   double *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   int *A_offd_I = hypre_CSRMatrixI(A_offd);
   int *A_offd_J = hypre_CSRMatrixJ(A_offd);
   double *A_offd_data = hypre_CSRMatrixData(A_offd);
   int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   double diag, offd_sum;
   double theta, ratio;
   int min_row = 0;
   int my_id;

   MPI_Comm_rank(hypre_ParCSRMatrixComm(A), &my_id);

   theta = 1e9;
   for (i = 0; i < num_rows; i++)
   {
      /* get the diag element of the ith row; 0.0 if the row stores none */
      diag = 0.0;
      for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
      {
         if (A_diag_J[j] == i)
         {
            diag = A_diag_data[j];
            /* break; */
         }
         else
         {
            /* report unexpected positive off-diagonal (diag block) entries */
            if (A_diag_data[j] > 0.0)
            {
               printf("MYID = %d, row = %d, DIAG_col = %d, val = %g \n", my_id, i, A_diag_J[j], A_diag_data[j]);
            }
         }
      }
      /* l1 norm of the off-processor part of the ith row */
      offd_sum = 0.0;
      if (num_cols_offd )
      {
         for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
         {
            offd_sum += fabs(A_offd_data[j]);
            /* report unexpected positive off-diagonal (offd block) entries */
            if (A_offd_data[j] > 0.0)
            {
               printf("MYID = %d, row = %d, OFFD_col = %d, val = %g \n", my_id, i, A_offd_J[j], A_offd_data[j]);
            }
         }
      }
      /* track the minimum ratio and the row where it occurs */
      if (offd_sum > 0.0)
      {
         ratio = diag/offd_sum;
         if (ratio < theta)
         {
            theta = ratio;
            min_row = i;
         }
      }
   }
   printf("MYID = %d, Min Row = %d\n",my_id, min_row);
   *theta_est = theta;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRComputeL1Norms Threads
*
* Compute the l1 norms of the rows of a given matrix, depending on
* the option parameter:
*
* option 1 = Compute the l1 norm of the rows
* option 2 = Compute the l1 norm of the (processor) off-diagonal
* part of the rows plus the diagonal of A
* option 3 = Compute the l2 norm^2 of the rows
* option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
* Smoothers for Ultra-Parallel Computing" (with or without CF)
*--------------------------------------------------------------------------*/
int hypre_ParCSRComputeL1Norms(hypre_ParCSRMatrix *A,
                               int option,
                               int *cf_marker,
                               double **l1_norm_ptr)
{
   /* Compute a row "l1 norm" vector for A according to 'option' (see the
      banner comment above).  If cf_marker is non-NULL, sums are restricted
      to entries whose column has the same C/F flag as the row.
      On success *l1_norm_ptr receives a newly allocated array of length
      num_rows (caller owns/frees it).  Signals hypre_error_in_arg(1) if
      any computed norm is (near) zero. */
   int i, j;
   int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   int *A_diag_I = hypre_CSRMatrixI(A_diag);
   int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   double *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   int *A_offd_I = hypre_CSRMatrixI(A_offd);
   int *A_offd_J = hypre_CSRMatrixJ(A_offd);
   double *A_offd_data = hypre_CSRMatrixData(A_offd);
   int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   double *l1_norm = hypre_CTAlloc(double, num_rows);
   int *cf_marker_offd = NULL;
   int cf_diag;
   double diag;
   if (cf_marker != NULL)
   {
      /*-------------------------------------------------------------------
       * Get the CF_marker data for the off-processor columns of A
       *-------------------------------------------------------------------*/
      int index;
      int num_sends;
      int start;
      int *int_buf_data = NULL;
      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;
      if (num_cols_offd)
         cf_marker_offd = hypre_CTAlloc(int, num_cols_offd);
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
         int_buf_data = hypre_CTAlloc(int,
                                      hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
      /* pack the local CF flags that neighboring processes need */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      /* exchange: fills cf_marker_offd with the remote CF flags */
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data);
   }
   if (option == 1)
   {
      /* option 1: full l1 norm of each row (diag + offd parts) */
      for (i = 0; i < num_rows; i++)
      {
         l1_norm[i] = 0.0;
         if (cf_marker == NULL)
         {
            /* Add the l1 norm of the diag part of the ith row */
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               l1_norm[i] += fabs(A_diag_data[j]);
            /* Add the l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
         else
         {
            cf_diag = cf_marker[i];
            /* Add the CF l1 norm of the diag part of the ith row */
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               if (cf_diag == cf_marker[A_diag_J[j]])
                  l1_norm[i] += fabs(A_diag_data[j]);
            /* Add the CF l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
      }
   }
   else if (option == 2)
   {
      /* option 2: |a_ii| plus the l1 norm of the off-processor part */
      for (i = 0; i < num_rows; i++)
      {
         /* Add the diag element of the ith row
            (relies on hypre storing the diagonal first in each diag row) */
         l1_norm[i] = fabs(A_diag_data[A_diag_I[i]]);
         if (cf_marker == NULL)
         {
            /* Add the l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
         else
         {
            cf_diag = cf_marker[i];
            /* Add the CF l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     l1_norm[i] += fabs(A_offd_data[j]);
            }
         }
      }
   }
   else if (option == 3)
   {
      /* option 3: squared l2 norm of each row (CF marker ignored) */
      for (i = 0; i < num_rows; i++)
      {
         l1_norm[i] = 0.0;
         for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
            l1_norm[i] += A_diag_data[j] * A_diag_data[j];
         if (num_cols_offd)
            for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
               l1_norm[i] += A_offd_data[j] * A_offd_data[j];
      }
   }
   else if (option == 4)
   {
      /* option 4: like option 2 but the offd contribution is halved and
         the result is truncated back to the diagonal when small enough
         (Remark 6.2, "Multigrid Smoothers for Ultra-Parallel Computing") */
      for (i = 0; i < num_rows; i++)
      {
         /* Add the diag element of the ith row
            (relies on hypre storing the diagonal first in each diag row) */
         diag = l1_norm[i] = fabs(A_diag_data[A_diag_I[i]]);
         if (cf_marker == NULL)
         {
            /* Add the scaled l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += 0.5*fabs(A_offd_data[j]);
            }
         }
         else
         {
            cf_diag = cf_marker[i];
            /* Add the scaled CF l1 norm of the offd part of the ith row */
            if (num_cols_offd)
            {
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  if (cf_diag == cf_marker_offd[A_offd_J[j]])
                     l1_norm[i] += 0.5*fabs(A_offd_data[j]);
            }
         }
         /* Truncate according to Remark 6.2 */
         if (l1_norm[i] <= 4.0/3.0*diag)
            l1_norm[i] = diag;
      }
   }
   /* Handle negative definite matrices: flip the sign where a_ii < 0 */
   for (i = 0; i < num_rows; i++)
      if (A_diag_data[A_diag_I[i]] < 0)
         l1_norm[i] = -l1_norm[i];
   /* a (near-)zero norm would make the smoother divide by zero */
   for (i = 0; i < num_rows; i++)
      if (fabs(l1_norm[i]) < DBL_EPSILON)
      {
         hypre_error_in_arg(1);
         break;
      }
   hypre_TFree(cf_marker_offd);
   *l1_norm_ptr = l1_norm;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRComputeL1Norms Threads
*
* Compute the l1 norms of the rows of a given matrix, depending on
* the option parameter:
*
* option 1 = Compute the l1 norm of the rows
* option 2 = Compute the l1 norm of the (processor) off-diagonal
* part of the rows plus the diagonal of A
* option 3 = Compute the l2 norm^2 of the rows
* option 4 = Truncated version of option 2 based on Remark 6.2 in "Multigrid
* Smoothers for Ultra-Parallel Computing" (with or without CF)
*--------------------------------------------------------------------------*/
/*
 * Compute per-row "l1" smoothing norms for the ParCSR matrix A, partitioned
 * over num_threads contiguous row blocks (mirroring the threaded smoothers
 * that consume these norms).  The result array is allocated here; ownership
 * passes to the caller via *l1_norm_ptr (free with hypre_TFree).
 *
 *   option == 1 : l1 norm of the whole row (restricted to entries whose
 *                 CF marker matches the row's own when cf_marker is given).
 *   option == 2 : |entries| of the diagonal plus all couplings that leave
 *                 this thread's row block or this processor.
 *   option == 3 : squared l2 norm of the row (cf_marker ignored).
 *   option == 4 : |a_ii| + 0.5 * off-block couplings, truncated down to
 *                 |a_ii| when the sum is <= (4/3)|a_ii| ("Remark 6.2").
 *
 * Returns hypre_error_flag; an error is flagged if any row norm is
 * numerically zero (the smoothers divide by these norms).
 */
int hypre_ParCSRComputeL1NormsThreads(hypre_ParCSRMatrix *A,
                                      int option,
                                      int num_threads,
                                      int *cf_marker,
                                      double **l1_norm_ptr)
{
   int i, j, k;
   int num_rows = hypre_ParCSRMatrixNumRows(A);
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   int *A_diag_I = hypre_CSRMatrixI(A_diag);
   int *A_diag_J = hypre_CSRMatrixJ(A_diag);
   double *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   int *A_offd_I = hypre_CSRMatrixI(A_offd);
   int *A_offd_J = hypre_CSRMatrixJ(A_offd);
   double *A_offd_data = hypre_CSRMatrixData(A_offd);
   int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
   double *l1_norm = hypre_CTAlloc(double, num_rows);
   int ii, ns, ne, rest, size;
   double res;             /* unused; kept because it is named in HYPRE_SMP_PRIVATE */
   int *cf_marker_offd = NULL;
   int cf_diag;
   double diag;

   if (cf_marker != NULL)
   {
      /*-------------------------------------------------------------------
       * Get the CF_marker data for the off-processor columns of A
       *-------------------------------------------------------------------*/
      int index;
      int num_sends;
      int start;
      int *int_buf_data = NULL;
      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
      hypre_ParCSRCommHandle *comm_handle;

      if (num_cols_offd)
         cf_marker_offd = hypre_CTAlloc(int, num_cols_offd);
      num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      if (hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends))
         int_buf_data = hypre_CTAlloc(int,
                           hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
      /* Pack the local CF markers that neighboring processors need. */
      index = 0;
      for (i = 0; i < num_sends; i++)
      {
         start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
         for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
         {
            int_buf_data[index++] = cf_marker[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
         }
      }
      comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
                                                  cf_marker_offd);
      hypre_ParCSRCommHandleDestroy(comm_handle);
      hypre_TFree(int_buf_data);
   }

#define HYPRE_SMP_PRIVATE i,ii,j,k,ns,ne,res,rest,size,cf_diag,diag
#include "../utilities/hypre_smp_forloop.h"
   for (k = 0; k < num_threads; k++)
   {
      /* Rows [ns, ne) form thread k's contiguous block; the first `rest`
       * threads get one extra row when num_rows % num_threads != 0. */
      size = num_rows/num_threads;
      rest = num_rows - size*num_threads;
      if (k < rest)
      {
         ns = k*size+k;
         ne = (k+1)*size+k+1;
      }
      else
      {
         ns = k*size+rest;
         ne = (k+1)*size+rest;
      }

      if (option == 1)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the l1 norm of the diag part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  l1_norm[i] += fabs(A_diag_data[j]);
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
            else
            {
               /* Restrict the sum to entries in the same CF class as row i. */
               cf_diag = cf_marker[i];
               /* Add the CF l1 norm of the diag part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
                  if (cf_diag == cf_marker[A_diag_J[j]])
                     l1_norm[i] += fabs(A_diag_data[j]);
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
         }
      }
      else if (option == 2)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if (ii == i || ii < ns || ii >= ne)
                     l1_norm[i] += fabs(A_diag_data[j]);
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if ((ii == i || ii < ns || ii >= ne) &&
                      (cf_diag == cf_marker[A_diag_J[j]]))
                     l1_norm[i] += fabs(A_diag_data[j]);
               }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += fabs(A_offd_data[j]);
               }
            }
         }
      }
      else if (option == 3)
      {
         /* Squared l2 norm of the full row (no CF restriction). */
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               l1_norm[i] += A_diag_data[j] * A_diag_data[j];
            if (num_cols_offd)
               for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                  l1_norm[i] += A_offd_data[j] * A_offd_data[j];
         }
      }
      else if (option == 4)
      {
         for (i = ns; i < ne; i++)
         {
            l1_norm[i] = 0.0;
            /* BUGFIX: diag was previously left unset when a row had no
             * (matching) diagonal entry, so the truncation test below read
             * a stale value from the previous row (or garbage on the first
             * row).  Reset it per row. */
            diag = 0.0;
            if (cf_marker == NULL)
            {
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if (ii == i || ii < ns || ii >= ne)
                  {
                     if (ii == i)
                     {
                        diag = fabs(A_diag_data[j]);
                        l1_norm[i] += fabs(A_diag_data[j]);
                     }
                     else
                        l1_norm[i] += 0.5*fabs(A_diag_data[j]);
                  }
               }
               /* Add the l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     l1_norm[i] += 0.5*fabs(A_offd_data[j]);
               }
            }
            else
            {
               cf_diag = cf_marker[i];
               /* Add the diagonal and the local off-thread part of the ith row */
               for (j = A_diag_I[i]; j < A_diag_I[i+1]; j++)
               {
                  ii = A_diag_J[j];
                  if ((ii == i || ii < ns || ii >= ne) &&
                      (cf_diag == cf_marker[A_diag_J[j]]))
                  {
                     if (ii == i)
                     {
                        diag = fabs(A_diag_data[j]);
                        l1_norm[i] += fabs(A_diag_data[j]);
                     }
                     else
                        l1_norm[i] += 0.5*fabs(A_diag_data[j]);
                  }
               }
               /* Add the CF l1 norm of the offd part of the ith row */
               if (num_cols_offd)
               {
                  for (j = A_offd_I[i]; j < A_offd_I[i+1]; j++)
                     if (cf_diag == cf_marker_offd[A_offd_J[j]])
                        l1_norm[i] += 0.5*fabs(A_offd_data[j]);
               }
            }
            /* Truncate according to Remark 6.2 */
            if (l1_norm[i] <= 4.0/3.0*diag)
               l1_norm[i] = diag;
         }
      }

      /* Handle negative definite matrices: flip the sign when the first
       * stored entry of the row is negative.  NOTE(review): this assumes
       * the diagonal is stored first in A_diag (hypre convention) --
       * confirm against the matrix assembly. */
      for (i = ns; i < ne; i++)
         if (A_diag_data[A_diag_I[i]] < 0)
            l1_norm[i] = -l1_norm[i];

      /* A (nearly) zero norm would make the smoothers divide by zero. */
      for (i = ns; i < ne; i++)
         if (fabs(l1_norm[i]) < DBL_EPSILON)
         {
            hypre_error_in_arg(1);
            break;
         }
   }

   hypre_TFree(cf_marker_offd);
   *l1_norm_ptr = l1_norm;
   return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRRelax_L1 (Symm GS / SSOR)
*--------------------------------------------------------------------------*/
/*
 * hypre_ParCSRRelax_L1: one symmetric sweep (forward then backward) of the
 * l1-scaled hybrid Gauss-Seidel / SSOR smoother for A u = f.  Each thread
 * does Gauss-Seidel within its contiguous block of rows and Jacobi-style
 * updates (from the sweep-start copy held in Ztemp) across block and
 * processor boundaries.  Updates divide by l1_norms[i] instead of a_ii, so
 * l1_norms must be nonzero (see hypre_ParCSRComputeL1NormsThreads).
 *
 * u is updated in place; Vtemp and Ztemp are scratch vectors.
 * Returns relax_error, which is always 0 here.
 */
int hypre_ParCSRRelax_L1( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
double relax_weight,
double omega,
double *l1_norms,
hypre_ParVector *u,
hypre_ParVector *Vtemp,
hypre_ParVector *Ztemp)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
double *A_diag_data = hypre_CSRMatrixData(A_diag);
int *A_diag_i = hypre_CSRMatrixI(A_diag);
int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
int *A_offd_i = hypre_CSRMatrixI(A_offd);
double *A_offd_data = hypre_CSRMatrixData(A_offd);
int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
int n = hypre_CSRMatrixNumRows(A_diag);
int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
double *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
double *f_data = hypre_VectorData(f_local);
hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
double *Vtemp_data = hypre_VectorData(Vtemp_local);
double *Vext_data;   /* received ghost values of u for off-processor columns */
double *v_buf_data;  /* send buffer for the ghost exchange */
double *tmp_data;    /* sweep-start copy of u (aliases Ztemp's data) */
int i, j;
int ii, jj;
int ns, ne, size, rest;
int relax_error = 0;
int num_sends;
int index, start;
int num_procs, num_threads, my_id ;
double zero = 0.0;
double res, res2;
hypre_Vector *Ztemp_local;
double *Ztemp_data;
MPI_Comm_size(comm,&num_procs);
MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
/*-----------------------------------------------------------------
 * Exchange boundary values of u with neighboring processors:
 * Vext_data receives the off-processor entries referenced by A_offd.
 * NOTE(review): in serial runs (num_procs == 1) Vext_data is never
 * allocated; this relies on the A_offd row loops below being empty --
 * verify A_offd has no entries in that case.
 *-----------------------------------------------------------------*/
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(double,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
Vext_data = hypre_CTAlloc(double,num_cols_offd);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
/* Pack the locally-owned u entries that neighbors need. */
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
Vext_data);
/*-----------------------------------------------------------------
 * Wait for the exchange to finish before relaxing.
 *-----------------------------------------------------------------*/
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
 * Relax all points.
 *-----------------------------------------------------------------*/
Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
Ztemp_data = hypre_VectorData(Ztemp_local);
if (relax_weight == 1 && omega == 1)
{
/* Unweighted case: u_i += (f - A u)_i / l1_norm_i.
 * tmp_data keeps the sweep-start values used for off-block couplings. */
/*tmp_data = hypre_CTAlloc(double,n);*/
tmp_data = Ztemp_data;
#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#define HYPRE_SMP_PRIVATE i,ii,j,jj,ns,ne,res,rest,size
#include "../utilities/hypre_smp_forloop.h"
for (j = 0; j < num_threads; j++)
{
/* Rows [ns,ne) are thread j's block; the first `rest` threads take
 * one extra row when n is not divisible by num_threads. */
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
/* Forward Gauss-Seidel sweep over this thread's rows. */
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
 * If diagonal is nonzero, relax point i; otherwise, skip it.
 * NOTE(review): tests the first stored entry of the row --
 * assumes the diagonal is stored first in A_diag.
 *-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
/* in-block coupling: use the freshest value (Gauss-Seidel) */
res -= A_diag_data[jj] * u_data[ii];
}
else
/* off-block coupling: use sweep-start value (Jacobi) */
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
/* Backward sweep over the same rows makes the smoother symmetric. */
for (i = ne-1; i > ns-1; i--) /* interior points first */
{
/*-----------------------------------------------------------
 * If diagonal is nonzero, relax point i; otherwise, skip it.
 *-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
else
{
/* Weighted SSOR variant: u_i += (c1*res + c2*res2)/l1_norm_i, where res
 * is the current residual and res2 accumulates corrections from rows of
 * this block already updated in the sweep (old value kept in Vtemp). */
double c1 = omega*relax_weight;
double c2 = omega*(1.0-relax_weight);
/* tmp_data = hypre_CTAlloc(double,n); */
tmp_data = Ztemp_data;
#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"
for (i = 0; i < n; i++)
{
tmp_data[i] = u_data[i];
}
#define HYPRE_SMP_PRIVATE i,ii,j,jj,ns,ne,res,rest,size
#include "../utilities/hypre_smp_forloop.h"
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
/* Forward sweep; Vtemp records each row's pre-update value. */
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
 * If diagonal is nonzero, relax point i; otherwise, skip it.
 *-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res2 = 0.0;
res = f_data[i];
Vtemp_data[i] = u_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
if (ii < i)
/* already updated this sweep: (old - new) correction */
res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]);
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += (c1*res + c2*res2) / l1_norms[i];
}
}
/* Backward sweep (reads the Vtemp values stored above). */
for (i = ne-1; i > ns-1; i--) /* interior points first */
{
/*-----------------------------------------------------------
 * If diagonal is nonzero, relax point i; otherwise, skip it.
 *-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res2 = 0.0;
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
if (ii > i)
res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]);
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += (c1*res + c2*res2) / l1_norms[i];
}
}
}
}
/* Release communication buffers (only allocated in parallel runs). */
if (num_procs > 1)
{
hypre_TFree(Vext_data);
hypre_TFree(v_buf_data);
}
return(relax_error);
}
/*--------------------------------------------------------------------------
* hypre_ParCSRRelax_L1_GS (GS / SOR) (NOT SYM)
*--------------------------------------------------------------------------*/
/*
 * hypre_ParCSRRelax_L1_GS: one FORWARD-only sweep of the l1-scaled hybrid
 * Gauss-Seidel / SOR smoother for A u = f (the non-symmetric counterpart
 * of hypre_ParCSRRelax_L1: same update formulas, no backward sweep).
 * Each thread does Gauss-Seidel within its contiguous row block and
 * Jacobi-style updates (from the sweep-start copy in Ztemp) across block
 * and processor boundaries.  Divides by l1_norms[i]; these must be nonzero.
 *
 * u is updated in place; Vtemp and Ztemp are scratch vectors.
 * Returns relax_error, which is always 0 here.
 */
int hypre_ParCSRRelax_L1_GS( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
double relax_weight,
double omega,
double *l1_norms,
hypre_ParVector *u,
hypre_ParVector *Vtemp,
hypre_ParVector *Ztemp)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
double *A_diag_data = hypre_CSRMatrixData(A_diag);
int *A_diag_i = hypre_CSRMatrixI(A_diag);
int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
int *A_offd_i = hypre_CSRMatrixI(A_offd);
double *A_offd_data = hypre_CSRMatrixData(A_offd);
int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
int n = hypre_CSRMatrixNumRows(A_diag);
int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
double *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
double *f_data = hypre_VectorData(f_local);
hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
double *Vtemp_data = hypre_VectorData(Vtemp_local);
double *Vext_data;   /* received ghost values of u for off-processor columns */
double *v_buf_data;  /* send buffer for the ghost exchange */
double *tmp_data;    /* sweep-start copy of u (aliases Ztemp's data) */
int i, j;
int ii, jj;
int ns, ne, size, rest;
int relax_error = 0;
int num_sends;
int index, start;
int num_procs, num_threads, my_id ;
double zero = 0.0;
double res, res2;
hypre_Vector *Ztemp_local;
double *Ztemp_data;
MPI_Comm_size(comm,&num_procs);
MPI_Comm_rank(comm,&my_id);
num_threads = hypre_NumThreads();
/*-----------------------------------------------------------------
 * Exchange boundary values of u with neighboring processors:
 * Vext_data receives the off-processor entries referenced by A_offd.
 * NOTE(review): in serial runs Vext_data is never allocated; relies on
 * the A_offd row loops below being empty in that case.
 *-----------------------------------------------------------------*/
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(double,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
Vext_data = hypre_CTAlloc(double,num_cols_offd);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
/* Pack the locally-owned u entries that neighbors need. */
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg,i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
Vext_data);
/*-----------------------------------------------------------------
 * Wait for the exchange to finish before relaxing.
 *-----------------------------------------------------------------*/
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
 * Relax all points.
 *-----------------------------------------------------------------*/
Ztemp_local = hypre_ParVectorLocalVector(Ztemp);
Ztemp_data = hypre_VectorData(Ztemp_local);
if (relax_weight == 1 && omega == 1)
{
/* Unweighted case: u_i += (f - A u)_i / l1_norm_i. */
/*tmp_data = hypre_CTAlloc(double,n);*/
tmp_data = Ztemp_data;
#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"
for (i = 0; i < n; i++)
tmp_data[i] = u_data[i];
#define HYPRE_SMP_PRIVATE i,ii,j,jj,ns,ne,res,rest,size
#include "../utilities/hypre_smp_forloop.h"
for (j = 0; j < num_threads; j++)
{
/* Rows [ns,ne) are thread j's block; the first `rest` threads take
 * one extra row when n is not divisible by num_threads. */
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
/* Single forward Gauss-Seidel sweep over this thread's rows. */
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
 * If diagonal is nonzero, relax point i; otherwise, skip it.
 * NOTE(review): tests the first stored entry of the row --
 * assumes the diagonal is stored first in A_diag.
 *-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
/* in-block coupling: freshest value (Gauss-Seidel) */
res -= A_diag_data[jj] * u_data[ii];
}
else
/* off-block coupling: sweep-start value (Jacobi) */
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += res / l1_norms[i];
}
}
}
}
else
{
/* Weighted SOR variant: u_i += (c1*res + c2*res2)/l1_norm_i, where res2
 * accumulates corrections from rows of this block already updated in
 * the sweep (pre-update value kept in Vtemp). */
double c1 = omega*relax_weight;
double c2 = omega*(1.0-relax_weight);
/* tmp_data = hypre_CTAlloc(double,n); */
tmp_data = Ztemp_data;
#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"
for (i = 0; i < n; i++)
{
tmp_data[i] = u_data[i];
}
#define HYPRE_SMP_PRIVATE i,ii,j,jj,ns,ne,res,rest,size
#include "../utilities/hypre_smp_forloop.h"
for (j = 0; j < num_threads; j++)
{
size = n/num_threads;
rest = n - size*num_threads;
if (j < rest)
{
ns = j*size+j;
ne = (j+1)*size+j+1;
}
else
{
ns = j*size+rest;
ne = (j+1)*size+rest;
}
for (i = ns; i < ne; i++) /* interior points first */
{
/*-----------------------------------------------------------
 * If diagonal is nonzero, relax point i; otherwise, skip it.
 *-----------------------------------------------------------*/
if ( A_diag_data[A_diag_i[i]] != zero)
{
res2 = 0.0;
res = f_data[i];
Vtemp_data[i] = u_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
if (ii >= ns && ii < ne)
{
res -= A_diag_data[jj] * u_data[ii];
if (ii < i)
/* already updated this sweep: (old - new) correction */
res2 += A_diag_data[jj] * (Vtemp_data[ii] - u_data[ii]);
}
else
res -= A_diag_data[jj] * tmp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += (c1*res + c2*res2) / l1_norms[i];
}
}
}
}
/* Release communication buffers (only allocated in parallel runs). */
if (num_procs > 1)
{
hypre_TFree(Vext_data);
hypre_TFree(v_buf_data);
}
return(relax_error);
}
/*--------------------------------------------------------------------------
* hypre_ParCSRRelax_L1_Jacobi (allows CF)
u += w D^{-1}(f - A u), where D_ii = ||A(i,:)||_1
*--------------------------------------------------------------------------*/
/*
 * hypre_ParCSRRelax_L1_Jacobi: one weighted l1-Jacobi sweep,
 *     u += relax_weight * D^{-1} (f - A u),   D_ii = l1_norms[i].
 * When cf_marker is non-NULL and relax_points != 0, only rows whose marker
 * equals relax_points are updated (C- or F-point relaxation).
 * Vtemp receives a copy of the input u (the sweep reads it so that all
 * updates use the same old iterate).  Always returns 0.
 */
int hypre_ParCSRRelax_L1_Jacobi( hypre_ParCSRMatrix *A,
hypre_ParVector *f,
int *cf_marker,
int relax_points,
double relax_weight,
double *l1_norms,
hypre_ParVector *u,
hypre_ParVector *Vtemp )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
double *A_diag_data = hypre_CSRMatrixData(A_diag);
int *A_diag_i = hypre_CSRMatrixI(A_diag);
int *A_diag_j = hypre_CSRMatrixJ(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
int *A_offd_i = hypre_CSRMatrixI(A_offd);
double *A_offd_data = hypre_CSRMatrixData(A_offd);
int *A_offd_j = hypre_CSRMatrixJ(A_offd);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
int n = hypre_CSRMatrixNumRows(A_diag);
int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
hypre_Vector *u_local = hypre_ParVectorLocalVector(u);
double *u_data = hypre_VectorData(u_local);
hypre_Vector *f_local = hypre_ParVectorLocalVector(f);
double *f_data = hypre_VectorData(f_local);
hypre_Vector *Vtemp_local = hypre_ParVectorLocalVector(Vtemp);
double *Vtemp_data = hypre_VectorData(Vtemp_local);
double *Vext_data;   /* received ghost values of u for off-processor columns */
double *v_buf_data;  /* send buffer for the ghost exchange */
int i, j;
int ii, jj;
int num_sends;
int index, start;
int num_procs, my_id ;
double zero = 0.0;
double res;
MPI_Comm_size(comm,&num_procs);
MPI_Comm_rank(comm,&my_id);
/* Start the ghost exchange of u; the local copy into Vtemp below is
 * overlapped with this communication.  NOTE(review): in serial runs
 * Vext_data is never allocated; relies on the A_offd row loops below
 * being empty in that case. */
if (num_procs > 1)
{
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
v_buf_data = hypre_CTAlloc(double,
hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
Vext_data = hypre_CTAlloc(double,num_cols_offd);
if (num_cols_offd)
{
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_data = hypre_CSRMatrixData(A_offd);
}
/* Pack the locally-owned u entries that neighbors need. */
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
v_buf_data[index++]
= u_data[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 1, comm_pkg, v_buf_data,
Vext_data);
}
/*-----------------------------------------------------------------
 * Copy current approximation into temporary vector.
 *-----------------------------------------------------------------*/
#define HYPRE_SMP_PRIVATE i
#include "../utilities/hypre_smp_forloop.h"
for (i = 0; i < n; i++)
{
Vtemp_data[i] = u_data[i];
}
/* Finish the ghost exchange before relaxing. */
if (num_procs > 1)
{
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
/*-----------------------------------------------------------------
 * Relax all points.
 *-----------------------------------------------------------------*/
if (relax_points == 0 || cf_marker == NULL)
{
#define HYPRE_SMP_PRIVATE i,ii,jj,res
#include "../utilities/hypre_smp_forloop.h"
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
 * If diagonal is nonzero, relax point i; otherwise, skip it.
 * NOTE(review): tests the first stored entry of the row --
 * assumes the diagonal is stored first in A_diag.
 *-----------------------------------------------------------*/
if (A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += (relax_weight*res)/l1_norms[i];
}
}
}
/*-----------------------------------------------------------------
 * Relax only C or F points as determined by relax_points.
 *-----------------------------------------------------------------*/
else
{
#define HYPRE_SMP_PRIVATE i,ii,jj,res
#include "../utilities/hypre_smp_forloop.h"
for (i = 0; i < n; i++)
{
/*-----------------------------------------------------------
 * If i is of the right type ( C or F ) and diagonal is
 * nonzero, relax point i; otherwise, skip it.
 *-----------------------------------------------------------*/
if (cf_marker[i] == relax_points
&& A_diag_data[A_diag_i[i]] != zero)
{
res = f_data[i];
for (jj = A_diag_i[i]; jj < A_diag_i[i+1]; jj++)
{
ii = A_diag_j[jj];
res -= A_diag_data[jj] * Vtemp_data[ii];
}
for (jj = A_offd_i[i]; jj < A_offd_i[i+1]; jj++)
{
ii = A_offd_j[jj];
res -= A_offd_data[jj] * Vext_data[ii];
}
u_data[i] += (relax_weight * res)/l1_norms[i];
}
}
}
/* Release communication buffers (only allocated in parallel runs). */
if (num_procs > 1)
{
hypre_TFree(Vext_data);
hypre_TFree(v_buf_data);
}
return 0;
}
|
3d25pt_var.c | /*
* Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute RESULT = X - Y for two struct timeval values.
 * NOTE: Y is normalized in place as part of the carry handling (the GNU
 * libc manual's classic idiom), so the caller's Y is modified.
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow seconds into y so that x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec) {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  /* Push whole seconds out of the microsecond difference. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization tv_usec is certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  return x->tv_sec < y->tv_sec;
}
/*
 * Benchmark driver: runs TESTS timed repetitions of an order-4 (25-point)
 * 3D stencil with axis-symmetric variable coefficients and reports the
 * fastest run via PRINT_RESULTS.
 *
 * Usage: prog [Nx Ny Nz [Nt]] -- interior sizes; a 4-deep halo is added
 * on every side (hence the +8).  Defaults are used when arguments are
 * missing (previously Nx..Nt were read uninitialized in that case -- UB).
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Defaults: 32^3 interior, 10 time steps. */
  int Nx = 32 + 8, Ny = 32 + 8, Nz = 32 + 8, Nt = 10;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // allocate the arrays: A holds the two time levels, coef the 13
  // stencil coefficient fields; each field is Nz x Ny x Nx.
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 16;
  tile_size[1] = 16;
  tile_size[2] = 32;
  tile_size[3] = 64;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize variables: cover the FULL grid, including index 0 and both
  // time levels.  (Previously the loops started at 1 and only filled A[0],
  // so the stencil read uninitialized halo values at i-4/j-4/k-4 and the
  // t=1 step read an uninitialized halo in A[1].)
  srand(42);
  for (i = 0; i < Nz; i++) {
    for (j = 0; j < Ny; j++) {
      for (k = 0; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
        A[1][i][j][k] = A[0][i][j][k];
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
#pragma scop
    for (t = 0; t < Nt; t++) {
      for (i = 4; i < Nz-4; i++) {
        for (j = 4; j < Ny-4; j++) {
          for (k = 4; k < Nx-4; k++) {
            A[(t+1)%2][i][j][k] =
              coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
              coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
              coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
              coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
              coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
              coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
              coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
              coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
              coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
              coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
              coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
              coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
              coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    /* was `min(...)`, which is never defined -- the macro is MIN */
    min_tdiff = MIN(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  return 0;
}
|
graph.h | // Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef GRAPH_H_
#define GRAPH_H_
#include <cinttypes>
#include <iostream>
#include <type_traits>
#include "pvector.h"
#include "util.h"
/*
GAP Benchmark Suite
Class: CSRGraph
Author: Scott Beamer
Simple container for graph in CSR format
- Intended to be constructed by a Builder
- To make weighted, set DestID_ template type to NodeWeight
- MakeInverse parameter controls whether graph stores its inverse
*/
// Used to hold node & weight, with another node it makes a weighted edge
template <typename NodeID_, typename WeightT_>
struct NodeWeight {
  NodeID_ v;    // destination node
  WeightT_ w;   // edge weight

  NodeWeight() {}
  NodeWeight(NodeID_ v) : v(v), w(1) {}
  NodeWeight(NodeID_ v, WeightT_ w) : v(v), w(w) {}

  // Order by destination node first, breaking ties by weight.
  bool operator< (const NodeWeight& rhs) const {
    if (v != rhs.v)
      return v < rhs.v;
    return w < rhs.w;
  }

  // Weight is ignored so duplicate edges can be detected and removed.
  bool operator== (const NodeWeight& rhs) const {
    return v == rhs.v;
  }

  // Comparison against a bare node id; used to remove self edges.
  bool operator== (const NodeID_& rhs) const {
    return v == rhs;
  }

  // Implicit conversion to the destination node id.
  operator NodeID_() {
    return v;
  }
};
// Writes a NodeWeight as "<node> <weight>", matching operator>> below.
template <typename NodeID_, typename WeightT_>
std::ostream& operator<<(std::ostream& os,
                         const NodeWeight<NodeID_, WeightT_>& nw) {
  return os << nw.v << " " << nw.w;
}

// Reads a node id followed by a weight.
template <typename NodeID_, typename WeightT_>
std::istream& operator>>(std::istream& is, NodeWeight<NodeID_, WeightT_>& nw) {
  return is >> nw.v >> nw.w;
}
// Syntactic sugar for an edge: source node u and destination v.
template <typename SrcT, typename DstT = SrcT>
struct EdgePair {
SrcT u;   // source endpoint
DstT v;   // destination endpoint (may be a NodeWeight for weighted graphs)
EdgePair() {}   // members deliberately left uninitialized
EdgePair(SrcT u, DstT v) : u(u), v(v) {}
};
// SG = serialized graph, these types are for writing graph to file
typedef int32_t SGID;
typedef EdgePair<SGID> SGEdge;
typedef int64_t SGOffset;
/*
 * CSRGraph: compressed-sparse-row graph container (see file header).
 * Ownership: the index and neighbor arrays are owned by this object and
 * deleted in ReleaseResources().  For UNDIRECTED graphs the in_* pointers
 * alias the out_* arrays (see the two-argument constructor), so only the
 * out_* arrays are freed.  Move-only: declaring the move operations
 * implicitly deletes copy construction/assignment.
 */
template <class NodeID_, class DestID_ = NodeID_, bool MakeInverse = true>
class CSRGraph {
// Used to access neighbors of vertex, basically sugar for iterators
class Neighborhood {
NodeID_ n_;
DestID_** g_index_;
public:
Neighborhood(NodeID_ n, DestID_** g_index) : n_(n), g_index_(g_index) {}
typedef DestID_* iterator;
iterator begin() { return g_index_[n_]; }
iterator end() { return g_index_[n_+1]; }
};
// Frees owned arrays.  When !directed_, in_index_/in_neighbors_ alias the
// out_* arrays and must NOT be freed separately (would double-free).
void ReleaseResources() {
if (out_index_ != nullptr)
delete[] out_index_;
if (out_neighbors_ != nullptr)
delete[] out_neighbors_;
if (directed_) {
if (in_index_ != nullptr)
delete[] in_index_;
if (in_neighbors_ != nullptr)
delete[] in_neighbors_;
}
}
public:
// Empty graph; -1 marks "no data".
CSRGraph() : directed_(false), num_nodes_(-1), num_edges_(-1),
out_index_(nullptr), out_neighbors_(nullptr),
in_index_(nullptr), in_neighbors_(nullptr) {}
// Undirected graph: in_* alias out_*; each undirected edge is stored
// twice, so the edge count is halved.
CSRGraph(int64_t num_nodes, DestID_** index, DestID_* neighs) :
directed_(false), num_nodes_(num_nodes),
out_index_(index), out_neighbors_(neighs),
in_index_(index), in_neighbors_(neighs) {
num_edges_ = (out_index_[num_nodes_] - out_index_[0]) / 2;
}
// Directed graph with separate outgoing and incoming adjacency.
CSRGraph(int64_t num_nodes, DestID_** out_index, DestID_* out_neighs,
DestID_** in_index, DestID_* in_neighs) :
directed_(true), num_nodes_(num_nodes),
out_index_(out_index), out_neighbors_(out_neighs),
in_index_(in_index), in_neighbors_(in_neighs) {
num_edges_ = out_index_[num_nodes_] - out_index_[0];
}
// Move construction: steal the arrays, leave `other` empty.
CSRGraph(CSRGraph&& other) : directed_(other.directed_),
num_nodes_(other.num_nodes_), num_edges_(other.num_edges_),
out_index_(other.out_index_), out_neighbors_(other.out_neighbors_),
in_index_(other.in_index_), in_neighbors_(other.in_neighbors_) {
other.num_edges_ = -1;
other.num_nodes_ = -1;
other.out_index_ = nullptr;
other.out_neighbors_ = nullptr;
other.in_index_ = nullptr;
other.in_neighbors_ = nullptr;
}
~CSRGraph() {
ReleaseResources();
}
// Move assignment: free current arrays, then steal other's.
CSRGraph& operator=(CSRGraph&& other) {
if (this != &other) {
ReleaseResources();
directed_ = other.directed_;
num_edges_ = other.num_edges_;
num_nodes_ = other.num_nodes_;
out_index_ = other.out_index_;
out_neighbors_ = other.out_neighbors_;
in_index_ = other.in_index_;
in_neighbors_ = other.in_neighbors_;
other.num_edges_ = -1;
other.num_nodes_ = -1;
other.out_index_ = nullptr;
other.out_neighbors_ = nullptr;
other.in_index_ = nullptr;
other.in_neighbors_ = nullptr;
}
return *this;
}
bool directed() const {
return directed_;
}
int64_t num_nodes() const {
return num_nodes_;
}
int64_t num_edges() const {
return num_edges_;
}
// Number of stored (directed) edges; undirected edges count twice.
int64_t num_edges_directed() const {
return directed_ ? num_edges_ : 2*num_edges_;
}
int64_t out_degree(NodeID_ v) const {
return out_index_[v+1] - out_index_[v];
}
int64_t in_degree(NodeID_ v) const {
static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
return in_index_[v+1] - in_index_[v];
}
Neighborhood out_neigh(NodeID_ n) const {
return Neighborhood(n, out_index_);
}
Neighborhood in_neigh(NodeID_ n) const {
static_assert(MakeInverse, "Graph inversion disabled but reading inverse");
return Neighborhood(n, in_index_);
}
void PrintStats() const {
std::cout << "Graph has " << num_nodes_ << " nodes and "
<< num_edges_ << " ";
if (!directed_)
std::cout << "un";
std::cout << "directed edges for degree: ";
std::cout << num_edges_/num_nodes_ << std::endl;
}
void PrintTopology() const {
for (NodeID_ i=0; i < num_nodes_; i++) {
std::cout << i << ": ";
for (DestID_ j : out_neigh(i)) {
std::cout << j << " ";
}
std::cout << std::endl;
}
}
// Builds a per-node index (array of pointers into neighs) from prefix-sum
// offsets.  Caller owns the returned array (delete[]).
static DestID_** GenIndex(const pvector<SGOffset> &offsets, DestID_* neighs) {
NodeID_ length = offsets.size();
DestID_** index = new DestID_*[length];
#pragma omp parallel for
for (NodeID_ n=0; n < length; n++)
index[n] = neighs + offsets[n];
return index;
}
// Recovers the prefix-sum offsets (num_nodes_+1 entries) from the index.
pvector<SGOffset> VertexOffsets(bool in_graph = false) const {
pvector<SGOffset> offsets(num_nodes_+1);
for (NodeID_ n=0; n < num_nodes_+1; n++)
if (in_graph)
offsets[n] = in_index_[n] - in_index_[0];
else
offsets[n] = out_index_[n] - out_index_[0];
return offsets;
}
// Iterable range over all node ids [0, num_nodes).
Range<NodeID_> vertices() const {
return Range<NodeID_>(num_nodes());
}
private:
bool directed_;
int64_t num_nodes_;
int64_t num_edges_;
DestID_** out_index_;
DestID_* out_neighbors_;
DestID_** in_index_;
DestID_* in_neighbors_;
};
#endif // GRAPH_H_
|
lu.pluto_orio.par.c | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
double L[N][N];
double U[N][N];
double A[N][N+13];
/* Dump the rounded contents of A to stderr, one matrix row per line,
 * inserting an extra line break after every 80 printed values. */
void print_array()
{
  int row, col;
  for (row = 0; row < N; row++) {
    for (col = 0; col < N; col++) {
      fprintf(stderr, "%lf ", round(A[row][col]));
      if (col % 80 == 79)
        fprintf(stderr, "\n");
    }
    fprintf(stderr, "\n");
  }
  fprintf(stderr, "\n");
}
/* Construct A = L * U from synthetic triangular factors so that the
 * subsequent LU factorization never divides by zero (A has a known,
 * well-conditioned factorization by construction). */
void init_arrays()
{
  int r, c, t;
  /* clear both factors */
  for (r = 0; r < N; r++) {
    for (c = 0; c < N; c++) {
      L[r][c] = 0.0;
      U[r][c] = 0.0;
    }
  }
  /* L lower-triangular, U upper-triangular, both nonzero on the diagonal */
  for (r = 0; r < N; r++) {
    for (c = 0; c <= r; c++) {
      L[r][c] = r + c + 1;
      U[c][r] = r + c + 1;
    }
  }
  /* dense triple-loop product: A += L * U (A starts zeroed as a global) */
  for (r = 0; r < N; r++) {
    for (c = 0; c < N; c++) {
      for (t = 0; t < N; t++) {
        A[r][c] += L[r][t] * U[t][c];
      }
    }
  }
}
/* Wall-clock time in seconds with microsecond resolution.
 *
 * The timezone argument of gettimeofday is obsolete and must be NULL
 * per POSIX; the original passed an uninitialized struct timezone and
 * captured the call's return value in an unused 'stat' variable. */
double rtclock()
{
  struct timeval tp;
  gettimeofday(&tp, NULL);
  return (tp.tv_sec + tp.tv_usec * 1.0e-6);
}
int main()
{
/* Driver: build A = L*U, time REPS runs of the Orio/PLuTo-generated
 * tiled LU kernel below, and report the mean time per run (or dump A
 * when compiled with -DTEST). */
init_arrays();
double annot_t_start=0, annot_t_end=0, annot_t_total=0;
int annot_i;
for (annot_i=0; annot_i<REPS; annot_i++)
{
annot_t_start = rtclock();
/*@ begin PerfTuning (
def build
{
arg build_command = 'icc -O3 -openmp -I/usr/local/icc/include -lm';
}
def performance_counter
{
arg repetitions = 1;
}
def performance_params
{
# param T1_1[] = [1,16,32,64,128];
# param T1_2[] = [1,16,32,64,128];
# param T1_3[] = [1,16,32,64,128];
# param T2_1[] = [1,4,8,16,32];
# param T2_2[] = [1,4,8,16,32];
# param T2_3[] = [1,4,8,16,32];
param T1_1[] = [64];
param T1_2[] = [256];
param T1_3[] = [64];
param T2_1[] = [1];
param T2_2[] = [1];
param T2_3[] = [1];
constraint c1 = (T1_1*T2_1<=1024 and T1_1*T2_1<=1024 and T1_1*T2_1<=1024);
constraint c2 = ((T1_1 == T1_3) and (T2_1 == T2_3));
param U1[] = [1];
param U2[] = [1];
param U3[] = [7];
constraint c3 = (U1*U2*U3<=512);
param PERM[] = [
#[0,1,2],
#[0,2,1],
#[1,0,2],
#[1,2,0],
[2,0,1],
#[2,1,0],
];
param PAR[] = [True];
param SCREP[] = [False];
param IVEC[] = [True];
}
def search
{
arg algorithm = 'Exhaustive';
# arg algorithm = 'Simplex';
# arg time_limit = 5;
# arg total_runs = 1;
}
def input_params
{
param N[] = [1024];
}
def input_vars
{
arg decl_file = 'decl_code.h';
arg init_file = 'init_code.c';
}
) @*/
/**-- (Generated by Orio)
Best performance cost:
0.201184
Tuned for specific problem sizes:
N = 1024
Best performance parameters:
IVEC = True
PAR = True
PERM = [2, 0, 1]
SCREP = False
T1_1 = 64
T1_2 = 256
T1_3 = 64
T2_1 = 1
T2_2 = 1
T2_3 = 1
U1 = 1
U2 = 1
U3 = 7
--**/
/* Scalar loop indices and tile-bound scratch registers declared for the
 * machine-generated tiled code below; most are unused by this variant. */
register int i,j,k;
register int c1t, c2t, c3t, c4t, c5t, c6t, c7t, c8t, c9t, c10t, c11t, c12t;
register int newlb_c1, newlb_c2, newlb_c3, newlb_c4, newlb_c5, newlb_c6,
newlb_c7, newlb_c8, newlb_c9, newlb_c10, newlb_c11, newlb_c12;
register int newub_c1, newub_c2, newub_c3, newub_c4, newub_c5, newub_c6,
newub_c7, newub_c8, newub_c9, newub_c10, newub_c11, newub_c12;
/*@ begin PolySyn(
parallel = PAR;
tiles = [T1_1,T1_2,T1_3,T2_1,T2_2,T2_3];
permut = PERM;
unroll_factors = [U1,U2,U3];
scalar_replace = SCREP;
vectorize = IVEC;
profiling_code = 'lu_profiling.c';
compile_cmd = 'gcc';
compile_opts = '-lm';
) @*/
#include <math.h>
#include <assert.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
int c1, c2, c3, c4, c5, c6, c7, c8, c9;
register int lb, ub, lb1, ub1, lb2, ub2;
/* Generated from PLuTo-produced CLooG file by CLooG v0.14.1 64 bits in 0.05s. */
/* Outer wavefront loop over time tiles; inner c2 loop is the
 * OpenMP-parallel dimension. Loop bounds are polyhedral scan code
 * emitted by CLooG -- do not hand-edit. */
for (c1=-1;c1<=floord(5*N-9,256);c1++) {
lb1=max(max(ceild(32*c1-127,160),ceild(64*c1-N+2,64)),0);
ub1=min(floord(64*c1+63,64),floord(N-1,256));
#pragma omp parallel for shared(c1,lb1,ub1) private(c2,c3,c4,c5,c6,c7,c8,c9)
for (c2=lb1; c2<=ub1; c2++) {
for (c3=max(ceild(32*c1-32*c2-1953,2016),ceild(32*c1-32*c2-31,32));c3<=floord(N-1,64);c3++) {
if (c1 == c2+c3) {
/* diagonal tile: pivot-row scaling plus in-tile elimination */
for (c7=max(64*c3,0);c7<=min(min(N-2,64*c3+62),256*c2+254);c7++) {
for (c8=max(c7+1,256*c2);c8<=min(N-1,256*c2+255);c8++) {
A[c7][c8]=A[c7][c8]/A[c7][c7] ;
for (c9=c7+1;c9<=min(N-1,64*c3+63);c9++) {
A[c9][c8]=A[c9][c8]-A[c9][c7]*A[c7][c8] ;
}
}
}
}
/*@ begin Loop(
transform Composite(
permut = [['c9', 'c7', 'c8']],
regtile = (['c7', 'c8', 'c9'],[1, 1, 7]),
scalarreplace = (False, 'double'),
vector = (True, ['ivdep','vector always']))
for (c7=max(0,64*c1-64*c2);c7<=min(min(256*c2+254,64*c1-64*c2+63),64*c3-1);c7++) {
for (c8=max(c7+1,256*c2);c8<=min(256*c2+255,N-1);c8++) {
for (c9=64*c3;c9<=min(N-1,64*c3+63);c9++) {
A[c9][c8]=A[c9][c8]-A[c9][c7]*A[c7][c8] ;
}
}
}
) @*/{
/* update sweep: c9 unrolled by 7 (U3), with a remainder loop below */
for (c9t=64*c3; c9t<=min(N-1,64*c3+63)-6; c9t=c9t+7) {
for (c7=max(0,64*c1-64*c2); c7<=min(min(256*c2+254,64*c1-64*c2+63),64*c3-1); c7++ ) {
register int cbv_1, cbv_2;
cbv_1=max(c7+1,256*c2);
cbv_2=min(256*c2+255,N-1);
#pragma ivdep
#pragma vector always
for (c8=cbv_1; c8<=cbv_2; c8++ ) {
A[c9t][c8]=A[c9t][c8]-A[c9t][c7]*A[c7][c8];
A[(c9t+1)][c8]=A[(c9t+1)][c8]-A[(c9t+1)][c7]*A[c7][c8];
A[(c9t+2)][c8]=A[(c9t+2)][c8]-A[(c9t+2)][c7]*A[c7][c8];
A[(c9t+3)][c8]=A[(c9t+3)][c8]-A[(c9t+3)][c7]*A[c7][c8];
A[(c9t+4)][c8]=A[(c9t+4)][c8]-A[(c9t+4)][c7]*A[c7][c8];
A[(c9t+5)][c8]=A[(c9t+5)][c8]-A[(c9t+5)][c7]*A[c7][c8];
A[(c9t+6)][c8]=A[(c9t+6)][c8]-A[(c9t+6)][c7]*A[c7][c8];
}
}
}
/* remainder loop for the last (<7) c9 iterations */
for (c9=c9t; c9<=min(N-1,64*c3+63); c9=c9+1) {
for (c7=max(0,64*c1-64*c2); c7<=min(min(256*c2+254,64*c1-64*c2+63),64*c3-1); c7++ ) {
register int cbv_3, cbv_4;
cbv_3=max(c7+1,256*c2);
cbv_4=min(256*c2+255,N-1);
#pragma ivdep
#pragma vector always
for (c8=cbv_3; c8<=cbv_4; c8++ ) {
A[c9][c8]=A[c9][c8]-A[c9][c7]*A[c7][c8];
}
}
}
}
/*@ end @*/
if ((-c1 == -c2-c3) && (c1 <= min(floord(320*c2+191,64),floord(64*c2+N-65,64)))) {
for (c8=max(256*c2,64*c1-64*c2+64);c8<=min(256*c2+255,N-1);c8++) {
A[64*c1-64*c2+63][c8]=A[64*c1-64*c2+63][c8]/A[64*c1-64*c2+63][64*c1-64*c2+63] ;
}
}
}
}
}
/* End of CLooG code */
/*@ end @*/
/*@ end @*/
annot_t_end = rtclock();
annot_t_total += annot_t_end - annot_t_start;
}
annot_t_total = annot_t_total / REPS;
/* Without -DTEST: print the mean kernel time; with -DTEST: dump A so
 * the factorization result can be diffed against a reference. */
#ifndef TEST
printf("%f\n", annot_t_total);
#else
{
int i, j;
for (i=0; i<N; i++) {
for (j=0; j<N; j++) {
if (j%100==0)
printf("\n");
printf("%f ",A[i][j]);
}
printf("\n");
}
}
#endif
return ((int) A[0][0]);
}
|
GB_concat_bitmap_template.c | //------------------------------------------------------------------------------
// GB_concat_bitmap_template: concatenate a tile into a bitmap matrix
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
{
//--------------------------------------------------------------------------
// get C and the tile A
//--------------------------------------------------------------------------
#ifndef GB_ISO_CONCAT
const bool A_iso = A->iso ;
const GB_CTYPE *restrict Ax = (GB_CTYPE *) A->x ;
GB_CTYPE *restrict Cx = (GB_CTYPE *) C->x ;
#endif
int8_t *restrict Cb = C->b ;
//--------------------------------------------------------------------------
// copy the tile A into C
//--------------------------------------------------------------------------
// Each case maps an entry of A at (i,j) to its position in C at
// (cistart+i, cvstart+j), copies the value via GB_COPY, and sets the
// bitmap Cb[pC] = 1 to mark the entry as present in C.
switch (GB_sparsity (A))
{
case GxB_FULL : // A is full
{
int A_nthreads = GB_nthreads (anz, chunk, nthreads_max) ;
int64_t pA ;
#pragma omp parallel for num_threads(A_nthreads) schedule(static)
for (pA = 0 ; pA < anz ; pA++)
{
// linear index pA -> (i,j) within the tile (vectors of length avlen)
int64_t i = pA % avlen ;
int64_t j = pA / avlen ;
int64_t iC = cistart + i ;
int64_t jC = cvstart + j ;
int64_t pC = iC + jC * cvlen ;
// Cx [pC] = Ax [pA] ;
GB_COPY (pC, pA, A_iso) ;
Cb [pC] = 1 ;
}
}
break ;
case GxB_BITMAP : // A is bitmap
{
int A_nthreads = GB_nthreads (anz, chunk, nthreads_max) ;
const int8_t *restrict Ab = A->b ;
int64_t pA ;
#pragma omp parallel for num_threads(A_nthreads) schedule(static)
for (pA = 0 ; pA < anz ; pA++)
{
// only copy positions whose bitmap bit says the entry exists
if (Ab [pA])
{
int64_t i = pA % avlen ;
int64_t j = pA / avlen ;
int64_t iC = cistart + i ;
int64_t jC = cvstart + j ;
int64_t pC = iC + jC * cvlen ;
// Cx [pC] = Ax [pA] ;
GB_COPY (pC, pA, A_iso) ;
Cb [pC] = 1 ;
}
}
}
break ;
default : // A is sparse or hypersparse
{
int A_nthreads, A_ntasks ;
GB_SLICE_MATRIX (A, 1, chunk) ;
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ah = A->h ;
const int64_t *restrict Ai = A->i ;
int tid ;
#pragma omp parallel for num_threads(A_nthreads) schedule(static)
for (tid = 0 ; tid < A_ntasks ; tid++)
{
// each task handles vectors kfirst..klast of the sliced tile
int64_t kfirst = kfirst_Aslice [tid] ;
int64_t klast = klast_Aslice [tid] ;
for (int64_t k = kfirst ; k <= klast ; k++)
{
int64_t j = GBH (Ah, k) ;
int64_t jC = cvstart + j ;
int64_t pC_start = cistart + jC * cvlen ;
int64_t pA_start, pA_end ;
GB_get_pA (&pA_start, &pA_end, tid, k,
kfirst, klast, pstart_Aslice, Ap, avlen) ;
GB_PRAGMA_SIMD
for (int64_t pA = pA_start ; pA < pA_end ; pA++)
{
int64_t i = Ai [pA] ;
int64_t pC = pC_start + i ;
// Cx [pC] = Ax [pA] ;
GB_COPY (pC, pA, A_iso) ;
Cb [pC] = 1 ;
}
}
}
}
break ;
}
done = true ;
}
#undef GB_CTYPE
#undef GB_ISO_CONCAT
|
GB_ewise_slice.c | //------------------------------------------------------------------------------
// GB_ewise_slice: slice the entries and vectors for an ewise operation
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Constructs a set of tasks to compute C, for an element-wise operation
// (GB_add, GB_emult, and GB_mask) that operates on two input matrices,
// C=op(A,B). The mask is ignored for computing where to slice the work, but
// it is sliced once the location has been found.
// M, A, B: any sparsity structure (hypersparse, sparse, bitmap, or full) This
// function should work if A or B are bitmap, but it is not needed in that
// case. C: constructed as sparse or hypersparse in the caller.
#define GB_FREE_WORK \
{ \
GB_FREE (Coarse) ; \
GB_FREE (Cwork) ; \
}
#define GB_FREE_ALL \
{ \
GB_FREE_WORK ; \
GB_FREE (TaskList) ; \
}
#include "GB.h"
//------------------------------------------------------------------------------
// GB_ewise_slice
//------------------------------------------------------------------------------
GrB_Info GB_ewise_slice
(
// output:
GB_task_struct **p_TaskList, // array of structs, of size max_ntasks
int *p_TaskList_size, // size of TaskList
int *p_ntasks, // # of tasks constructed
int *p_nthreads, // # of threads for eWise operation
// input:
const int64_t Cnvec, // # of vectors of C
const int64_t *GB_RESTRICT Ch, // vectors of C, if hypersparse
const int64_t *GB_RESTRICT C_to_M, // mapping of C to M
const int64_t *GB_RESTRICT C_to_A, // mapping of C to A
const int64_t *GB_RESTRICT C_to_B, // mapping of C to B
bool Ch_is_Mh, // if true, then Ch == Mh; GB_add only
const GrB_Matrix M, // mask matrix to slice (optional)
const GrB_Matrix A, // matrix to slice
const GrB_Matrix B, // matrix to slice
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (p_TaskList != NULL) ;
ASSERT (p_TaskList_size != NULL) ;
ASSERT (p_ntasks != NULL) ;
ASSERT (p_nthreads != NULL) ;
ASSERT_MATRIX_OK (A, "A for ewise_slice", GB0) ;
ASSERT (!GB_ZOMBIES (A)) ;
ASSERT (!GB_JUMBLED (A)) ;
ASSERT (!GB_PENDING (A)) ;
ASSERT_MATRIX_OK (B, "B for ewise_slice", GB0) ;
ASSERT (!GB_ZOMBIES (B)) ;
ASSERT (!GB_JUMBLED (B)) ;
ASSERT (!GB_PENDING (B)) ;
ASSERT_MATRIX_OK_OR_NULL (M, "M for ewise_slice", GB0) ;
ASSERT (!GB_ZOMBIES (M)) ;
ASSERT (!GB_JUMBLED (M)) ;
ASSERT (!GB_PENDING (M)) ;
(*p_TaskList ) = NULL ;
(*p_TaskList_size) = 0 ;
(*p_ntasks ) = 0 ;
(*p_nthreads ) = 1 ;
int64_t *GB_RESTRICT Cwork = NULL ;
int64_t *GB_RESTRICT Coarse = NULL ; // size ntasks1+1
int ntasks1 = 0 ;
//--------------------------------------------------------------------------
// determine # of threads to use
//--------------------------------------------------------------------------
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
//--------------------------------------------------------------------------
// allocate the initial TaskList
//--------------------------------------------------------------------------
// Allocate the TaskList to hold at least 2*ntask0 tasks. It will grow
// later, if needed. Usually, 64*nthreads_max is enough, but in a few cases
// fine tasks can cause this number to be exceeded. If that occurs,
// TaskList is reallocated.
// When the mask is present, it is often fastest to break the work up
// into tasks, even when nthreads_max is 1.
GB_task_struct *GB_RESTRICT TaskList = NULL ;
int max_ntasks = 0 ;
int ntasks0 = (M == NULL && nthreads_max == 1) ? 1 : (32 * nthreads_max) ;
GB_REALLOC_TASK_LIST (TaskList, ntasks0, max_ntasks) ;
//--------------------------------------------------------------------------
// check for quick return for a single task
//--------------------------------------------------------------------------
if (Cnvec == 0 || ntasks0 == 1)
{
// construct a single coarse task that computes all of C
TaskList [0].kfirst = 0 ;
TaskList [0].klast = Cnvec-1 ;
(*p_TaskList ) = TaskList ;
(*p_TaskList_size) = max_ntasks ;
(*p_ntasks ) = (Cnvec == 0) ? 0 : 1 ;
(*p_nthreads ) = 1 ;
return (GrB_SUCCESS) ;
}
//--------------------------------------------------------------------------
// get A, B, and M
//--------------------------------------------------------------------------
const int64_t vlen = A->vlen ;
const int64_t *GB_RESTRICT Ap = A->p ;
const int64_t *GB_RESTRICT Ai = A->i ;
const int64_t *GB_RESTRICT Bp = B->p ;
const int64_t *GB_RESTRICT Bi = B->i ;
bool Ch_is_Ah = (Ch != NULL && A->h != NULL && Ch == A->h) ;
bool Ch_is_Bh = (Ch != NULL && B->h != NULL && Ch == B->h) ;
const int64_t *GB_RESTRICT Mp = NULL ;
const int64_t *GB_RESTRICT Mi = NULL ;
bool M_is_hyper = GB_IS_HYPERSPARSE (M) ;
if (M != NULL)
{
Mp = M->p ;
Mi = M->i ;
// Ch_is_Mh is true if either true on input (for GB_add, which denotes
// that Ch is a deep copy of M->h), or if Ch is a shallow copy of M->h.
Ch_is_Mh = Ch_is_Mh || (Ch != NULL && M_is_hyper && Ch == M->h) ;
}
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
// Cwork has Cnvec+1 entries so it can later hold the cumulative sum.
Cwork = GB_MALLOC (Cnvec+1, int64_t) ;
if (Cwork == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// compute an estimate of the work for each vector of C
//--------------------------------------------------------------------------
int nthreads_for_Cwork = GB_nthreads (Cnvec, chunk, nthreads_max) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads_for_Cwork) schedule(static)
for (k = 0 ; k < Cnvec ; k++)
{
//----------------------------------------------------------------------
// get the C(:,j) vector
//----------------------------------------------------------------------
int64_t j = GBH (Ch, k) ;
//----------------------------------------------------------------------
// get the corresponding vector of A
//----------------------------------------------------------------------
int64_t kA ;
if (C_to_A != NULL)
{
// A is hypersparse and the C_to_A mapping has been created
ASSERT (GB_IS_HYPERSPARSE (A)) ;
kA = C_to_A [k] ;
ASSERT (kA >= -1 && kA < A->nvec) ;
if (kA >= 0)
{
ASSERT (j == GBH (A->h, kA)) ;
}
}
else if (Ch_is_Ah)
{
// A is hypersparse, but Ch is a shallow copy of A->h
ASSERT (GB_IS_HYPERSPARSE (A)) ;
kA = k ;
ASSERT (j == A->h [kA]) ;
}
else
{
// A is sparse, bitmap, or full
ASSERT (!GB_IS_HYPERSPARSE (A)) ;
kA = j ;
}
//----------------------------------------------------------------------
// get the corresponding vector of B
//----------------------------------------------------------------------
int64_t kB ;
if (C_to_B != NULL)
{
// B is hypersparse and the C_to_B mapping has been created
ASSERT (GB_IS_HYPERSPARSE (B)) ;
kB = C_to_B [k] ;
ASSERT (kB >= -1 && kB < B->nvec) ;
if (kB >= 0)
{
ASSERT (j == GBH (B->h, kB)) ;
}
}
else if (Ch_is_Bh)
{
// B is hypersparse, but Ch is a shallow copy of B->h
ASSERT (GB_IS_HYPERSPARSE (B)) ;
kB = k ;
ASSERT (j == B->h [kB]) ;
}
else
{
// B is sparse, bitmap, or full
ASSERT (!GB_IS_HYPERSPARSE (B)) ;
kB = j ;
}
//----------------------------------------------------------------------
// estimate the work for C(:,j)
//----------------------------------------------------------------------
ASSERT (kA >= -1 && kA < A->nvec) ;
ASSERT (kB >= -1 && kB < B->nvec) ;
int64_t aknz = (kA < 0) ? 0 :
((Ap == NULL) ? vlen : (Ap [kA+1] - Ap [kA])) ;
int64_t bknz = (kB < 0) ? 0 :
((Bp == NULL) ? vlen : (Bp [kB+1] - Bp [kB])) ;
// +1 so that even all-empty vectors carry some scheduling weight
Cwork [k] = aknz + bknz + 1 ;
}
//--------------------------------------------------------------------------
// replace Cwork with its cumulative sum
//--------------------------------------------------------------------------
GB_cumsum (Cwork, Cnvec, NULL, nthreads_for_Cwork) ;
double cwork = (double) Cwork [Cnvec] ;
//--------------------------------------------------------------------------
// determine # of threads and tasks for the eWise operation
//--------------------------------------------------------------------------
int nthreads = GB_nthreads (cwork, chunk, nthreads_max) ;
// same heuristic as ntasks0 above, now based on the actual work estimate
ntasks0 = (M == NULL && nthreads == 1) ? 1 : (32 * nthreads) ;
double target_task_size = cwork / (double) (ntasks0) ;
target_task_size = GB_IMAX (target_task_size, chunk) ;
ntasks1 = cwork / target_task_size ;
ntasks1 = GB_IMAX (ntasks1, 1) ;
//--------------------------------------------------------------------------
// slice the work into coarse tasks
//--------------------------------------------------------------------------
if (!GB_pslice (&Coarse, Cwork, Cnvec, ntasks1, false))
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// construct all tasks, both coarse and fine
//--------------------------------------------------------------------------
int ntasks = 0 ;
for (int t = 0 ; t < ntasks1 ; t++)
{
//----------------------------------------------------------------------
// coarse task computes C (:,k:klast)
//----------------------------------------------------------------------
int64_t k = Coarse [t] ;
int64_t klast = Coarse [t+1] - 1 ;
if (k >= Cnvec)
{
//------------------------------------------------------------------
// all tasks have been constructed
//------------------------------------------------------------------
break ;
}
else if (k < klast)
{
//------------------------------------------------------------------
// coarse task has 2 or more vectors
//------------------------------------------------------------------
// This is a non-empty coarse-grain task that does two or more
// entire vectors of C, vectors k:klast, inclusive.
GB_REALLOC_TASK_LIST (TaskList, ntasks + 1, max_ntasks) ;
TaskList [ntasks].kfirst = k ;
TaskList [ntasks].klast = klast ;
ntasks++ ;
}
else
{
//------------------------------------------------------------------
// coarse task has 0 or 1 vectors
//------------------------------------------------------------------
// As a coarse-grain task, this task is empty or does a single
// vector, k. Vector k must be removed from the work done by this
// and any other coarse-grain task, and split into one or more
// fine-grain tasks.
for (int tt = t ; tt < ntasks1 ; tt++)
{
// remove k from the initial slice tt
if (Coarse [tt] == k)
{
// remove k from task tt
Coarse [tt] = k+1 ;
}
else
{
// break, k not in task tt
break ;
}
}
//------------------------------------------------------------------
// get the vector of C
//------------------------------------------------------------------
int64_t j = GBH (Ch, k) ;
//------------------------------------------------------------------
// get the corresponding vector of A
//------------------------------------------------------------------
int64_t kA ;
if (C_to_A != NULL)
{
// A is hypersparse and the C_to_A mapping has been created
ASSERT (GB_IS_HYPERSPARSE (A)) ;
kA = C_to_A [k] ;
}
else if (Ch_is_Ah)
{
// A is hypersparse, but Ch is a shallow copy of A->h
ASSERT (GB_IS_HYPERSPARSE (A)) ;
kA = k ;
}
else
{
// A is sparse, bitmap, or full
ASSERT (!GB_IS_HYPERSPARSE (A)) ;
kA = j ;
}
int64_t pA_start = (kA < 0) ? (-1) : GBP (Ap, kA, vlen) ;
int64_t pA_end = (kA < 0) ? (-1) : GBP (Ap, kA+1, vlen) ;
bool a_empty = (pA_end == pA_start) ;
//------------------------------------------------------------------
// get the corresponding vector of B
//------------------------------------------------------------------
int64_t kB ;
if (C_to_B != NULL)
{
// B is hypersparse and the C_to_B mapping has been created
ASSERT (GB_IS_HYPERSPARSE (B)) ;
kB = C_to_B [k] ;
}
else if (Ch_is_Bh)
{
// B is hypersparse, but Ch is a shallow copy of B->h
ASSERT (GB_IS_HYPERSPARSE (B)) ;
kB = k ;
}
else
{
// B is sparse, bitmap, or full
ASSERT (!GB_IS_HYPERSPARSE (B)) ;
kB = j ;
}
int64_t pB_start = (kB < 0) ? (-1) : GBP (Bp, kB, vlen) ;
int64_t pB_end = (kB < 0) ? (-1) : GBP (Bp, kB+1, vlen) ;
bool b_empty = (pB_end == pB_start) ;
//------------------------------------------------------------------
// get the corresponding vector of M, if present
//------------------------------------------------------------------
// M can have any sparsity structure (hyper, sparse, bitmap, full)
int64_t pM_start = -1 ;
int64_t pM_end = -1 ;
if (M != NULL)
{
int64_t kM ;
if (C_to_M != NULL)
{
// M is hypersparse and the C_to_M mapping has been created
ASSERT (GB_IS_HYPERSPARSE (M)) ;
kM = C_to_M [k] ;
}
else if (Ch_is_Mh)
{
// M is hypersparse, but Ch is a copy of Mh
ASSERT (GB_IS_HYPERSPARSE (M)) ;
// Ch is a deep or shallow copy of Mh
kM = k ;
}
else
{
// M is sparse, bitmap, or full
ASSERT (!GB_IS_HYPERSPARSE (M)) ;
kM = j ;
}
pM_start = (kM < 0) ? -1 : GBP (Mp, kM, vlen) ;
pM_end = (kM < 0) ? -1 : GBP (Mp, kM+1, vlen) ;
}
bool m_empty = (pM_end == pM_start) ;
//------------------------------------------------------------------
// determine the # of fine-grain tasks to create for vector k
//------------------------------------------------------------------
double ckwork = Cwork [k+1] - Cwork [k] ;
int nfine = ckwork / target_task_size ;
nfine = GB_IMAX (nfine, 1) ;
// make the TaskList bigger, if needed
GB_REALLOC_TASK_LIST (TaskList, ntasks + nfine, max_ntasks) ;
//------------------------------------------------------------------
// create the fine-grain tasks
//------------------------------------------------------------------
if (nfine == 1)
{
//--------------------------------------------------------------
// this is a single coarse task for all of vector k
//--------------------------------------------------------------
TaskList [ntasks].kfirst = k ;
TaskList [ntasks].klast = k ;
ntasks++ ;
}
else
{
//--------------------------------------------------------------
// slice vector k into nfine fine tasks
//--------------------------------------------------------------
// Each fine task covers a contiguous index range of vector k;
// GB_slice_vector picks split points that balance the work.
// first fine task starts at the top of vector k
ASSERT (ntasks < max_ntasks) ;
TaskList [ntasks].kfirst = k ;
TaskList [ntasks].klast = -1 ; // this is a fine task
TaskList [ntasks].pM = (m_empty) ? -1 : pM_start ;
TaskList [ntasks].pA = (a_empty) ? -1 : pA_start ;
TaskList [ntasks].pB = (b_empty) ? -1 : pB_start ;
TaskList [ntasks].len = 0 ; // to be determined below
ntasks++ ;
int64_t ilast = 0, i = 0 ;
for (int tfine = 1 ; tfine < nfine ; tfine++)
{
double target_work = ((nfine-tfine) * ckwork) / nfine ;
int64_t pM, pA, pB ;
GB_slice_vector (&i, &pM, &pA, &pB,
pM_start, pM_end, Mi,
pA_start, pA_end, Ai,
pB_start, pB_end, Bi,
vlen, target_work) ;
// prior task ends at pM-1, pA-1, and pB-1
TaskList [ntasks-1].pM_end = pM ;
TaskList [ntasks-1].pA_end = pA ;
TaskList [ntasks-1].pB_end = pB ;
// prior task handles indices ilast:i-1
TaskList [ntasks-1].len = i - ilast ;
// this task starts at pM, pA, and pB
ASSERT (ntasks < max_ntasks) ;
TaskList [ntasks].kfirst = k ;
TaskList [ntasks].klast = -1 ; // this is a fine task
TaskList [ntasks].pM = pM ;
TaskList [ntasks].pA = pA ;
TaskList [ntasks].pB = pB ;
// advance to the next task
ntasks++ ;
ilast = i ;
}
// Terminate the last fine task.
ASSERT (ntasks <= max_ntasks) ;
TaskList [ntasks-1].pM_end = (m_empty) ? -1 : pM_end ;
TaskList [ntasks-1].pA_end = (a_empty) ? -1 : pA_end ;
TaskList [ntasks-1].pB_end = (b_empty) ? -1 : pB_end ;
TaskList [ntasks-1].len = vlen - i ;
}
}
}
ASSERT (ntasks <= max_ntasks) ;
//--------------------------------------------------------------------------
// free workspace and return result
//--------------------------------------------------------------------------
GB_FREE_WORK ;
(*p_TaskList ) = TaskList ;
(*p_TaskList_size) = max_ntasks ;
(*p_ntasks ) = ntasks ;
(*p_nthreads ) = nthreads ;
return (GrB_SUCCESS) ;
}
|
argsort_op.h | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <algorithm>
#include <utility>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/transpose_op.h"
namespace paddle {
namespace operators {
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
template <typename T, int MajorType = Eigen::RowMajor,
typename IndexType = Eigen::DenseIndex>
using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
using Tensor = framework::Tensor;
// Sort each of the input_height rows of `input` independently, writing the
// sorted values to t_out and the original column positions of each value to
// t_indices (both row-major, input_height x input_width). `descending`
// selects the sort direction. Rows run in parallel under MKLML's OpenMP.
template <typename T, typename Type>
static void FullSort(Type input_height, Type input_width, int input_dim,
                     const framework::Tensor* input, T* t_out, Type* t_indices,
                     bool descending) {
#ifdef PADDLE_WITH_MKLML
#pragma omp parallel for
#endif
  for (Type i = 0; i < input_height; ++i) {
    // (value, original index) pairs for row i
    std::vector<std::pair<T, Type>> col_vec;
    col_vec.reserve(input_width);
    if (input_dim == 1) {
      auto e_input = EigenVector<T>::Flatten(*input);
      for (Type j = 0; j < input_width; ++j) {
        // emplace_back constructs the pair in place (no temporary pair)
        col_vec.emplace_back(e_input(j), j);
      }
    } else {
      auto e_input = EigenMatrix<T>::Reshape(*input, input_dim - 1);
      for (Type j = 0; j < input_width; ++j) {
        col_vec.emplace_back(e_input(i, j), j);
      }
    }
    std::sort(col_vec.begin(), col_vec.end(),
              [&](const std::pair<T, Type>& l, const std::pair<T, Type>& r) {
                // comparator direction chosen by the runtime flag
                return descending ? l.first > r.first : l.first < r.first;
              });
    // scatter sorted values and their source indices into the outputs
    for (Type j = 0; j < input_width; ++j) {
      t_out[i * input_width + j] = col_vec[j].first;
      t_indices[i * input_width + j] = col_vec[j].second;
    }
  }
}
// CPU kernel for argsort: sorts X along `axis`, producing sorted values in
// Out and the original positions in Indices. When `axis` is the last
// dimension the sort runs directly; otherwise the input is transposed so the
// sort axis becomes last, sorted, and the results transposed back.
template <typename DeviceContext, typename T>
class ArgsortKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input = ctx.Input<framework::Tensor>("X");
auto* output = ctx.Output<framework::Tensor>("Out");
auto* indices = ctx.Output<framework::Tensor>("Indices");
int axis = ctx.Attr<int>("axis");
bool descending = ctx.Attr<bool>("descending");
auto in_dims = input->dims();
// normalize a negative axis to its positive equivalent
axis = (axis < 0) ? (in_dims.size() + axis) : axis;
T* out_data = output->mutable_data<T>(ctx.GetPlace());
// Do full sort
if (axis == -1 || axis + 1 == in_dims.size()) {
// sort axis is already innermost: view X as (height, width) and sort
// each row directly
const int64_t input_height = framework::product(
framework::slice_ddim(in_dims, 0, in_dims.size() - 1));
const int64_t input_width = in_dims[in_dims.size() - 1];
int64_t* ids_data = indices->mutable_data<int64_t>(ctx.GetPlace());
FullSort<T, int64_t>(input_height, input_width, in_dims.size(), input,
out_data, ids_data, descending);
} else {
// If not full sort do transpose
// build a permutation that swaps `axis` with the last dimension
std::vector<int> trans;
for (int i = 0; i < axis; i++) {
trans.push_back(i);
}
trans.push_back(in_dims.size() - 1);
for (int i = axis + 1; i < in_dims.size() - 1; i++) {
trans.push_back(i);
}
trans.push_back(axis);
framework::DDim trans_dims(in_dims);
for (size_t i = 0; i < trans.size(); i++) {
trans_dims[i] = in_dims[trans[i]];
}
Tensor trans_inp;
trans_inp.mutable_data<T>(trans_dims, ctx.GetPlace());
int ndims = trans.size();
auto& dev_ctx = ctx.template device_context<platform::CPUDeviceContext>();
// Do transpose
TransCompute<platform::CPUDeviceContext, T>(ndims, dev_ctx, *input,
&trans_inp, trans);
const int64_t input_height = framework::product(
framework::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
const int64_t input_width = trans_dims[trans_dims.size() - 1];
// sort the transposed copy into temporaries, then transpose the
// results back into the real outputs (the permutation is its own
// inverse since it is a single swap)
Tensor tmp_out;
T* t_out = tmp_out.mutable_data<T>(trans_dims, ctx.GetPlace());
output->mutable_data<T>(ctx.GetPlace());
Tensor tmp_indices;
auto* t_ind =
tmp_indices.mutable_data<int64_t>(trans_dims, ctx.GetPlace());
FullSort<T, int64_t>(input_height, input_width, in_dims.size(),
&trans_inp, t_out, t_ind, descending);
indices->mutable_data<int64_t>(ctx.GetPlace());
TransCompute<platform::CPUDeviceContext, int64_t>(
ndims, dev_ctx, tmp_indices, indices, trans);
// transpose back
TransCompute<platform::CPUDeviceContext, T>(ndims, dev_ctx, tmp_out,
output, trans);
}
}
};
} // namespace operators
} // namespace paddle
|
elemwise_binary_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file elemwise_binary_op.h
* \brief Function definition of elementwise binary operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#include <mxnet/operator_util.h>
#include <mxnet/op_attr_types.h>
#include <vector>
#include <string>
#include <utility>
#include <typeinfo>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../../engine/openmp.h"
#include "elemwise_unary_op.h"
#include "../../common/utils.h"
#include "./init_op.h"
namespace mxnet {
namespace op {
/*! Gather binary operator functions into ElemwiseBinaryOp class */
class ElemwiseBinaryOp : public OpBase {
public:
/*! \brief For sparse, assume missing rvalue is 0 */
// Elementwise kernel for sparse inputs where the right operand has no stored
// value at position i: computes OP(lhs[i], 0) and stores it into out[i]
// according to the write-request mode Req (via KERNEL_ASSIGN).
template<typename OP, int Req>
struct MissingRValueOp {
typedef OP Operation;
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) {
KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0)));
}
};
/*! \brief For sparse, assume missing lvalue is 0 */
// Mirror of MissingRValueOp: the left operand is the missing one, so this
// computes OP(0, rhs[i]) and stores it into out[i] per the Req mode.
template<typename OP, int Req>
struct MissingLValueOp {
typedef OP Operation;
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) {
KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i]));
}
};
private:
/*!
* \brief CSR operation requires temp space
*/
enum ResourceRequestType {
kTempSpace
};
/*!
* \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input
* CPU-Only version
*/
// Fill output rows [iter_out, min(idx_l, idx_r)) with OP(0, 0) -- the value
// an elementwise op produces where both sparse inputs are missing. Returns
// the new output-row cursor, min(idx_l, idx_r). CPU-only; rows are filled
// in parallel with OpenMP.
template<typename DType, typename OP, typename xpu>
static inline size_t FillDense(mshadow::Stream<xpu> *s,
const size_t idx_l,
const size_t idx_r,
const OpReqType req,
mshadow::Tensor<xpu, 2, DType> *out,
const size_t iter_out) {
const int index_out_min = static_cast<int>(std::min(idx_l, idx_r));
if (static_cast<size_t>(index_out_min) > iter_out) {
// OP(0,0) is row-invariant, so compute it once outside the loop
const DType zero_input_val = OP::Map(DType(0), DType(0));
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) {
Fill<false>(s, (*out)[i], req, zero_input_val);
}
}
return static_cast<size_t>(index_out_min); // MSVC wants OMP loops to always use 'int'
}
static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) {
return a1.var() == a2.var();
}
/*! \brief Minimum of three */
static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) {
return a < b ? (a < c ? a : c) : (b < c ? b : c);
}
//template<typename xpu, typename LOP, typename ROP, typename DType>
//static void BackwardUseNone_(const nnvm::NodeAttrs &attrs,
// const OpContext &ctx,
// const std::vector<TBlob> &inputs,
// const std::vector<OpReqType> &req,
// const std::vector<TBlob> &outputs) {
// using namespace mxnet_op;
// Stream<xpu> *s = ctx.get_stream<xpu>();
// const int size = static_cast<int>((outputs[0].Size() + DataType<DType>::kLanes - 1)
// / DataType<DType>::kLanes);
// const DType *ograd_dptr = inputs[0].dptr<DType>();
// if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) {
// CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>());
// } else if (req[0] != kNullOp) {
// DType *lgrad_dptr = outputs[0].dptr<DType>();
// MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
// Kernel<mxnet_op::op_with_req<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr);
// });
// }
// if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) {
// CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>());
// } else if (req[1] != kNullOp) {
// DType *rgrad_dptr = outputs[1].dptr<DType>();
// MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
// Kernel<mxnet_op::op_with_req<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr);
// });
// }
//}
//template<typename xpu, typename LOP, typename ROP, typename DType>
//static void BackwardUseIn_(const nnvm::NodeAttrs &attrs,
// const OpContext &ctx,
// const std::vector<TBlob> &inputs,
// const std::vector<OpReqType> &req,
// const std::vector<TBlob> &outputs) {
// DCHECK_EQ(outputs.size(), 2U);
// DCHECK_EQ(inputs.size(), 3U);
// mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>();
// const DType *ograd_dptr = inputs[0].dptr<DType>();
// const DType *lhs_dptr = inputs[1].dptr<DType>();
// const DType *rhs_dptr = inputs[2].dptr<DType>();
// MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
// const int size = static_cast<int>(
// (outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1)
// / mxnet_op::DataType<DType>::kLanes);
// DType * lgrad_dptr = outputs[0].dptr<DType>();
// mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<LOP>, Req>, xpu>::Launch(
// s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
// MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
// const int size = static_cast<int>(
// (outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1)
// / mxnet_op::DataType<DType>::kLanes);
// DType * rgrad_dptr = outputs[1].dptr<DType>();
// mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<ROP>, Req>, xpu>::Launch(
// s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
//}
//template<
// typename xpu,
// typename LOP,
// typename ROP,
// typename DType,
// bool in0_ok_dense = false,
// bool in1_ok_dense = false,
// bool in2_ok_dense = false,
// typename BackupCompute>
//static inline void BackwardUseInEx_(const nnvm::NodeAttrs &attrs,
// const OpContext &ctx,
// const std::vector<NDArray> &inputs,
// const std::vector<OpReqType> &req,
// const std::vector<NDArray> &outputs,
// BackupCompute backup_compute) {
// mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
// // lhs grad
// if (req[0] != kNullOp) {
// // RspRspOp can handle dense outputs so long as OP(0, 0) == 0
// MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, {
// RspRspOp<DType, IType, LOP>(
// s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0],
// false, false, false, false);
// });
// // lhs in-place
// MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
// RspRspOp<DType, IType, op::mshadow_op::mul>(
// s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0],
// false, false, true, false);
// });
// }
// // rhs grad
// if (req[1] != kNullOp) {
// MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, {
// RspRspOp<DType, IType, ROP>(
// s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1],
// false, false, false, false);
// });
// // rhs in-place
// MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
// RspRspOp<DType, IType, op::mshadow_op::mul>(
// s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1],
// false, false, true, false);
// });
// }
//}
protected:
/*! \brief Binary op handling for lhr/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
template<typename DType, typename IType, typename OP>
static void RspRspOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
bool lhs_may_be_dense,
bool rhs_may_be_dense,
bool allow_inplace,
bool scatter);
/*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
template<typename DType, typename IType, typename CType, typename OP>
static inline void CsrCsrOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output);
public:
/*!
* \brief Rsp-op-Rsp operation which produces a dense result
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
/*!
* \brief Allow one of the inputs to be dense and still produce a sparse output
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
template<bool lhs_dense_ok = true, bool rhs_dense_ok = true>
static bool AllowLRDenseInputWithSparseOutputStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name;
CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name;
const auto& lhs_stype = in_attrs->at(0);
const auto& rhs_stype = in_attrs->at(1);
auto& out_stype = out_attrs->at(0);
bool dispatched = false;
const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask;
const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
DispatchMode::kFComputeEx;
if (!dispatched && lhs_stype == kDefaultStorage && rhs_stype == kDefaultStorage) {
// dns, dns -> dns
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
if (!dispatched) {
if ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) ||
(rhs_dense_ok && lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_dense_ok && lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) {
// rsp, rsp -> rsp
// rsp, dns -> rsp
// dns, rsp -> rsp
dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
dispatch_mode, dispatch_ex);
} else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
// csr, csr -> csr
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, dispatch_ex);
} else if ((lhs_stype == kCSRStorage && rhs_dense_ok) ||
(rhs_stype == kCSRStorage && lhs_dense_ok)) {
// csr, dns -> csr
// dns, csr -> csr
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, DispatchMode::kFComputeFallback);
}
}
if (!dispatched) {
dispatched = dispatch_fallback(out_attrs, dispatch_mode);
}
return dispatched;
}
/*!
* \brief Backward pass computing input gradient using forward inputs
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
//static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs,
// int dev_mask,
// DispatchMode* dispatch_mode,
// std::vector<int> *in_attrs,
// std::vector<int> *out_attrs);
template<typename xpu, typename OP>
static void Compute(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] != kNullOp) {
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
});
});
}
}
template<typename xpu, typename OP>
static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] != kNullOp) {
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
});
});
}
}
template<typename xpu, typename OP>
static void ComputeEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
CHECK_EQ(inputs.size(), 2);
CHECK_EQ(outputs.size(), 1);
if (req[0] == kNullOp) return;
const auto lhs_stype = inputs[0].storage_type();
const auto out_stype = outputs[0].storage_type();
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
if ((common::ContainsOnlyStorage(inputs, kRowSparseStorage))
&& (out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) {
// rsp, rsp -> rsp
// rsp, rsp -> dns
const int rsp_input_idx = lhs_stype == kRowSparseStorage ? 0 : 1;
MSHADOW_IDX_TYPE_SWITCH(inputs[rsp_input_idx].aux_type(rowsparse::kIdx), IType, {
MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
RspRspOp<DType, IType, OP>(
s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false);
});
});
} else if (common::ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) {
// csr, csr -> csr
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIdx), IType, {
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIndPtr), CType, {
MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
CsrCsrOp<DType, IType, CType, OP>(
s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]);
});
});
});
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
/*! \brief ComputeEx allowing dense lvalue and/or rvalue */
template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense>
static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(inputs.size(), 2);
CHECK_EQ(outputs.size(), 1);
if (req[0] == kNullOp) return;
const auto lhs_stype = inputs[0].storage_type();
const auto rhs_stype = inputs[1].storage_type();
const auto out_stype = outputs[0].storage_type();
if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) &&
((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) ||
(lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
lhs_may_be_dense && rhs_may_be_dense) {
// rsp, rsp -> rsp
// rsp, rsp -> dns
// rsp, dns -> rsp
// dns, rsp -> rsp
// More than once dense not allowed (this will be checked in RspRspOp):
// rsp, dns -> dns <-- NOT ALLOWED
// dns, rsp -> dns <-- NOT ALLOWED
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
MSHADOW_IDX_TYPE_SWITCH(outputs[0].aux_type(rowsparse::kIdx), IType, {
RspRspOp<DType, IType, OP>(
s, attrs, ctx, inputs[0], inputs[1],
req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false);
});
});
} else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs);
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
//template<typename xpu, typename LOP, typename ROP>
//static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs,
// const OpContext &ctx,
// const std::vector<TBlob> &inputs,
// const std::vector<OpReqType> &req,
// const std::vector<TBlob> &outputs) {
// MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
// BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
// });
//}
//template<typename xpu, typename LOP, typename ROP>
//static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs,
// const OpContext &ctx,
// const std::vector<TBlob> &inputs,
// const std::vector<OpReqType> &req,
// const std::vector<TBlob> &outputs) {
// MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
// BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
// });
//}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
CHECK_EQ(inputs.size(), 1U); // output grad
CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad
const auto in_stype = inputs[0].storage_type();
const auto lhs_stype = outputs[0].storage_type();
const auto rhs_stype = outputs[1].storage_type();
// lhs grad
if (req[0] != kNullOp) {
if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
CHECK_EQ(outputs[0].storage_type(), in_stype);
// rsp -> rsp, _. op requires 0-input returns 0-output
DCHECK_LT(fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]});
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
// rhs grad
if (req[1] != kNullOp) {
if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
CHECK_EQ(outputs[0].storage_type(), in_stype);
// rsp -> _, rsp. op requires 0-input returns 0-output
DCHECK_LT(fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]});
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
}
//template<typename xpu, typename LOP, typename ROP>
//static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs,
// const OpContext &ctx,
// const std::vector<TBlob> &inputs,
// const std::vector<OpReqType> &req,
// const std::vector<TBlob> &outputs) {
// MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
// BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
// });
//}
//template<typename xpu, typename LOP, typename ROP>
//static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs,
// const OpContext &ctx,
// const std::vector<TBlob> &inputs,
// const std::vector<OpReqType> &req,
// const std::vector<TBlob> &outputs) {
// MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
// BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
// });
//}
//template<
// typename xpu, typename LOP, typename ROP,
// bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false>
//static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs,
// const OpContext &ctx,
// const std::vector<NDArray> &inputs,
// const std::vector<OpReqType> &req,
// const std::vector<NDArray> &outputs) {
// using namespace common;
// CHECK_EQ(inputs.size(), 3U);
// CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad
// const auto lhs_grad_stype = outputs[0].storage_type();
// const auto rhs_grad_stype = outputs[1].storage_type();
// if (ContainsOnlyStorage(inputs, kRowSparseStorage) &&
// (lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) &&
// (rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) {
// // rsp, rsp, rsp -> [dns, rsp], [dns, rsp]
// MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
// BackwardUseInEx_<xpu, LOP, ROP, DType, in0_ok_dense, in1_ok_dense, in2_ok_dense>(
// attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>);
// });
// }
//}
}; // class ElemwiseBinaryOp
/*!
 * \brief Binary launch: register operator `name` with two inputs
 * ("lhs", "rhs"), one output, elementwise shape/type inference, and
 * in-place options lhs->out and rhs->out.
 */
#define MXNET_OPERATOR_REGISTER_BINARY(name) \
  NNVM_REGISTER_OP(name) \
  .set_num_inputs(2) \
  .set_num_outputs(1) \
  .set_attr<nnvm::FListInputNames>("FListInputNames", \
    [](const NodeAttrs& attrs) { \
      return std::vector<std::string>{"lhs", "rhs"}; \
    }) \
  .set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \
  .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \
  .set_attr<nnvm::FInplaceOption>("FInplaceOption", \
    [](const NodeAttrs& attrs){ \
      return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \
    }) \
  .add_argument("lhs", "NDArray-or-Symbol", "first input") \
  .add_argument("rhs", "NDArray-or-Symbol", "second input")

/*!
 * \brief Binary launch, with FComputeEx for csr and rsp available.
 * Requests temp space because the sparse CSR path needs scratch memory.
 */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \
  MXNET_OPERATOR_REGISTER_BINARY(__name$) \
  .set_attr<FInferStorageType>("FInferStorageType", \
    ElemwiseStorageType<2, 1, true, true, true>) \
  .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
  .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
  .set_attr<FResourceRequest>("FResourceRequest",  /* For Sparse CSR */ \
    [](const NodeAttrs& attrs) { \
      return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})

/*! \brief Binary launch, dense result
 *  FInferStorageType attr is not set using this macro.
 *  By default DefaultStorageType is used.
 */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \
  MXNET_OPERATOR_REGISTER_BINARY(__name$) \
  .set_attr<FInferStorageType>("FInferStorageType", \
    ElemwiseBinaryOp::SparseSparseWithDenseResult) \
  .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
  .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
|
secondorder_itSL_evo_omp.h | //- NEW SL ORDER 2 function: BASIC ORDER 1 SCHEME (OK)
// One semi-Lagrangian evolution step from time t over step dt, parallelized
// with OpenMP over the mesh points listed in rank[0..ranksize-1].
// For each point it minimizes (via OPTIM_SL) over the ncall controls (*u)[c]
// the two-point-formula average of interpolated/boundary values, plus the
// running cost, discounting, and an optional "+ lambda*u" term.
// NOTE(review): rank[] appears to enumerate the interior node indices
// (the commented-out loops ran over mesh->inn_nbPoints) -- confirm.
void HJB_SL::secondorder_itSL_evo_omp(double t, double dt)
{
  //- P=PARAMP of the data file.
  int i,j,c,e,d,p,test;
  // default(none): t, dt shared explicitly; class members are reached through
  // the implicitly shared `this`, loop/work variables are private per thread.
  #pragma omp parallel num_threads(OMP_NUM_THREADS) private(d,i,j,c,p,test,e) shared(t,dt) default(none)
  {
    // Snapshot the current value function into vold.  The implicit barrier
    // at the end of this `omp for` guarantees the snapshot is complete
    // before any thread starts the update loop below, which reads vold at
    // interpolated (foot-of-characteristic) points.
    #pragma omp for
    //for(i=0; i<mesh->inn_nbPoints; i++)
    for(j=0;j<ranksize;j++){
      i =rank[j];
      vold[i]=v[i];
    }
    // Main update: each iteration writes only v[i] for its own i, so the
    // loop is race-free.
    #pragma omp for
    //for(i=0; i<mesh->inn_nbPoints; i++){
    for(j=0;j<ranksize;j++){
      i =rank[j];
      double a=0.0;
      double minA=0.0;
      // Per-thread scratch (VLAs of size dim, a class member).
      double coordsomp[dim];
      double rCoordomp[dim];
      double dvectdoubleomp[dim];
      // Real-space coordinates of grid point i (member function pointer).
      (mesh->*(mesh->setcoords))(i,rCoordomp);
      // Optimize over the ncall candidate controls.
      for(c=0;c<ncall;c++){
        a = 0.0;
        //- this: (p<=P) for use with data/data_SL2_optionput_approach2.h
        for(p=0;p<P;p++){
          for(e=-1;e<=1;e+=2){ //- two points formula
            // Foot of the characteristic for noise index p, sign e.
            funcY(rCoordomp, p, e, (*u)[c], t, dt, dvectdoubleomp);
            if(periodic_mesh)
              ComputePeriodic(dvectdoubleomp, coordsomp);
            else
              for(d=0;d<dim;d++)
                coordsomp[d] = dvectdoubleomp[d];
            // test=1 when the foot leaves the computational box [lb, hb].
            test=0;
            for(d=0;d<dim;d++){
              if(coordsomp[d] <= lb[d] || coordsomp[d] >= hb[d]){
                test=1;
                break;
              }
            }
            if(test)
              a += (*this.*VbordCompute)(t,coordsomp,vold);   // boundary value
            else
              a += (*this.*interpolation)(coordsomp,vold);    // interior interpolation
          }
        }
        // Average of the 2*P sampled values.
        a = a / (2*P);
        a += dt * (*distributed_cost)(rCoordomp,(*u)[c],t);
        a *= exp(-funcR(rCoordomp, (*u)[c], t)*dt);
        a = a/(1.0+dt*discount_factor(rCoordomp)); // for + lambda * u parameter (or steady equations)
        // Keep the best value over controls (OPTIM_SL is min or max).
        if(c==0)
          minA = a;
        else
          minA = (*OPTIM_SL)(minA,a);
      }
      v[i]=minA;
    }
  }// end of pragma parallel
  return;
}
|
primo.c | #include <stdio.h>
#include <math.h>
/* Primality test by trial division.
 * Returns 1 if num is prime, 0 otherwise (num <= 1 is not prime).
 * The divisor bound uses the integer condition d <= num / d instead of
 * sqrt(num): equivalent to d*d <= num without overflow, and immune to the
 * floating-point rounding that double sqrt() can suffer for large longs. */
int primo(long num)
{
    long d;
    if (num <= 1)
        return 0;               /* 0, 1 and negatives are not prime */
    if (num <= 3)
        return 1;               /* 2 and 3 are prime */
    if (num % 2 == 0)
        return 0;               /* even numbers > 2 are composite */
    for (d = 3; d <= num / d; d += 2) {
        if (num % d == 0)
            return 0;
    }
    return 1;
}
/* Count the primes below max_num and print the total.
 * The prime 2 is counted separately; odd candidates in [3, max_num) are
 * tested in parallel with an OpenMP reduction on the counter. */
int main(void)
{
    const long max_num = 5000000;
    long soma = 0;
    int n;

    if (max_num >= 2) {
        soma = 1; /* account for 2, the only even prime */
        #pragma omp parallel for reduction(+:soma) schedule(guided, 100)
        for (n = 3; n < max_num; n += 2) {
            soma += primo(n);
        }
    }
    printf("Número total de primos: %ld\n", soma);
    return 0;
}
|
par_nongalerkin.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "../HYPRE.h"
/* This file contains the routines for constructing non-Galerkin coarse grid
* operators, based on the original Galerkin coarse grid
*/
/* Take all of the indices from indices[start, start+1, start+2, ..., end]
* and take the corresponding entries in array and place them in-order in output.
* Assumptions:
* output is of length end-start+1
* indices never contains an index that goes out of bounds in array
* */
/* Gather array[indices[start]], ..., array[indices[end]] into output, in
 * order.  output must hold end-start+1 entries and every index must be in
 * bounds for array. */
HYPRE_Int
hypre_GrabSubArray(HYPRE_Int * indices,
                   HYPRE_Int start,
                   HYPRE_Int end,
                   HYPRE_BigInt * array,
                   HYPRE_BigInt * output)
{
   HYPRE_Int pos;

   /* Walk the inclusive index window directly. */
   for (pos = start; pos <= end; pos++)
   {
      output[pos - start] = array[indices[pos]];
   }

   return 0;
}
/* Compute the intersection of x and y, placing
* the intersection in z. Additionally, the array
* x_data is associated with x, i.e., the entries
* that we grab from x, we also grab from x_data.
* If x[k] is placed in z[m], then x_data[k] goes to
* output_x_data[m].
*
* Assumptions:
* z is of length min(x_length, y_length)
* x and y are sorted
* x_length and y_length are similar in size, otherwise,
* looping over the smaller array and doing binary search
* in the longer array is faster.
* */
/* Sorted-merge intersection of x and y into z; for every matched x[k] the
 * companion value x_data[k] is copied to output_x_data at the same slot.
 * On return *intersect_length holds the number of matches. */
HYPRE_Int
hypre_IntersectTwoArrays(HYPRE_Int *x,
                         HYPRE_Real *x_data,
                         HYPRE_Int x_length,
                         HYPRE_Int *y,
                         HYPRE_Int y_length,
                         HYPRE_Int *z,
                         HYPRE_Real *output_x_data,
                         HYPRE_Int *intersect_length)
{
   HYPRE_Int xi = 0;
   HYPRE_Int yi = 0;
   HYPRE_Int nz = 0;

   /* Advance whichever cursor holds the smaller value; on equality emit. */
   while (xi < x_length && yi < y_length)
   {
      if (x[xi] == y[yi])
      {
         z[nz] = x[xi];
         output_x_data[nz] = x_data[xi];
         nz++;
         xi++;
         yi++;
      }
      else if (x[xi] < y[yi])
      {
         xi++;
      }
      else
      {
         yi++;
      }
   }

   *intersect_length = nz;
   return 1;
}
/* Big-integer variant of hypre_IntersectTwoArrays: identical sorted-merge
 * intersection, with HYPRE_BigInt index arrays. */
HYPRE_Int
hypre_IntersectTwoBigArrays(HYPRE_BigInt *x,
                            HYPRE_Real *x_data,
                            HYPRE_Int x_length,
                            HYPRE_BigInt *y,
                            HYPRE_Int y_length,
                            HYPRE_BigInt *z,
                            HYPRE_Real *output_x_data,
                            HYPRE_Int *intersect_length)
{
   HYPRE_Int xi = 0;
   HYPRE_Int yi = 0;
   HYPRE_Int nz = 0;

   /* Advance whichever cursor holds the smaller value; on equality emit. */
   while (xi < x_length && yi < y_length)
   {
      if (x[xi] == y[yi])
      {
         z[nz] = x[xi];
         output_x_data[nz] = x_data[xi];
         nz++;
         xi++;
         yi++;
      }
      else if (x[xi] < y[yi])
      {
         xi++;
      }
      else
      {
         yi++;
      }
   }

   *intersect_length = nz;
   return 1;
}
/* Copy CSR matrix A to CSR matrix B. The column indices are
* assumed to be sorted, and the sparsity pattern of B is a subset
* of the sparsity pattern of A.
*
* Assumptions:
* Column indices of A and B are sorted
* Sparsity pattern of B is a subset of A's
* A and B are the same size and have same data layout
**/
/* Copy the entries of ParCSR matrix A into B for every position present in
 * B's (subset) sparsity pattern.  Column indices of both matrices must be
 * sorted and A, B must share size and data layout. */
HYPRE_Int
hypre_SortedCopyParCSRData(hypre_ParCSRMatrix *A,
                           hypre_ParCSRMatrix *B)
{
   /* Grab off A and B's data structures */
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
   HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
   HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
   HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
   HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
   hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
   HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag);
   HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag);
   HYPRE_Real *B_diag_data = hypre_CSRMatrixData(B_diag);
   hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
   HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd);
   HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd);
   HYPRE_Real *B_offd_data = hypre_CSRMatrixData(B_offd);
   HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag);
   /* Scratch buffer for the intersection routine, grown on demand and
    * reused across rows. */
   HYPRE_Int *temp_int_array = NULL;
   HYPRE_Int temp_int_array_length = 0;
   HYPRE_Int i, length, offset_A, offset_B;

   for (i = 0; i < num_variables; i++)
   {
      /* Deal with the first row entries, which may be diagonal elements.
       * offset_* == 1 means the row's first stored entry is the diagonal,
       * which is copied directly and skipped by the intersection below. */
      if (A_diag_j[A_diag_i[i]] == i)
      { offset_A = 1; }
      else
      { offset_A = 0; }
      if (B_diag_j[B_diag_i[i]] == i)
      { offset_B = 1; }
      else
      { offset_B = 0; }
      if ((offset_B == 1) && (offset_A == 1))
      { B_diag_data[B_diag_i[i]] = A_diag_data[A_diag_i[i]]; }

      /* This finds the intersection of the column indices, and
       * also copies the matching data in A to the data array in B. */
      /* Grow the scratch buffer if this row's off-diagonal part of A
       * is longer than anything seen so far. */
      if ((A_diag_i[i + 1] - A_diag_i[i] - offset_A) > temp_int_array_length)
      {
         hypre_TFree(temp_int_array, HYPRE_MEMORY_HOST);
         temp_int_array_length = (A_diag_i[i + 1] - A_diag_i[i] - offset_A);
         temp_int_array = hypre_CTAlloc(HYPRE_Int, temp_int_array_length, HYPRE_MEMORY_HOST);
      }
      hypre_IntersectTwoArrays(&(A_diag_j[A_diag_i[i] + offset_A]),
                               &(A_diag_data[A_diag_i[i] + offset_A]),
                               A_diag_i[i + 1] - A_diag_i[i] - offset_A,
                               &(B_diag_j[B_diag_i[i] + offset_B]),
                               B_diag_i[i + 1] - B_diag_i[i] - offset_B,
                               temp_int_array,
                               &(B_diag_data[B_diag_i[i] + offset_B]),
                               &length);

      /* Same treatment for the off-processor (offd) part of the row;
       * offd rows never contain the diagonal, so no offsets here. */
      if ((A_offd_i[i + 1] - A_offd_i[i]) > temp_int_array_length)
      {
         hypre_TFree(temp_int_array, HYPRE_MEMORY_HOST);
         temp_int_array_length = (A_offd_i[i + 1] - A_offd_i[i]);
         temp_int_array = hypre_CTAlloc(HYPRE_Int, temp_int_array_length, HYPRE_MEMORY_HOST);
      }
      hypre_IntersectTwoArrays(&(A_offd_j[A_offd_i[i]]),
                               &(A_offd_data[A_offd_i[i]]),
                               A_offd_i[i + 1] - A_offd_i[i],
                               &(B_offd_j[B_offd_i[i]]),
                               B_offd_i[i + 1] - B_offd_i[i],
                               temp_int_array,
                               &(B_offd_data[B_offd_i[i]]),
                               &length);
   }

   if (temp_int_array)
   { hypre_TFree(temp_int_array, HYPRE_MEMORY_HOST); }
   return 1;
}
/*
* Equivalent to hypre_BoomerAMGCreateS, except, the data array of S
* is not Null and contains the data entries from A.
*/
HYPRE_Int
hypre_BoomerAMG_MyCreateS(hypre_ParCSRMatrix *A,
HYPRE_Real strength_threshold,
HYPRE_Real max_row_sum,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
hypre_ParCSRMatrix **S_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = NULL;
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_Int num_nonzeros_diag;
HYPRE_Int num_nonzeros_offd = 0;
HYPRE_Int num_cols_offd = 0;
hypre_ParCSRMatrix *S;
hypre_CSRMatrix *S_diag;
HYPRE_Int *S_diag_i;
HYPRE_Int *S_diag_j;
HYPRE_Real *S_diag_data;
hypre_CSRMatrix *S_offd;
HYPRE_Int *S_offd_i = NULL;
HYPRE_Int *S_offd_j = NULL;
HYPRE_Real *S_offd_data;
HYPRE_Real diag, row_scale, row_sum;
HYPRE_Int i, jA, jS;
HYPRE_Int ierr = 0;
HYPRE_Int *dof_func_offd;
HYPRE_Int num_sends;
HYPRE_Int *int_buf_data;
HYPRE_Int index, start, j;
/*--------------------------------------------------------------
* Compute a ParCSR strength matrix, S.
*
* For now, the "strength" of dependence/influence is defined in
* the following way: i depends on j if
* aij > hypre_max (k != i) aik, aii < 0
* or
* aij < hypre_min (k != i) aik, aii >= 0
* Then S_ij = aij, else S_ij = 0.
*
* NOTE: the entries are negative initially, corresponding
* to "unaccounted-for" dependence.
*----------------------------------------------------------------*/
num_nonzeros_diag = A_diag_i[num_variables];
num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
A_offd_i = hypre_CSRMatrixI(A_offd);
num_nonzeros_offd = A_offd_i[num_variables];
/* Initialize S */
S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars,
row_starts, row_starts,
num_cols_offd, num_nonzeros_diag, num_nonzeros_offd);
/* row_starts is owned by A, col_starts = row_starts */
hypre_ParCSRMatrixSetRowStartsOwner(S,0);
S_diag = hypre_ParCSRMatrixDiag(S);
hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST);
hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST);
hypre_CSRMatrixData(S_diag) = hypre_CTAlloc(HYPRE_Real, num_nonzeros_diag, HYPRE_MEMORY_HOST);
S_offd = hypre_ParCSRMatrixOffd(S);
hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables+1, HYPRE_MEMORY_HOST);
S_diag_i = hypre_CSRMatrixI(S_diag);
S_diag_j = hypre_CSRMatrixJ(S_diag);
S_diag_data = hypre_CSRMatrixData(S_diag);
S_offd_i = hypre_CSRMatrixI(S_offd);
hypre_CSRMatrixMemoryLocation(S_diag) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixMemoryLocation(S_offd) = HYPRE_MEMORY_HOST;
dof_func_offd = NULL;
if (num_cols_offd)
{
A_offd_data = hypre_CSRMatrixData(A_offd);
hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST);
hypre_CSRMatrixData(S_offd) = hypre_CTAlloc(HYPRE_Real, num_nonzeros_offd, HYPRE_MEMORY_HOST);
S_offd_j = hypre_CSRMatrixJ(S_offd);
S_offd_data = hypre_CSRMatrixData(S_offd);
hypre_ParCSRMatrixColMapOffd(S) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1)
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
}
/*-------------------------------------------------------------------
* Get the dof_func data for the off-processor columns
*-------------------------------------------------------------------*/
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
if (num_functions > 1)
{
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j=start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i+1); j++)
int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg,j)];
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
}
/* give S same nonzero structure as A */
hypre_ParCSRMatrixCopy(A,S,1);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,diag,row_scale,row_sum,jA) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_variables; i++)
{
diag = A_diag_data[A_diag_i[i]];
/* compute scaling factor and row sum */
row_scale = 0.0;
row_sum = diag;
if (num_functions > 1)
{
if (diag < 0)
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
if (dof_func[i] == dof_func[A_diag_j[jA]])
{
row_scale = hypre_max(row_scale, A_diag_data[jA]);
row_sum += A_diag_data[jA];
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (dof_func[i] == dof_func_offd[A_offd_j[jA]])
{
row_scale = hypre_max(row_scale, A_offd_data[jA]);
row_sum += A_offd_data[jA];
}
}
}
else
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
if (dof_func[i] == dof_func[A_diag_j[jA]])
{
row_scale = hypre_min(row_scale, A_diag_data[jA]);
row_sum += A_diag_data[jA];
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (dof_func[i] == dof_func_offd[A_offd_j[jA]])
{
row_scale = hypre_min(row_scale, A_offd_data[jA]);
row_sum += A_offd_data[jA];
}
}
}
}
else
{
if (diag < 0)
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
row_scale = hypre_max(row_scale, A_diag_data[jA]);
row_sum += A_diag_data[jA];
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
row_scale = hypre_max(row_scale, A_offd_data[jA]);
row_sum += A_offd_data[jA];
}
}
else
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
row_scale = hypre_min(row_scale, A_diag_data[jA]);
row_sum += A_diag_data[jA];
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
row_scale = hypre_min(row_scale, A_offd_data[jA]);
row_sum += A_offd_data[jA];
}
}
}
/* compute row entries of S */
S_diag_j[A_diag_i[i]] = -1;
if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0))
{
/* make all dependencies weak */
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
S_diag_j[jA] = -1;
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
S_offd_j[jA] = -1;
}
}
else
{
if (num_functions > 1)
{
if (diag < 0)
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
if (A_diag_data[jA] <= strength_threshold * row_scale
|| dof_func[i] != dof_func[A_diag_j[jA]])
{
S_diag_j[jA] = -1;
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (A_offd_data[jA] <= strength_threshold * row_scale
|| dof_func[i] != dof_func_offd[A_offd_j[jA]])
{
S_offd_j[jA] = -1;
}
}
}
else
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
if (A_diag_data[jA] >= strength_threshold * row_scale
|| dof_func[i] != dof_func[A_diag_j[jA]])
{
S_diag_j[jA] = -1;
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (A_offd_data[jA] >= strength_threshold * row_scale
|| dof_func[i] != dof_func_offd[A_offd_j[jA]])
{
S_offd_j[jA] = -1;
}
}
}
}
else
{
if (diag < 0)
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
if (A_diag_data[jA] <= strength_threshold * row_scale)
{
S_diag_j[jA] = -1;
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (A_offd_data[jA] <= strength_threshold * row_scale)
{
S_offd_j[jA] = -1;
}
}
}
else
{
for (jA = A_diag_i[i]+1; jA < A_diag_i[i+1]; jA++)
{
if (A_diag_data[jA] >= strength_threshold * row_scale)
{
S_diag_j[jA] = -1;
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (A_offd_data[jA] >= strength_threshold * row_scale)
{
S_offd_j[jA] = -1;
}
}
}
}
}
}
/*--------------------------------------------------------------
* "Compress" the strength matrix.
*
* NOTE: S has *NO DIAGONAL ELEMENT* on any row. Caveat Emptor!
*
* NOTE: This "compression" section of code may not be removed, the
* non-Galerkin routine depends on it.
*----------------------------------------------------------------*/
/* RDF: not sure if able to thread this loop */
jS = 0;
for (i = 0; i < num_variables; i++)
{
S_diag_i[i] = jS;
for (jA = A_diag_i[i]; jA < A_diag_i[i+1]; jA++)
{
if (S_diag_j[jA] > -1)
{
S_diag_j[jS] = S_diag_j[jA];
S_diag_data[jS] = S_diag_data[jA];
jS++;
}
}
}
S_diag_i[num_variables] = jS;
hypre_CSRMatrixNumNonzeros(S_diag) = jS;
/* RDF: not sure if able to thread this loop */
jS = 0;
for (i = 0; i < num_variables; i++)
{
S_offd_i[i] = jS;
for (jA = A_offd_i[i]; jA < A_offd_i[i+1]; jA++)
{
if (S_offd_j[jA] > -1)
{
S_offd_j[jS] = S_offd_j[jA];
S_offd_data[jS] = S_offd_data[jA];
jS++;
}
}
}
S_offd_i[num_variables] = jS;
hypre_CSRMatrixNumNonzeros(S_offd) = jS;
hypre_ParCSRMatrixCommPkg(S) = NULL;
*S_ptr = S;
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
return (ierr);
}
/**
* Initialize the IJBuffer counters
**/
HYPRE_Int
hypre_NonGalerkinIJBufferInit( HYPRE_Int *ijbuf_cnt, /* See NonGalerkinIJBufferWrite for parameter descriptions */
                               HYPRE_Int *ijbuf_rowcounter,
                               HYPRE_Int *ijbuf_numcols )
{
   /* Reset the IJ buffer to the empty state: zero stored entries, and a
    * single "open" row that currently holds no columns. */
   HYPRE_Int ierr = 0;

   *ijbuf_cnt        = 0;
   *ijbuf_rowcounter = 1;   /* Always points to the next row */
   *ijbuf_numcols    = 0;   /* The open row starts with zero columns */

   return ierr;
}
/**
* Initialize the IJBuffer counters
**/
HYPRE_Int
hypre_NonGalerkinIJBigBufferInit( HYPRE_Int *ijbuf_cnt, /* See NonGalerkinIJBufferWrite for parameter descriptions */
                                  HYPRE_Int *ijbuf_rowcounter,
                                  HYPRE_BigInt *ijbuf_numcols )
{
   /* Same as hypre_NonGalerkinIJBufferInit, but for a HYPRE_BigInt
    * numcols array: reset to the empty state with one open row. */
   HYPRE_Int ierr = 0;

   *ijbuf_cnt        = 0;
   *ijbuf_rowcounter = 1;   /* Always points to the next row */
   *ijbuf_numcols    = 0;   /* The open row starts with zero columns */

   return ierr;
}
/**
* Update the buffer counters
**/
HYPRE_Int
hypre_NonGalerkinIJBufferNewRow(HYPRE_BigInt *ijbuf_rownums, /* See NonGalerkinIJBufferWrite for parameter descriptions */
                                HYPRE_Int *ijbuf_numcols,
                                HYPRE_Int *ijbuf_rowcounter,
                                HYPRE_BigInt new_row)
{
   /* Begin a new row in the IJ buffer.  If the most recent row never
    * received any entries, reuse its slot instead of advancing. */
   HYPRE_Int ierr = 0;
   HYPRE_Int last = (*ijbuf_rowcounter) - 1;

   if (ijbuf_numcols[last] == 0)
   {
      /* Previous row is empty: overwrite it with the new row number */
      ijbuf_rownums[last] = new_row;
   }
   else
   {
      /* Advance to a fresh row slot */
      ijbuf_rownums[last + 1] = new_row;
      ijbuf_numcols[last + 1] = 0;
      (*ijbuf_rowcounter)++;
   }

   return ierr;
}
/**
* Compress the current row in an IJ Buffer by removing duplicate entries
**/
HYPRE_Int
hypre_NonGalerkinIJBufferCompressRow( HYPRE_Int *ijbuf_cnt, /* See NonGalerkinIJBufferWrite for parameter descriptions */
                                      HYPRE_Int ijbuf_rowcounter,
                                      HYPRE_Real *ijbuf_data,
                                      HYPRE_BigInt *ijbuf_cols,
                                      HYPRE_BigInt *ijbuf_rownums,
                                      HYPRE_Int *ijbuf_numcols)
{
   /* Compress the current (last) row in the buffer: sort its column
    * indices, then merge runs of equal columns by summing their values.
    * Decrements *ijbuf_cnt and the row's numcols by the number of
    * duplicates removed. */
   HYPRE_Int ierr = 0;
   HYPRE_Int ncols, k, shift;

   /* The current row occupies the last ncols slots of the buffer */
   ncols = ijbuf_numcols[ijbuf_rowcounter - 1];
   shift = 0;

   /* Sort the row's column indices, carrying the data along */
   hypre_BigQsort1(ijbuf_cols, ijbuf_data, (*ijbuf_cnt) - ncols, (*ijbuf_cnt) - 1);

   /* Single sweep over the sorted row: accumulate duplicates into the
    * first occurrence, shifting unique entries down to close the gaps */
   for (k = (*ijbuf_cnt) - ncols + 1; k <= (*ijbuf_cnt) - 1; k++)
   {
      if (ijbuf_cols[k] == ijbuf_cols[k - 1])
      {
         /* Duplicate column: fold its value into the surviving entry */
         shift++;
         ijbuf_data[k - shift] += ijbuf_data[k];
      }
      else if (shift > 0)
      {
         /* Unique entry: slide it down over the removed duplicates */
         ijbuf_data[k - shift] = ijbuf_data[k];
         ijbuf_cols[k - shift] = ijbuf_cols[k];
      }
   }

   (*ijbuf_cnt) -= shift;
   ijbuf_numcols[ijbuf_rowcounter - 1] -= shift;

   return ierr;
}
/**
* Compress the entire buffer, removing duplicate rows
**/
/**
 * Compress the entire buffer, removing duplicate rows.
 *
 * Rows are sorted by global row number; when the same global row appears
 * more than once, its entries are gathered into a single row, and duplicate
 * columns within that merged row are combined by CompressRow.  On exit the
 * buffer arrays may have been reallocated (freed and replaced), and
 * *ijbuf_cnt / *ijbuf_rowcounter reflect the compressed contents.
 **/
HYPRE_Int
hypre_NonGalerkinIJBufferCompress( HYPRE_Int ijbuf_size,
                                   HYPRE_Int *ijbuf_cnt, /* See NonGalerkinIJBufferWrite for parameter descriptions */
                                   HYPRE_Int *ijbuf_rowcounter,
                                   HYPRE_Real **ijbuf_data,
                                   HYPRE_BigInt **ijbuf_cols,
                                   HYPRE_BigInt **ijbuf_rownums,
                                   HYPRE_Int **ijbuf_numcols)
{
   HYPRE_Int ierr = 0;
   HYPRE_Int *indys = hypre_CTAlloc(HYPRE_Int, (*ijbuf_rowcounter), HYPRE_MEMORY_HOST);

   HYPRE_Int i, j, duplicate, cnt_new, rowcounter_new;
   HYPRE_Int row_loc;
   /* BUG FIX: prev_row was declared HYPRE_Int, but it is assigned from and
    * compared against the global row number "row" (HYPRE_BigInt).  For
    * problems with more than 2^31 global rows the assignment silently
    * truncated, so the row-change test could misfire.  Use HYPRE_BigInt;
    * -1 remains a valid sentinel. */
   HYPRE_BigInt prev_row;
   HYPRE_BigInt row_start, row_stop, row;
   HYPRE_Real *data_new;
   HYPRE_BigInt *cols_new;
   HYPRE_BigInt *rownums_new;
   HYPRE_Int *numcols_new;

   /* Do a sort on rownums, but store the original order in indys.
    * Then see if there are any duplicate rows */
   for (i = 0; i < (*ijbuf_rowcounter); i++)
   {  indys[i] = i; }
   hypre_BigQsortbi((*ijbuf_rownums), indys, 0, (*ijbuf_rowcounter) - 1);
   duplicate = 0;
   for (i = 1; i < (*ijbuf_rowcounter); i++)
   {
      /* If the permutation is not the identity shifted by one at each step,
       * some row was out of order, i.e. a global row number repeats */
      if (indys[i] != (indys[i - 1] + 1))
      {
         duplicate = 1;
         break;
      }
   }

   /* Compress duplicate rows */
   if (duplicate)
   {
      /* Accumulate numcols, so that it functions like a CSR row-pointer */
      for (i = 1; i < (*ijbuf_rowcounter); i++)
      {  (*ijbuf_numcols)[i] += (*ijbuf_numcols)[i - 1]; }

      /* Initialize new buffer */
      prev_row       = -1;
      rowcounter_new = 0;
      cnt_new        = 0;
      data_new       = hypre_CTAlloc(HYPRE_Real,   ijbuf_size, HYPRE_MEMORY_DEVICE);
      cols_new       = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
      rownums_new    = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
      numcols_new    = hypre_CTAlloc(HYPRE_Int,    ijbuf_size, HYPRE_MEMORY_DEVICE);
      numcols_new[0] = 0;

      /* Cycle through each row (in sorted global-row order) */
      for (i = 0; i < (*ijbuf_rowcounter); i++)
      {
         /* Find which row this is in local and global numberings, and where
          * this row's data starts and stops in the buffer */
         row_loc = indys[i];
         row     = (*ijbuf_rownums)[i];
         if (row_loc > 0)
         {
            row_start = (*ijbuf_numcols)[row_loc - 1];
            row_stop  = (*ijbuf_numcols)[row_loc];
         }
         else
         {
            row_start = 0;
            row_stop  = (*ijbuf_numcols)[row_loc];
         }

         /* Is this a new row?  If so, compress previous row, and add a new
          * one.  Noting that prev_row = -1 is a special value */
         if (row != prev_row)
         {
            if (prev_row != -1)
            {
               /* Compress previous row */
               hypre_NonGalerkinIJBufferCompressRow(&cnt_new, rowcounter_new, data_new,
                                                    cols_new, rownums_new, numcols_new);
            }
            prev_row                    = row;
            numcols_new[rowcounter_new] = 0;
            rownums_new[rowcounter_new] = row;
            rowcounter_new++;
         }

         /* Copy row into new buffer */
         for (j = row_start; j < row_stop; j++)
         {
            data_new[cnt_new] = (*ijbuf_data)[j];
            cols_new[cnt_new] = (*ijbuf_cols)[j];
            numcols_new[rowcounter_new - 1]++;
            cnt_new++;
         }
      }

      /* Compress the final row.  (Previously guarded by "i > 1", which
       * depended on the loop variable after the loop; rowcounter_new > 0
       * states the intent directly and is equivalent here, since the
       * duplicate branch always produces at least one merged row.) */
      if (rowcounter_new > 0)
      {
         hypre_NonGalerkinIJBufferCompressRow(&cnt_new, rowcounter_new, data_new,
                                              cols_new, rownums_new, numcols_new);
      }

      *ijbuf_cnt        = cnt_new;
      *ijbuf_rowcounter = rowcounter_new;

      /* Point to the new buffer */
      hypre_TFree(*ijbuf_data,    HYPRE_MEMORY_DEVICE);
      hypre_TFree(*ijbuf_cols,    HYPRE_MEMORY_DEVICE);
      hypre_TFree(*ijbuf_rownums, HYPRE_MEMORY_DEVICE);
      hypre_TFree(*ijbuf_numcols, HYPRE_MEMORY_DEVICE);
      (*ijbuf_data)    = data_new;
      (*ijbuf_cols)    = cols_new;
      (*ijbuf_rownums) = rownums_new;
      (*ijbuf_numcols) = numcols_new;
   }

   hypre_TFree(indys, HYPRE_MEMORY_HOST);

   return ierr;
}
/**
* Do a buffered write to an IJ matrix.
* That is, write to the buffer, until the buffer is full. Then when the
* buffer is full, write to the IJ matrix and reset the buffer counters
* In effect, this buffers this operation
* A[row_to_write, col_to_write] += val_to_write
**/
HYPRE_Int
hypre_NonGalerkinIJBufferWrite( HYPRE_IJMatrix B, /* Unassembled matrix to add an entry to */
                                HYPRE_Int *ijbuf_cnt, /* current buffer size */
                                HYPRE_Int ijbuf_size, /* max buffer size */
                                HYPRE_Int *ijbuf_rowcounter, /* num of rows in rownums, (i.e., size of rownums) */
                                /* This counter will increase as you call this function for multiple rows */
                                HYPRE_Real **ijbuf_data, /* Array of values, of size ijbuf_size */
                                HYPRE_BigInt **ijbuf_cols, /* Array of col indices, of size ijbuf_size */
                                HYPRE_BigInt **ijbuf_rownums, /* Holds row-indices that with numcols makes for a CSR-like data structure*/
                                HYPRE_Int **ijbuf_numcols, /* rownums[i] is the row num, and numcols holds the number of entries being added */
                                /* for that row. Note numcols is not cumulative like an actual CSR data structure*/
                                HYPRE_BigInt row_to_write, /* Entry to add to the buffer */
                                HYPRE_BigInt col_to_write, /* Ditto */
                                HYPRE_Real val_to_write ) /* Ditto */
{
   /* Buffered A[row_to_write, col_to_write] += val_to_write.  Entries are
    * staged in the ijbuf_* arrays and shipped to B via a single
    * HYPRE_IJMatrixAddToValues call once the buffer fills up. */
   HYPRE_Int ierr = 0;

   /* Make sure the buffer has an open row matching row_to_write */
   if( (*ijbuf_cnt) == 0 )
   {
      /* brand new buffer: increment buffer structures for the new row */
      hypre_NonGalerkinIJBufferNewRow((*ijbuf_rownums), (*ijbuf_numcols), ijbuf_rowcounter, row_to_write);
   }
   else if((*ijbuf_rownums)[ (*ijbuf_rowcounter)-1 ] != row_to_write)
   {
      /* If this is a new row, compress the previous row */
      hypre_NonGalerkinIJBufferCompressRow(ijbuf_cnt, (*ijbuf_rowcounter), (*ijbuf_data),
                                           (*ijbuf_cols), (*ijbuf_rownums), (*ijbuf_numcols));
      /* increment buffer structures for the new row */
      hypre_NonGalerkinIJBufferNewRow( (*ijbuf_rownums), (*ijbuf_numcols), ijbuf_rowcounter, row_to_write);
   }

   /* Add new entry to buffer */
   (*ijbuf_cols)[(*ijbuf_cnt)] = col_to_write;
   (*ijbuf_data)[(*ijbuf_cnt)] = val_to_write;
   (*ijbuf_numcols)[ (*ijbuf_rowcounter)-1 ]++;
   (*ijbuf_cnt)++;

   /* Buffer is full, write to the matrix object.
    * NOTE(review): the flush triggers one slot early (ijbuf_size-1 rather
    * than ijbuf_size) -- presumably to leave headroom for the NewRow call
    * below; confirm before changing. */
   if ( (*ijbuf_cnt) == (ijbuf_size-1) )
   {
      /* If the last row is empty, decrement rowcounter */
      if( (*ijbuf_numcols)[ (*ijbuf_rowcounter)-1 ] == 0)
      {   (*ijbuf_rowcounter)--; }

      /* Compress and Add Entries */
      hypre_NonGalerkinIJBufferCompressRow(ijbuf_cnt, (*ijbuf_rowcounter), (*ijbuf_data),
                                           (*ijbuf_cols), (*ijbuf_rownums), (*ijbuf_numcols));
      hypre_NonGalerkinIJBufferCompress(ijbuf_size, ijbuf_cnt, ijbuf_rowcounter, ijbuf_data,
                                        ijbuf_cols, ijbuf_rownums, ijbuf_numcols);
      ierr += HYPRE_IJMatrixAddToValues(B, *ijbuf_rowcounter, (*ijbuf_numcols), (*ijbuf_rownums), (*ijbuf_cols), (*ijbuf_data));

      /* Reinitialize the buffer, opening a fresh row for row_to_write so
       * subsequent writes continue seamlessly */
      hypre_NonGalerkinIJBufferInit( ijbuf_cnt, ijbuf_rowcounter, (*ijbuf_numcols));
      hypre_NonGalerkinIJBufferNewRow((*ijbuf_rownums), (*ijbuf_numcols), ijbuf_rowcounter, row_to_write);
   }

   return ierr;
}
/**
* Empty the IJ Buffer with a final AddToValues.
**/
HYPRE_Int
hypre_NonGalerkinIJBufferEmpty(HYPRE_IJMatrix B, /* See NonGalerkinIJBufferWrite for parameter descriptions */
                               HYPRE_Int ijbuf_size,
                               HYPRE_Int *ijbuf_cnt,
                               HYPRE_Int ijbuf_rowcounter,
                               HYPRE_Real **ijbuf_data,
                               HYPRE_BigInt **ijbuf_cols,
                               HYPRE_BigInt **ijbuf_rownums,
                               HYPRE_Int **ijbuf_numcols)
{
   /* Flush any remaining buffered entries into B with a final
    * AddToValues, then mark the buffer empty. */
   HYPRE_Int ierr = 0;

   if ((*ijbuf_cnt) > 0)
   {
      /* Compress the open row, merge any duplicate rows, then hand the
       * whole buffer to a single AddToValues call */
      hypre_NonGalerkinIJBufferCompressRow(ijbuf_cnt, ijbuf_rowcounter, (*ijbuf_data),
                                           (*ijbuf_cols), (*ijbuf_rownums), (*ijbuf_numcols));
      hypre_NonGalerkinIJBufferCompress(ijbuf_size, ijbuf_cnt, &ijbuf_rowcounter, ijbuf_data,
                                        ijbuf_cols, ijbuf_rownums, ijbuf_numcols);
      ierr += HYPRE_IJMatrixAddToValues(B, ijbuf_rowcounter, (*ijbuf_numcols),
                                        (*ijbuf_rownums), (*ijbuf_cols), (*ijbuf_data));
   }

   /* Buffer is now empty */
   (*ijbuf_cnt) = 0;

   return ierr;
}
/*
* Construct sparsity pattern based on R_I A P, plus entries required by drop tolerance
*/
/*
 * Construct sparsity pattern based on R_I A P, plus entries required by
 * drop tolerance.  Returns a new ParCSR matrix "Pattern" whose nonzero
 * structure is the union of (a) the structure of R_IAP restricted to
 * C-points and (b) all entries of RAP larger than a row-wise drop
 * tolerance.  If sym_collapse is nonzero, the pattern is symmetrized by
 * also inserting the transpose of every entry.  Ownership of the returned
 * matrix passes to the caller.
 */
hypre_ParCSRMatrix *
hypre_NonGalerkinSparsityPattern(hypre_ParCSRMatrix *R_IAP,
                                 hypre_ParCSRMatrix *RAP,
                                 HYPRE_Int * CF_marker,
                                 HYPRE_Real droptol,
                                 HYPRE_Int sym_collapse,
                                 HYPRE_Int collapse_beta )
{
   /* MPI Communicator */
   MPI_Comm comm = hypre_ParCSRMatrixComm(RAP);

   /* Declare R_IAP */
   hypre_CSRMatrix *R_IAP_diag = hypre_ParCSRMatrixDiag(R_IAP);
   HYPRE_Int *R_IAP_diag_i = hypre_CSRMatrixI(R_IAP_diag);
   HYPRE_Int *R_IAP_diag_j = hypre_CSRMatrixJ(R_IAP_diag);

   hypre_CSRMatrix *R_IAP_offd = hypre_ParCSRMatrixOffd(R_IAP);
   HYPRE_Int *R_IAP_offd_i = hypre_CSRMatrixI(R_IAP_offd);
   HYPRE_Int *R_IAP_offd_j = hypre_CSRMatrixJ(R_IAP_offd);
   HYPRE_BigInt *col_map_offd_R_IAP = hypre_ParCSRMatrixColMapOffd(R_IAP);

   /* Declare RAP */
   hypre_CSRMatrix *RAP_diag = hypre_ParCSRMatrixDiag(RAP);
   HYPRE_Int *RAP_diag_i = hypre_CSRMatrixI(RAP_diag);
   HYPRE_Real *RAP_diag_data = hypre_CSRMatrixData(RAP_diag);
   HYPRE_Int *RAP_diag_j = hypre_CSRMatrixJ(RAP_diag);
   HYPRE_BigInt first_col_diag_RAP = hypre_ParCSRMatrixFirstColDiag(RAP);
   HYPRE_Int num_cols_diag_RAP = hypre_CSRMatrixNumCols(RAP_diag);
   HYPRE_BigInt last_col_diag_RAP = first_col_diag_RAP + (HYPRE_BigInt)num_cols_diag_RAP - 1;

   hypre_CSRMatrix *RAP_offd = hypre_ParCSRMatrixOffd(RAP);
   HYPRE_Int *RAP_offd_i = hypre_CSRMatrixI(RAP_offd);
   HYPRE_Real *RAP_offd_data = NULL;
   HYPRE_Int *RAP_offd_j = hypre_CSRMatrixJ(RAP_offd);
   HYPRE_BigInt *col_map_offd_RAP = hypre_ParCSRMatrixColMapOffd(RAP);
   HYPRE_Int num_cols_RAP_offd = hypre_CSRMatrixNumCols(RAP_offd);

   HYPRE_Int num_variables = hypre_CSRMatrixNumRows(RAP_diag);

   /* Declare A */
   HYPRE_Int num_fine_variables = hypre_CSRMatrixNumRows(R_IAP_diag);

   /* Declare IJ matrices */
   HYPRE_IJMatrix Pattern;
   hypre_ParCSRMatrix *Pattern_CSR = NULL;

   /* Buffered IJAddToValues */
   HYPRE_Int ijbuf_cnt, ijbuf_size, ijbuf_rowcounter;
   HYPRE_Real *ijbuf_data;
   HYPRE_BigInt *ijbuf_cols, *ijbuf_rownums;
   HYPRE_Int *ijbuf_numcols;

   /* Buffered IJAddToValues for Symmetric Entries */
   HYPRE_Int ijbuf_sym_cnt, ijbuf_sym_rowcounter;
   HYPRE_Real *ijbuf_sym_data;
   HYPRE_BigInt *ijbuf_sym_cols, *ijbuf_sym_rownums;
   HYPRE_Int *ijbuf_sym_numcols;

   /* Other Declarations */
   HYPRE_Int ierr = 0;
   HYPRE_Real max_entry = 0.0;
   HYPRE_Real max_entry_offd = 0.0;
   HYPRE_Int * rownz = NULL;
   HYPRE_Int i, j, Cpt;
   HYPRE_BigInt row_start, row_end, global_row, global_col;

   /* Other Setup */
   if (num_cols_RAP_offd)
   {   RAP_offd_data = hypre_CSRMatrixData(RAP_offd); }

   /*
    * Initialize the IJ matrix, leveraging our rough knowledge of the
    * nonzero structure of Pattern based on RAP
    *
    *                         ilower,             iupper,            jlower,             jupper */
   ierr += HYPRE_IJMatrixCreate(comm, first_col_diag_RAP, last_col_diag_RAP, first_col_diag_RAP, last_col_diag_RAP, &Pattern);
   ierr += HYPRE_IJMatrixSetObjectType(Pattern, HYPRE_PARCSR);
   /* Row-size hint: 20% over RAP's per-row nonzero count (diag + offd) */
   rownz = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);
   for(i = 0; i < num_variables; i++)
   {   rownz[i] = 1.2*(RAP_diag_i[i+1] - RAP_diag_i[i]) + 1.2*(RAP_offd_i[i+1] - RAP_offd_i[i]); }
   HYPRE_IJMatrixSetRowSizes(Pattern, rownz);
   ierr += HYPRE_IJMatrixInitialize(Pattern);
   hypre_TFree(rownz, HYPRE_MEMORY_HOST);

   /*
    * For efficiency, we do a buffered IJAddToValues.
    * Here, we initialize the buffer and then initialize the buffer counters
    */
   ijbuf_size = 1000;
   ijbuf_data = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE);
   ijbuf_cols = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
   ijbuf_rownums = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
   ijbuf_numcols = hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE);
   hypre_NonGalerkinIJBigBufferInit( &ijbuf_cnt, &ijbuf_rowcounter, ijbuf_cols );
   if(sym_collapse)
   {
      /* Second buffer holds the transposed entries for symmetrization */
      ijbuf_sym_data   = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE);
      ijbuf_sym_cols   = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
      ijbuf_sym_rownums= hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
      ijbuf_sym_numcols= hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE);
      hypre_NonGalerkinIJBigBufferInit( &ijbuf_sym_cnt, &ijbuf_sym_rowcounter, ijbuf_sym_cols );
   }

   /*
    * Place entries in R_IAP into Pattern
    */
   Cpt = -1; /* Cpt contains the fine grid index of the i-th Cpt */
   for(i = 0; i < num_variables; i++)
   {
      global_row = i+first_col_diag_RAP;

      /* Find the next Coarse Point in CF_marker
       * (assumes CF_marker[j] == 1 marks a C-point -- consistent with the
       * usage elsewhere in this file) */
      for(j = Cpt+1; j < num_fine_variables; j++)
      {
         if(CF_marker[j] == 1)  /* Found Next C-point */
         {
            Cpt = j;
            break;
         }
      }

      /* Diag Portion: row Cpt of R_IAP_diag becomes row i of Pattern */
      row_start = R_IAP_diag_i[Cpt];
      row_end = R_IAP_diag_i[Cpt+1];
      for(j = row_start; j < row_end; j++)
      {
         global_col = R_IAP_diag_j[j] + first_col_diag_RAP;
         /* This call adds a                       1 x 1 to  i            j           data */
         hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
                                         &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
                                         global_col, 1.0);
         if (sym_collapse)
         {
            /* Mirror entry (global_col, global_row) for symmetry */
            hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt,
                                            ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,
                                            &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,
                                            global_col, global_row, 1.0);
         }
      }

      /* Offdiag Portion: map local offd columns back to global indices */
      row_start = R_IAP_offd_i[Cpt];
      row_end = R_IAP_offd_i[Cpt+1];
      for(j = row_start; j < row_end; j++)
      {
         global_col = col_map_offd_R_IAP[ R_IAP_offd_j[j] ];
         /* This call adds a                       1 x 1 to  i            j           data */
         hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
                                         &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
                                         global_col, 1.0);
         if (sym_collapse)
         {
            hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt,
                                            ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,
                                            &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,
                                            global_col, global_row, 1.0);
         }
      }
   }

   /*
    * Use drop-tolerance to compute new entries for sparsity pattern
    */
/*#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,max_entry,max_entry_offd,global_col,global_row) HYPRE_SMP_SCHEDULE
#endif */
   for(i = 0; i < num_variables; i++)
   {
      global_row = i+first_col_diag_RAP;

      /* Compute the drop tolerance for this row, which is just
       *  abs(max of row i)*droptol
       * The diagonal entry (RAP_diag_j[j] == i) is excluded from the max. */
      max_entry = -1.0;
      for(j = RAP_diag_i[i]; j < RAP_diag_i[i+1]; j++)
      {
         if( (RAP_diag_j[j] != i) && (max_entry < fabs(RAP_diag_data[j]) ) )
         {   max_entry = fabs(RAP_diag_data[j]); }
      }
      for(j = RAP_offd_i[i]; j < RAP_offd_i[i+1]; j++)
      {
         {
            if( max_entry < fabs(RAP_offd_data[j]) )
            {   max_entry = fabs(RAP_offd_data[j]); }
         }
      }
      max_entry *= droptol;
      /* Off-processor entries use a (typically looser) threshold scaled by
       * collapse_beta */
      max_entry_offd = max_entry*collapse_beta;

      /* Loop over diag portion, adding all entries that are "strong" */
      for(j = RAP_diag_i[i]; j < RAP_diag_i[i+1]; j++)
      {
         if( fabs(RAP_diag_data[j]) > max_entry )
         {
            global_col = RAP_diag_j[j] + first_col_diag_RAP;
/*#ifdef HYPRE_USING_OPENMP
#pragma omp critical (IJAdd)
#endif
{*/
            /* For efficiency, we do a buffered IJAddToValues
             * A[global_row, global_col] += 1.0 */
            hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
                                            &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
                                            global_col, 1.0 );
            if(sym_collapse)
            {
               hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt,
                                               ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,
                                               &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,
                                               global_col, global_row, 1.0 );
            }
/*}*/
         }
      }

      /* Loop over offd portion, adding all entries that are "strong" */
      for(j = RAP_offd_i[i]; j < RAP_offd_i[i+1]; j++)
      {
         if( fabs(RAP_offd_data[j]) > max_entry_offd )
         {
            global_col = col_map_offd_RAP[ RAP_offd_j[j] ];
/*#ifdef HYPRE_USING_OPENMP
#pragma omp critical (IJAdd)
#endif
{*/
            /* For efficiency, we do a buffered IJAddToValues
             * A[global_row, global_col] += 1.0 */
            hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
                                            &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
                                            global_col, 1.0 );
            if(sym_collapse)
            {
               hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt,
                                               ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,
                                               &ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,
                                               global_col, global_row, 1.0 );
            }
/*}*/
         }
      }
   }

   /* For efficiency, we do a buffered IJAddToValues.
    * This empties the buffer of any remaining values */
   hypre_NonGalerkinIJBufferEmpty(Pattern, ijbuf_size, &ijbuf_cnt, ijbuf_rowcounter,
                                  &ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols);
   if(sym_collapse)
      hypre_NonGalerkinIJBufferEmpty(Pattern, ijbuf_size, &ijbuf_sym_cnt, ijbuf_sym_rowcounter,
                                     &ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
                                     &ijbuf_sym_numcols);

   /* Finalize Construction of Pattern */
   ierr += HYPRE_IJMatrixAssemble(Pattern);
   ierr += HYPRE_IJMatrixGetObject( Pattern, (void**) &Pattern_CSR );

   /* Deallocate.  Setting the object type to -1 detaches the ParCSR object
    * so Pattern_CSR survives the IJ wrapper's destruction. */
   ierr += HYPRE_IJMatrixSetObjectType(Pattern, -1);
   ierr += HYPRE_IJMatrixDestroy(Pattern);
   hypre_TFree(ijbuf_data, HYPRE_MEMORY_DEVICE);
   hypre_TFree(ijbuf_cols, HYPRE_MEMORY_DEVICE);
   hypre_TFree(ijbuf_rownums, HYPRE_MEMORY_DEVICE);
   hypre_TFree(ijbuf_numcols, HYPRE_MEMORY_DEVICE);
   if(sym_collapse)
   {
      hypre_TFree(ijbuf_sym_data, HYPRE_MEMORY_DEVICE);
      hypre_TFree(ijbuf_sym_cols, HYPRE_MEMORY_DEVICE);
      hypre_TFree(ijbuf_sym_rownums, HYPRE_MEMORY_DEVICE);
      hypre_TFree(ijbuf_sym_numcols, HYPRE_MEMORY_DEVICE);
   }

   return Pattern_CSR;
}
HYPRE_Int
hypre_BoomerAMGBuildNonGalerkinCoarseOperator( hypre_ParCSRMatrix **RAP_ptr,
hypre_ParCSRMatrix *AP,
HYPRE_Real strong_threshold,
HYPRE_Real max_row_sum,
HYPRE_Int num_functions,
HYPRE_Int * dof_func_value,
HYPRE_Int * CF_marker,
HYPRE_Real droptol, HYPRE_Int sym_collapse,
HYPRE_Real lump_percent, HYPRE_Int collapse_beta )
{
/* Initializations */
MPI_Comm comm = hypre_ParCSRMatrixComm(*RAP_ptr);
hypre_ParCSRMatrix *S = NULL;
hypre_ParCSRMatrix *RAP = *RAP_ptr;
HYPRE_Int i, j, k, row_start, row_end, value, num_cols_offd_Sext, num_procs;
HYPRE_Int S_ext_diag_size, S_ext_offd_size, last_col_diag_RAP, cnt_offd, cnt_diag, cnt;
HYPRE_Int col_indx_Pattern, current_Pattern_j, col_indx_RAP;
/* HYPRE_Real start_time = hypre_MPI_Wtime(); */
/* HYPRE_Real end_time; */
HYPRE_BigInt *temp = NULL;
HYPRE_Int ierr = 0;
char filename[256];
/* Lumping related variables */
HYPRE_IJMatrix ijmatrix;
HYPRE_BigInt * Pattern_offd_indices = NULL;
HYPRE_BigInt * S_offd_indices = NULL;
HYPRE_BigInt * offd_intersection = NULL;
HYPRE_Real * offd_intersection_data = NULL;
HYPRE_Int * diag_intersection = NULL;
HYPRE_Real * diag_intersection_data = NULL;
HYPRE_Int Pattern_offd_indices_len = 0;
HYPRE_Int Pattern_offd_indices_allocated_len= 0;
HYPRE_Int S_offd_indices_len = 0;
HYPRE_Int S_offd_indices_allocated_len = 0;
HYPRE_Int offd_intersection_len = 0;
HYPRE_Int offd_intersection_allocated_len = 0;
HYPRE_Int diag_intersection_len = 0;
HYPRE_Int diag_intersection_allocated_len = 0;
HYPRE_Real intersection_len = 0;
HYPRE_Int * Pattern_indices_ptr = NULL;
HYPRE_Int Pattern_diag_indices_len = 0;
HYPRE_Int global_row = 0;
HYPRE_Int has_row_ended = 0;
HYPRE_Real lump_value = 0.;
HYPRE_Real diagonal_lump_value = 0.;
HYPRE_Real neg_lump_value = 0.;
HYPRE_Real sum_strong_neigh = 0.;
HYPRE_Int * rownz = NULL;
/* offd and diag portions of RAP */
hypre_CSRMatrix *RAP_diag = hypre_ParCSRMatrixDiag(RAP);
HYPRE_Int *RAP_diag_i = hypre_CSRMatrixI(RAP_diag);
HYPRE_Real *RAP_diag_data = hypre_CSRMatrixData(RAP_diag);
HYPRE_Int *RAP_diag_j = hypre_CSRMatrixJ(RAP_diag);
HYPRE_BigInt first_col_diag_RAP = hypre_ParCSRMatrixFirstColDiag(RAP);
HYPRE_Int num_cols_diag_RAP = hypre_CSRMatrixNumCols(RAP_diag);
hypre_CSRMatrix *RAP_offd = hypre_ParCSRMatrixOffd(RAP);
HYPRE_Int *RAP_offd_i = hypre_CSRMatrixI(RAP_offd);
HYPRE_Real *RAP_offd_data = NULL;
HYPRE_Int *RAP_offd_j = hypre_CSRMatrixJ(RAP_offd);
HYPRE_BigInt *col_map_offd_RAP = hypre_ParCSRMatrixColMapOffd(RAP);
HYPRE_Int num_cols_RAP_offd = hypre_CSRMatrixNumCols(RAP_offd);
HYPRE_Int num_variables = hypre_CSRMatrixNumRows(RAP_diag);
HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(RAP);
/* offd and diag portions of S */
hypre_CSRMatrix *S_diag = NULL;
HYPRE_Int *S_diag_i = NULL;
HYPRE_Real *S_diag_data = NULL;
HYPRE_Int *S_diag_j = NULL;
hypre_CSRMatrix *S_offd = NULL;
HYPRE_Int *S_offd_i = NULL;
HYPRE_Real *S_offd_data = NULL;
HYPRE_Int *S_offd_j = NULL;
HYPRE_BigInt *col_map_offd_S = NULL;
HYPRE_Int num_cols_offd_S;
/* HYPRE_Int num_nonzeros_S_diag; */
/* off processor portions of S */
hypre_CSRMatrix *S_ext = NULL;
HYPRE_Int *S_ext_i = NULL;
HYPRE_Real *S_ext_data = NULL;
HYPRE_BigInt *S_ext_j = NULL;
HYPRE_Int *S_ext_diag_i = NULL;
HYPRE_Real *S_ext_diag_data = NULL;
HYPRE_Int *S_ext_diag_j = NULL;
HYPRE_Int *S_ext_offd_i = NULL;
HYPRE_Real *S_ext_offd_data = NULL;
HYPRE_Int *S_ext_offd_j = NULL;
HYPRE_BigInt *col_map_offd_Sext = NULL;
/* HYPRE_Int num_nonzeros_S_ext_diag;
HYPRE_Int num_nonzeros_S_ext_offd;
HYPRE_Int num_rows_Sext = 0; */
HYPRE_Int row_indx_Sext = 0;
/* offd and diag portions of Pattern */
hypre_ParCSRMatrix *Pattern = NULL;
hypre_CSRMatrix *Pattern_diag = NULL;
HYPRE_Int *Pattern_diag_i = NULL;
HYPRE_Real *Pattern_diag_data = NULL;
HYPRE_Int *Pattern_diag_j = NULL;
hypre_CSRMatrix *Pattern_offd = NULL;
HYPRE_Int *Pattern_offd_i = NULL;
HYPRE_Real *Pattern_offd_data = NULL;
HYPRE_Int *Pattern_offd_j = NULL;
HYPRE_BigInt *col_map_offd_Pattern = NULL;
HYPRE_Int num_cols_Pattern_offd;
HYPRE_Int my_id;
/* Buffered IJAddToValues */
HYPRE_Int ijbuf_cnt, ijbuf_size, ijbuf_rowcounter;
HYPRE_Real *ijbuf_data;
HYPRE_BigInt *ijbuf_cols, *ijbuf_rownums;
HYPRE_Int *ijbuf_numcols;
/* Buffered IJAddToValues for Symmetric Entries */
HYPRE_Int ijbuf_sym_cnt, ijbuf_sym_rowcounter;
HYPRE_Real *ijbuf_sym_data;
HYPRE_BigInt *ijbuf_sym_cols, *ijbuf_sym_rownums;
HYPRE_Int *ijbuf_sym_numcols;
/* Further Initializations */
if (num_cols_RAP_offd)
{ RAP_offd_data = hypre_CSRMatrixData(RAP_offd); }
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
/* Compute Sparsity Pattern */
Pattern = hypre_NonGalerkinSparsityPattern(AP, RAP, CF_marker, droptol, sym_collapse, collapse_beta);
Pattern_diag = hypre_ParCSRMatrixDiag(Pattern);
Pattern_diag_i = hypre_CSRMatrixI(Pattern_diag);
Pattern_diag_data = hypre_CSRMatrixData(Pattern_diag);
Pattern_diag_j = hypre_CSRMatrixJ(Pattern_diag);
Pattern_offd = hypre_ParCSRMatrixOffd(Pattern);
Pattern_offd_i = hypre_CSRMatrixI(Pattern_offd);
Pattern_offd_j = hypre_CSRMatrixJ(Pattern_offd);
col_map_offd_Pattern = hypre_ParCSRMatrixColMapOffd(Pattern);
num_cols_Pattern_offd = hypre_CSRMatrixNumCols(Pattern_offd);
if (num_cols_Pattern_offd)
{ Pattern_offd_data = hypre_CSRMatrixData(Pattern_offd); }
/**
* Fill in the entries of Pattern with entries from RAP
**/
/* First, sort column indices in RAP and Pattern */
for(i = 0; i < num_variables; i++)
{
/* The diag matrices store the diagonal as first element in each row.
* We maintain that for the case of Pattern and RAP, because the
* strength of connection routine relies on it and we need to ignore
* diagonal entries in Pattern later during set intersections.
* */
/* Sort diag portion of RAP */
row_start = RAP_diag_i[i];
if( RAP_diag_j[row_start] == i)
{ row_start = row_start + 1; }
row_end = RAP_diag_i[i+1];
hypre_qsort1(RAP_diag_j, RAP_diag_data, row_start, row_end-1 );
/* Sort diag portion of Pattern */
row_start = Pattern_diag_i[i];
if( Pattern_diag_j[row_start] == i)
{ row_start = row_start + 1; }
row_end = Pattern_diag_i[i+1];
hypre_qsort1(Pattern_diag_j, Pattern_diag_data, row_start, row_end-1 );
/* Sort offd portion of RAP */
row_start = RAP_offd_i[i];
row_end = RAP_offd_i[i+1];
hypre_qsort1(RAP_offd_j, RAP_offd_data, row_start, row_end-1 );
/* Sort offd portion of Pattern */
/* Be careful to map coarse dof i with CF_marker into Pattern */
row_start = Pattern_offd_i[i];
row_end = Pattern_offd_i[i+1];
hypre_qsort1(Pattern_offd_j, Pattern_offd_data, row_start, row_end-1 );
}
/* Create Strength matrix based on RAP or Pattern. If Pattern is used,
* then the SortedCopyParCSRData(...) function call must also be commented
* back in */
/* hypre_SortedCopyParCSRData(RAP, Pattern); */
if(0)
{
/* hypre_BoomerAMG_MyCreateS(Pattern, strong_threshold, max_row_sum, */
hypre_BoomerAMG_MyCreateS(RAP, strong_threshold, max_row_sum,
num_functions, dof_func_value, &S);
}
else
{
/* Passing in "1, NULL" because dof_array is not needed
* because we assume that the number of functions is 1 */
/* hypre_BoomerAMG_MyCreateS(Pattern, strong_threshold, max_row_sum,*/
hypre_BoomerAMG_MyCreateS(RAP, strong_threshold, max_row_sum,
1, NULL, &S);
}
/* Grab diag and offd parts of S */
S_diag = hypre_ParCSRMatrixDiag(S);
S_diag_i = hypre_CSRMatrixI(S_diag);
S_diag_j = hypre_CSRMatrixJ(S_diag);
S_diag_data = hypre_CSRMatrixData(S_diag);
S_offd = hypre_ParCSRMatrixOffd(S);
S_offd_i = hypre_CSRMatrixI(S_offd);
S_offd_j = hypre_CSRMatrixJ(S_offd);
S_offd_data = hypre_CSRMatrixData(S_offd);
col_map_offd_S = hypre_ParCSRMatrixColMapOffd(S);
num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd);
/* num_nonzeros_S_diag = S_diag_i[num_variables]; */
/* Grab part of S that is distance one away from the local rows
* This is needed later for the stencil collapsing. This section
* of the code mimics par_rap.c when it extracts Ps_ext.
* When moving from par_rap.c, the variable name changes were:
* A --> RAP
* P --> S
* Ps_ext --> S_ext
* P_ext_diag --> S_ext_diag
* P_ext_offd --> S_ext_offd
*
* The data layout of S_ext as returned by ExtractBExt gives you only global
* column indices, and must be converted to the local numbering. This code
* section constructs S_ext_diag and S_ext_offd, which are the distance 1
* couplings in S based on the sparsity structure in RAP.
* --> S_ext_diag corresponds to the same column slice that RAP_diag
* corresponds to. Thus, the column indexing is the same as in
* RAP_diag such that S_ext_diag_j[k] just needs to be offset by
* the RAP_diag first global dof offset.
* --> S_ext_offd column indexing is a little more complicated, and
* requires the computation below of col_map_S_ext_offd, which
* maps the local 0,1,2,... column indexing in S_ext_offd to global
* dof numbers. Note, that the num_cols_RAP_offd is NOT equal to
* num_cols_offd_S_ext
* --> The row indexing of S_ext_diag|offd is as follows. Use
* col_map_offd_RAP, where the first index corresponds to the
* first global row index in S_ext_diag|offd. Remember that ExtractBExt
* grabs the information from S required for locally computing
* (RAP*S)[proc_k row slice, :] */
if (num_procs > 1)
{
S_ext = hypre_ParCSRMatrixExtractBExt(S,RAP,1);
S_ext_data = hypre_CSRMatrixData(S_ext);
S_ext_i = hypre_CSRMatrixI(S_ext);
S_ext_j = hypre_CSRMatrixBigJ(S_ext);
}
/* This uses the num_cols_RAP_offd to set S_ext_diag|offd_i, because S_ext
* is the off-processor information needed to compute RAP*S. That is,
* num_cols_RAP_offd represents the number of rows needed from S_ext for
* the multiplication */
S_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_RAP_offd+1, HYPRE_MEMORY_HOST);
S_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_RAP_offd+1, HYPRE_MEMORY_HOST);
S_ext_diag_size = 0;
S_ext_offd_size = 0;
/* num_rows_Sext = num_cols_RAP_offd; */
last_col_diag_RAP = first_col_diag_RAP + num_cols_diag_RAP - 1;
/* construct the S_ext_diag and _offd row-pointer arrays by counting elements
* This looks to create offd and diag blocks related to the local rows belonging
* to this processor...we may not need to split up S_ext this way...or we could.
* It would make for faster binary searching and set intersecting later...this will
* be the bottle neck so LETS SPLIT THIS UP Between offd and diag*/
for (i=0; i < num_cols_RAP_offd; i++)
{
for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++)
if (S_ext_j[j] < first_col_diag_RAP || S_ext_j[j] > last_col_diag_RAP)
S_ext_offd_size++;
else
S_ext_diag_size++;
S_ext_diag_i[i+1] = S_ext_diag_size;
S_ext_offd_i[i+1] = S_ext_offd_size;
}
if (S_ext_diag_size)
{
S_ext_diag_j = hypre_CTAlloc(HYPRE_Int, S_ext_diag_size, HYPRE_MEMORY_HOST);
S_ext_diag_data = hypre_CTAlloc(HYPRE_Real, S_ext_diag_size, HYPRE_MEMORY_HOST);
}
if (S_ext_offd_size)
{
S_ext_offd_j = hypre_CTAlloc(HYPRE_Int, S_ext_offd_size, HYPRE_MEMORY_HOST);
S_ext_offd_data = hypre_CTAlloc(HYPRE_Real, S_ext_offd_size, HYPRE_MEMORY_HOST);
}
/* This copies over the column indices into the offd and diag parts.
* The diag portion has it's local column indices shifted to start at 0.
* The offd portion requires more work to construct the col_map_offd array
* and a local column ordering. */
cnt_offd = 0;
cnt_diag = 0;
cnt = 0;
for (i=0; i < num_cols_RAP_offd; i++)
{
for (j=S_ext_i[i]; j < S_ext_i[i+1]; j++)
if (S_ext_j[j] < first_col_diag_RAP || S_ext_j[j] > last_col_diag_RAP)
{
S_ext_offd_data[cnt_offd] = S_ext_data[j];
//S_ext_offd_j[cnt_offd++] = S_ext_j[j];
S_ext_j[cnt_offd++] = S_ext_j[j];
}
else
{
S_ext_diag_data[cnt_diag] = S_ext_data[j];
S_ext_diag_j[cnt_diag++] = (HYPRE_Int)(S_ext_j[j] - first_col_diag_RAP);
}
}
/* This creates col_map_offd_Sext */
if (S_ext_offd_size || num_cols_offd_S)
{
temp = hypre_CTAlloc(HYPRE_BigInt, S_ext_offd_size+num_cols_offd_S, HYPRE_MEMORY_HOST);
for (i=0; i < S_ext_offd_size; i++)
temp[i] = S_ext_j[i];
cnt = S_ext_offd_size;
for (i=0; i < num_cols_offd_S; i++)
temp[cnt++] = col_map_offd_S[i];
}
if (cnt)
{
/* after this, the first so many entries of temp will hold the
* unique column indices in S_ext_offd_j unioned with the indices
* in col_map_offd_S */
hypre_BigQsort0(temp, 0, cnt-1);
num_cols_offd_Sext = 1;
value = temp[0];
for (i=1; i < cnt; i++)
{
if (temp[i] > value)
{
value = temp[i];
temp[num_cols_offd_Sext++] = value;
}
}
}
else
{
num_cols_offd_Sext = 0;
}
/* num_nonzeros_S_ext_diag = cnt_diag;
num_nonzeros_S_ext_offd = S_ext_offd_size; */
if (num_cols_offd_Sext)
col_map_offd_Sext = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_Sext, HYPRE_MEMORY_HOST);
for (i=0; i < num_cols_offd_Sext; i++)
col_map_offd_Sext[i] = temp[i];
if (S_ext_offd_size || num_cols_offd_S)
hypre_TFree(temp, HYPRE_MEMORY_HOST);
/* look for S_ext_offd_j[i] in col_map_offd_Sext, and set S_ext_offd_j[i]
* to the index of that column value in col_map_offd_Sext */
for (i=0 ; i < S_ext_offd_size; i++)
S_ext_offd_j[i] = hypre_BigBinarySearch(col_map_offd_Sext,
S_ext_j[i],
num_cols_offd_Sext);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(S_ext);
S_ext = NULL;
}
/* Need to sort column indices in S and S_ext */
for(i = 0; i < num_variables; i++)
{
/* Re-Sort diag portion of Pattern, placing the diagonal entry in a
* sorted position */
row_start = Pattern_diag_i[i];
row_end = Pattern_diag_i[i+1];
hypre_qsort1(Pattern_diag_j, Pattern_diag_data, row_start, row_end-1 );
/* Sort diag portion of S, noting that no diagonal entry */
/* S has not "data" array...it's just NULL */
row_start = S_diag_i[i];
row_end = S_diag_i[i+1];
hypre_qsort1(S_diag_j, S_diag_data, row_start, row_end-1 );
/* Sort offd portion of S */
/* S has no "data" array...it's just NULL */
row_start = S_offd_i[i];
row_end = S_offd_i[i+1];
hypre_qsort1(S_offd_j, S_offd_data, row_start, row_end-1 );
}
/* Sort S_ext
* num_cols_RAP_offd equals num_rows for S_ext*/
for(i = 0; i < num_cols_RAP_offd; i++)
{
/* Sort diag portion of S_ext */
row_start = S_ext_diag_i[i];
row_end = S_ext_diag_i[i+1];
hypre_qsort1(S_ext_diag_j, S_ext_diag_data, row_start, row_end-1 );
/* Sort offd portion of S_ext */
row_start = S_ext_offd_i[i];
row_end = S_ext_offd_i[i+1];
hypre_qsort1(S_ext_offd_j, S_ext_offd_data, row_start, row_end-1 );
}
/*
* Now, for the fun stuff -- Computing the Non-Galerkin Operator
*/
/* Initialize the ijmatrix, leveraging our knowledge of the nonzero
* structure in Pattern */
ierr += HYPRE_IJMatrixCreate(comm, first_col_diag_RAP, last_col_diag_RAP,
first_col_diag_RAP, last_col_diag_RAP, &ijmatrix);
ierr += HYPRE_IJMatrixSetObjectType(ijmatrix, HYPRE_PARCSR);
rownz = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);
for(i = 0; i < num_variables; i++)
{ rownz[i] = 1.2*(Pattern_diag_i[i+1] - Pattern_diag_i[i]) + 1.2*(Pattern_offd_i[i+1] - Pattern_offd_i[i]); }
HYPRE_IJMatrixSetRowSizes(ijmatrix, rownz);
ierr += HYPRE_IJMatrixInitialize(ijmatrix);
hypre_TFree(rownz, HYPRE_MEMORY_HOST);
/*
*For efficiency, we do a buffered IJAddToValues.
* Here, we initialize the buffer and then initialize the buffer counters
*/
ijbuf_size = 1000;
ijbuf_data = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_cols = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_rownums = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_numcols = hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE);
hypre_NonGalerkinIJBigBufferInit( &ijbuf_cnt, &ijbuf_rowcounter, ijbuf_cols );
if(sym_collapse)
{
ijbuf_sym_data = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_sym_cols = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_sym_rownums= hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_sym_numcols= hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE);
hypre_NonGalerkinIJBigBufferInit( &ijbuf_sym_cnt, &ijbuf_sym_rowcounter, ijbuf_sym_cols );
}
/*
* Eliminate Entries In RAP_diag
* */
for(i = 0; i < num_variables; i++)
{
global_row = i+first_col_diag_RAP;
row_start = RAP_diag_i[i];
row_end = RAP_diag_i[i+1];
has_row_ended = 0;
/* Only do work if row has nonzeros */
if( row_start < row_end)
{
/* Grab pointer to current entry in Pattern_diag */
current_Pattern_j = Pattern_diag_i[i];
col_indx_Pattern = Pattern_diag_j[current_Pattern_j];
/* Grab this row's indices out of Pattern offd and diag. This will
* be for computing index set intersections for lumping */
/* Ensure adequate length */
Pattern_offd_indices_len = Pattern_offd_i[i+1] - Pattern_offd_i[i];
if(Pattern_offd_indices_allocated_len < Pattern_offd_indices_len)
{
hypre_TFree(Pattern_offd_indices, HYPRE_MEMORY_HOST);
Pattern_offd_indices = hypre_CTAlloc(HYPRE_BigInt, Pattern_offd_indices_len, HYPRE_MEMORY_HOST);
Pattern_offd_indices_allocated_len = Pattern_offd_indices_len;
}
/* Grab sub array from col_map, corresponding to the slice of Pattern_offd_j */
hypre_GrabSubArray(Pattern_offd_j,
Pattern_offd_i[i], Pattern_offd_i[i+1]-1,
col_map_offd_Pattern, Pattern_offd_indices);
/* No need to grab info out of Pattern_diag_j[...], here we just start from
* Pattern_diag_i[i] and end at index Pattern_diag_i[i+1] - 1. We do need to
* ignore the diagonal entry in Pattern, because we don't lump entries there */
if( Pattern_diag_j[Pattern_diag_i[i]] == i )
{
Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i]+1]);
Pattern_diag_indices_len = Pattern_diag_i[i+1] - Pattern_diag_i[i] - 1;
}
else
{
Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i]]);
Pattern_diag_indices_len = Pattern_diag_i[i+1] - Pattern_diag_i[i];
}
}
for(j = row_start; j < row_end; j++)
{
col_indx_RAP = RAP_diag_j[j];
/* Ignore zero entries in RAP */
if( RAP_diag_data[j] != 0.0)
{
/* Don't change the diagonal, just write it */
if(col_indx_RAP == i)
{
/*#ifdef HY PRE_USING_OPENMP
#pragma omp critical (IJAdd)
#endif
{*/
/* For efficiency, we do a buffered IJAddToValues.
* A[global_row, global_row] += RAP_diag_data[j] */
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_row, RAP_diag_data[j] );
/*}*/
}
/* The entry in RAP does not appear in Pattern, so LUMP it */
else if( (col_indx_RAP < col_indx_Pattern) || has_row_ended)
{
/* Lump entry (i, col_indx_RAP) in RAP */
/* Grab the indices for row col_indx_RAP of S_offd and diag. This will
* be for computing lumping locations */
S_offd_indices_len = S_offd_i[col_indx_RAP+1] - S_offd_i[col_indx_RAP];
if(S_offd_indices_allocated_len < S_offd_indices_len)
{
hypre_TFree(S_offd_indices, HYPRE_MEMORY_HOST);
S_offd_indices = hypre_CTAlloc(HYPRE_BigInt, S_offd_indices_len, HYPRE_MEMORY_HOST);
S_offd_indices_allocated_len = S_offd_indices_len;
}
/* Grab sub array from col_map, corresponding to the slice of S_offd_j */
hypre_GrabSubArray(S_offd_j, S_offd_i[col_indx_RAP], S_offd_i[col_indx_RAP+1]-1,
col_map_offd_S, S_offd_indices);
/* No need to grab info out of S_diag_j[...], here we just start from
* S_diag_i[col_indx_RAP] and end at index S_diag_i[col_indx_RAP+1] - 1 */
/* Intersect the diag and offd pieces, remembering that the
* diag array will need to have the offset +first_col_diag_RAP */
cnt = hypre_max(S_offd_indices_len, Pattern_offd_indices_len);
if(offd_intersection_allocated_len < cnt)
{
hypre_TFree(offd_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(offd_intersection_data, HYPRE_MEMORY_HOST);
offd_intersection = hypre_CTAlloc(HYPRE_BigInt, cnt, HYPRE_MEMORY_HOST);
offd_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt, HYPRE_MEMORY_HOST);
offd_intersection_allocated_len = cnt;
}
/* This intersection also tracks S_offd_data and assumes that
* S_offd_indices is the first argument here */
hypre_IntersectTwoBigArrays(S_offd_indices,
&(S_offd_data[ S_offd_i[col_indx_RAP] ]),
S_offd_indices_len,
Pattern_offd_indices,
Pattern_offd_indices_len,
offd_intersection,
offd_intersection_data,
&offd_intersection_len);
/* Now, intersect the indices for the diag block. Note that S_diag_j does
* not have a diagonal entry, so no lumping occurs to the diagonal. */
cnt = hypre_max(Pattern_diag_indices_len,
S_diag_i[col_indx_RAP+1] - S_diag_i[col_indx_RAP] );
if(diag_intersection_allocated_len < cnt)
{
hypre_TFree(diag_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(diag_intersection_data, HYPRE_MEMORY_HOST);
diag_intersection = hypre_CTAlloc(HYPRE_Int, cnt, HYPRE_MEMORY_HOST);
diag_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt, HYPRE_MEMORY_HOST);
diag_intersection_allocated_len = cnt;
}
/* There is no diagonal entry in first position of S */
hypre_IntersectTwoArrays( &(S_diag_j[S_diag_i[col_indx_RAP]]),
&(S_diag_data[ S_diag_i[col_indx_RAP] ]),
S_diag_i[col_indx_RAP+1] - S_diag_i[col_indx_RAP],
Pattern_indices_ptr,
Pattern_diag_indices_len,
diag_intersection,
diag_intersection_data,
&diag_intersection_len);
/* Loop over these intersections, and lump a constant fraction of
* RAP_diag_data[j] to each entry */
intersection_len = diag_intersection_len + offd_intersection_len;
if(intersection_len > 0)
{
/* Sum the strength-of-connection values from row
* col_indx_RAP in S, corresponding to the indices we are
* collapsing to in row i This will give us our collapsing
* weights. */
sum_strong_neigh = 0.0;
for(k = 0; k < diag_intersection_len; k++)
{ sum_strong_neigh += fabs(diag_intersection_data[k]); }
for(k = 0; k < offd_intersection_len; k++)
{ sum_strong_neigh += fabs(offd_intersection_data[k]); }
sum_strong_neigh = RAP_diag_data[j]/sum_strong_neigh;
/* When lumping with the diag_intersection, must offset column index */
for(k = 0; k < diag_intersection_len; k++)
{
lump_value = lump_percent * fabs(diag_intersection_data[k])*sum_strong_neigh;
diagonal_lump_value = (1.0 - lump_percent) * fabs(diag_intersection_data[k])*sum_strong_neigh;
neg_lump_value = -1.0 * lump_value;
cnt = diag_intersection[k]+first_col_diag_RAP;
/*#ifdef HY PRE_USING_OPENMP
#pragma omp critical (IJAdd)
#endif
{*/
/* For efficiency, we do a buffered IJAddToValues.
* A[global_row, cnt] += RAP_diag_data[j] */
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
cnt, lump_value );
if (lump_percent < 1.0)
{
/* Preserve row sum by updating diagonal */
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_row, diagonal_lump_value );
}
/* Update mirror entries, if symmetric collapsing */
if(sym_collapse)
{
/* Update mirror entry */
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, global_row, lump_value );
/* Update mirror entry diagonal */
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, cnt, neg_lump_value );
}
/*}*/
}
/* The offd_intersection has global column indices, i.e., the
* col_map arrays contain global indices */
for(k = 0; k < offd_intersection_len; k++)
{
lump_value = lump_percent * fabs(offd_intersection_data[k])*sum_strong_neigh;
diagonal_lump_value = (1.0 - lump_percent) * fabs(offd_intersection_data[k])*sum_strong_neigh;
neg_lump_value = -1.0 * lump_value;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
offd_intersection[k], lump_value );
if (lump_percent < 1.0)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_row, diagonal_lump_value );
}
/* Update mirror entries, if symmetric collapsing */
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, offd_intersection[k],
global_row, lump_value );
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, offd_intersection[k],
offd_intersection[k], neg_lump_value );
}
}
}
/* If intersection is empty, do not eliminate entry */
else
{
/* Don't forget to update mirror entry if collapsing symmetrically */
if (sym_collapse)
{ lump_value = 0.5*RAP_diag_data[j]; }
else
{ lump_value = RAP_diag_data[j]; }
cnt = col_indx_RAP+first_col_diag_RAP;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
cnt, lump_value );
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, global_row, lump_value );
}
}
}
/* The entry in RAP appears in Pattern, so keep it */
else if(col_indx_RAP == col_indx_Pattern)
{
cnt = col_indx_RAP+first_col_diag_RAP;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
cnt, RAP_diag_data[j] );
/* Only go to the next entry in Pattern, if this is not the end of a row */
if( current_Pattern_j < Pattern_diag_i[i+1]-1 )
{
current_Pattern_j += 1;
col_indx_Pattern = Pattern_diag_j[current_Pattern_j];
}
else
{ has_row_ended = 1;}
}
/* Increment col_indx_Pattern, and repeat this loop iter for current
* col_ind_RAP value */
else if(col_indx_RAP > col_indx_Pattern)
{
for(; current_Pattern_j < Pattern_diag_i[i+1]; current_Pattern_j++)
{
col_indx_Pattern = Pattern_diag_j[current_Pattern_j];
if(col_indx_RAP <= col_indx_Pattern)
{ break;}
}
/* If col_indx_RAP is still greater (i.e., we've reached a row end), then
* we need to lump everything else in this row */
if(col_indx_RAP > col_indx_Pattern)
{ has_row_ended = 1; }
/* Decrement j, in order to repeat this loop iteration for the current
* col_indx_RAP value */
j--;
}
}
}
}
/*
* Eliminate Entries In RAP_offd
* Structure of this for-loop is very similar to the RAP_diag for-loop
* But, not so similar that these loops should be combined into a single fuction.
* */
if(num_cols_RAP_offd)
{
for(i = 0; i < num_variables; i++)
{
global_row = i+first_col_diag_RAP;
row_start = RAP_offd_i[i];
row_end = RAP_offd_i[i+1];
has_row_ended = 0;
/* Only do work if row has nonzeros */
if( row_start < row_end)
{
current_Pattern_j = Pattern_offd_i[i];
Pattern_offd_indices_len = Pattern_offd_i[i+1] - Pattern_offd_i[i];
if( (Pattern_offd_j != NULL) && (Pattern_offd_indices_len > 0) )
{ col_indx_Pattern = col_map_offd_Pattern[ Pattern_offd_j[current_Pattern_j] ]; }
else
{ /* if Pattern_offd_j is not allocated or this is a zero length row,
then all entries need to be lumped.
This is an analagous situation to has_row_ended=1. */
col_indx_Pattern = -1;
has_row_ended = 1;
}
/* Grab this row's indices out of Pattern offd and diag. This will
* be for computing index set intersections for lumping. The above
* loop over RAP_diag ensures adequate length of Pattern_offd_indices */
/* Ensure adequate length */
hypre_GrabSubArray(Pattern_offd_j,
Pattern_offd_i[i], Pattern_offd_i[i+1]-1,
col_map_offd_Pattern, Pattern_offd_indices);
/* No need to grab info out of Pattern_diag_j[...], here we just start from
* Pattern_diag_i[i] and end at index Pattern_diag_i[i+1] - 1. We do need to
* ignore the diagonal entry in Pattern, because we don't lump entries there */
if( Pattern_diag_j[Pattern_diag_i[i]] == i )
{
Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i]+1]);
Pattern_diag_indices_len = Pattern_diag_i[i+1] - Pattern_diag_i[i] - 1;
}
else
{
Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i]]);
Pattern_diag_indices_len = Pattern_diag_i[i+1] - Pattern_diag_i[i];
}
}
for(j = row_start; j < row_end; j++)
{
/* Ignore zero entries in RAP */
if( RAP_offd_data[j] != 0.0)
{
/* In general for all the offd_j arrays, we have to indirectly
* index with the col_map_offd array to get a global index */
col_indx_RAP = col_map_offd_RAP[ RAP_offd_j[j] ];
/* The entry in RAP does not appear in Pattern, so LUMP it */
if( (col_indx_RAP < col_indx_Pattern) || has_row_ended)
{
/* The row_indx_Sext would be found with:
row_indx_Sext = hypre_BinarySearch(col_map_offd_RAP, col_indx_RAP, num_cols_RAP_offd);
But, we already know the answer to this with, */
row_indx_Sext = RAP_offd_j[j];
/* Grab the indices for row row_indx_Sext from the offd and diag parts. This will
* be for computing lumping locations */
S_offd_indices_len = S_ext_offd_i[row_indx_Sext+1] - S_ext_offd_i[row_indx_Sext];
if(S_offd_indices_allocated_len < S_offd_indices_len)
{
hypre_TFree(S_offd_indices, HYPRE_MEMORY_HOST);
S_offd_indices = hypre_CTAlloc(HYPRE_BigInt, S_offd_indices_len, HYPRE_MEMORY_HOST);
S_offd_indices_allocated_len = S_offd_indices_len;
}
/* Grab sub array from col_map, corresponding to the slice of S_ext_offd_j */
hypre_GrabSubArray(S_ext_offd_j, S_ext_offd_i[row_indx_Sext], S_ext_offd_i[row_indx_Sext+1]-1,
col_map_offd_Sext, S_offd_indices);
/* No need to grab info out of S_ext_diag_j[...], here we just start from
* S_ext_diag_i[row_indx_Sext] and end at index S_ext_diag_i[row_indx_Sext+1] - 1 */
/* Intersect the diag and offd pieces, remembering that the
* diag array will need to have the offset +first_col_diag_RAP */
cnt = hypre_max(S_offd_indices_len, Pattern_offd_indices_len);
if(offd_intersection_allocated_len < cnt)
{
hypre_TFree(offd_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(offd_intersection_data, HYPRE_MEMORY_HOST);
offd_intersection = hypre_CTAlloc(HYPRE_BigInt, cnt, HYPRE_MEMORY_HOST);
offd_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt, HYPRE_MEMORY_HOST);
offd_intersection_allocated_len = cnt;
}
hypre_IntersectTwoBigArrays(S_offd_indices,
&(S_ext_offd_data[ S_ext_offd_i[row_indx_Sext] ]),
S_offd_indices_len,
Pattern_offd_indices,
Pattern_offd_indices_len,
offd_intersection,
offd_intersection_data,
&offd_intersection_len);
/* Now, intersect the indices for the diag block. */
cnt = hypre_max(Pattern_diag_indices_len,
S_ext_diag_i[row_indx_Sext+1] - S_ext_diag_i[row_indx_Sext] );
if(diag_intersection_allocated_len < cnt)
{
hypre_TFree(diag_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(diag_intersection_data, HYPRE_MEMORY_HOST);
diag_intersection = hypre_CTAlloc(HYPRE_Int, cnt, HYPRE_MEMORY_HOST);
diag_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt, HYPRE_MEMORY_HOST);
diag_intersection_allocated_len = cnt;
}
hypre_IntersectTwoArrays( &(S_ext_diag_j[S_ext_diag_i[row_indx_Sext]]),
&(S_ext_diag_data[ S_ext_diag_i[row_indx_Sext] ]),
S_ext_diag_i[row_indx_Sext+1] - S_ext_diag_i[row_indx_Sext],
Pattern_indices_ptr,
Pattern_diag_indices_len,
diag_intersection,
diag_intersection_data,
&diag_intersection_len);
/* Loop over these intersections, and lump a constant fraction of
* RAP_offd_data[j] to each entry */
intersection_len = diag_intersection_len + offd_intersection_len;
if(intersection_len > 0)
{
/* Sum the strength-of-connection values from row
* row_indx_Sext in S, corresponding to the indices we are
* collapsing to in row i. This will give us our collapsing
* weights. */
sum_strong_neigh = 0.0;
for(k = 0; k < diag_intersection_len; k++)
{ sum_strong_neigh += fabs(diag_intersection_data[k]); }
for(k = 0; k < offd_intersection_len; k++)
{ sum_strong_neigh += fabs(offd_intersection_data[k]); }
sum_strong_neigh = RAP_offd_data[j]/sum_strong_neigh;
/* When lumping with the diag_intersection, must offset column index */
for(k = 0; k < diag_intersection_len; k++)
{
lump_value = lump_percent * fabs(diag_intersection_data[k])*sum_strong_neigh;
diagonal_lump_value = (1.0 - lump_percent) * fabs(diag_intersection_data[k])*sum_strong_neigh;
neg_lump_value = -1.0 * lump_value;
cnt = diag_intersection[k]+first_col_diag_RAP;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, cnt, lump_value );
if (lump_percent < 1.0)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, global_row,
diagonal_lump_value );
}
/* Update mirror entries, if symmetric collapsing */
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size,
&ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, global_row, lump_value);
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size,
&ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, cnt, neg_lump_value );
}
}
/* The offd_intersection has global column indices, i.e., the
* col_map arrays contain global indices */
for(k = 0; k < offd_intersection_len; k++)
{
lump_value = lump_percent * fabs(offd_intersection_data[k])*sum_strong_neigh;
diagonal_lump_value = (1.0 - lump_percent) * fabs(offd_intersection_data[k])*sum_strong_neigh;
neg_lump_value = -1.0 * lump_value;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
offd_intersection[k], lump_value );
if (lump_percent < 1.0)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, global_row,
diagonal_lump_value );
}
/* Update mirror entries, if symmetric collapsing */
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size,
&ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, offd_intersection[k],
global_row, lump_value );
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size,
&ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, offd_intersection[k],
offd_intersection[k], neg_lump_value );
}
}
}
/* If intersection is empty, do not eliminate entry */
else
{
/* Don't forget to update mirror entry if collapsing symmetrically */
if (sym_collapse)
{ lump_value = 0.5*RAP_offd_data[j]; }
else
{ lump_value = RAP_offd_data[j]; }
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, col_indx_RAP,
lump_value );
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, col_indx_RAP, global_row,
lump_value );
}
}
}
/* The entry in RAP appears in Pattern, so keep it */
else if (col_indx_RAP == col_indx_Pattern)
{
/* For the offd structure, col_indx_RAP is a global dof number */
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, col_indx_RAP,
RAP_offd_data[j]);
/* Only go to the next entry in Pattern, if this is not the end of a row */
if( current_Pattern_j < Pattern_offd_i[i+1]-1 )
{
current_Pattern_j += 1;
col_indx_Pattern = col_map_offd_Pattern[ Pattern_offd_j[current_Pattern_j] ];
}
else
{ has_row_ended = 1;}
}
/* Increment col_indx_Pattern, and repeat this loop iter for current
* col_ind_RAP value */
else if(col_indx_RAP > col_indx_Pattern)
{
for(; current_Pattern_j < Pattern_offd_i[i+1]; current_Pattern_j++)
{
col_indx_Pattern = col_map_offd_Pattern[ Pattern_offd_j[current_Pattern_j] ];
if(col_indx_RAP <= col_indx_Pattern)
{ break;}
}
/* If col_indx_RAP is still greater (i.e., we've reached a row end), then
* we need to lump everything else in this row */
if(col_indx_RAP > col_indx_Pattern)
{ has_row_ended = 1; }
/* Decrement j, in order to repeat this loop iteration for the current
* col_indx_RAP value */
j--;
}
}
}
}
}
/* For efficiency, we do a buffered IJAddToValues.
* This empties the buffer of any remaining values */
hypre_NonGalerkinIJBufferEmpty(ijmatrix, ijbuf_size, &ijbuf_cnt, ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols);
if(sym_collapse)
hypre_NonGalerkinIJBufferEmpty(ijmatrix, ijbuf_size, &ijbuf_sym_cnt, ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols);
/* Assemble non-Galerkin Matrix, and overwrite current RAP*/
ierr += HYPRE_IJMatrixAssemble (ijmatrix);
ierr += HYPRE_IJMatrixGetObject( ijmatrix, (void**) RAP_ptr);
/* Optional diagnostic matrix printing */
if (0)
{
hypre_sprintf(filename, "Pattern_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(Pattern, 0, 0, filename);
hypre_sprintf(filename, "Strength_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(S, 0, 0, filename);
hypre_sprintf(filename, "RAP_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(RAP, 0, 0, filename);
hypre_sprintf(filename, "RAPc_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(*RAP_ptr, 0, 0, filename);
hypre_sprintf(filename, "AP_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(AP, 0, 0, filename);
}
/* Free matrices and variables and arrays */
hypre_TFree(ijbuf_data, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_cols, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_rownums, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_numcols, HYPRE_MEMORY_DEVICE);
if(sym_collapse)
{
hypre_TFree(ijbuf_sym_data, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_sym_cols, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_sym_rownums, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_sym_numcols, HYPRE_MEMORY_DEVICE);
}
hypre_TFree(Pattern_offd_indices, HYPRE_MEMORY_HOST);
hypre_TFree(S_ext_diag_i, HYPRE_MEMORY_HOST);
hypre_TFree(S_ext_offd_i, HYPRE_MEMORY_HOST);
hypre_TFree(S_offd_indices, HYPRE_MEMORY_HOST);
hypre_TFree(offd_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(offd_intersection_data, HYPRE_MEMORY_HOST);
hypre_TFree(diag_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(diag_intersection_data, HYPRE_MEMORY_HOST);
if (S_ext_diag_size)
{
hypre_TFree(S_ext_diag_j, HYPRE_MEMORY_HOST);
hypre_TFree(S_ext_diag_data, HYPRE_MEMORY_HOST);
}
if (S_ext_offd_size)
{
hypre_TFree(S_ext_offd_j, HYPRE_MEMORY_HOST);
hypre_TFree(S_ext_offd_data, HYPRE_MEMORY_HOST);
}
if (num_cols_offd_Sext)
{ hypre_TFree(col_map_offd_Sext, HYPRE_MEMORY_HOST); }
ierr += hypre_ParCSRMatrixDestroy(Pattern);
ierr += hypre_ParCSRMatrixDestroy(RAP);
ierr += hypre_ParCSRMatrixDestroy(S);
ierr += HYPRE_IJMatrixSetObjectType(ijmatrix, -1);
ierr += HYPRE_IJMatrixDestroy(ijmatrix);
/*end_time = hypre_MPI_Wtime();
if(my_id == 0)
{ fprintf(stdout, "NonGalerkin Time: %1.2e\n", end_time-start_time); } */
return ierr;
}
|
GB_unop__abs_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__abs_fp32_fp32)
// op(A') function: GB (_unop_tran__abs_fp32_fp32)
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = fabsf (aij)
// A (input) matrix entry type
#define GB_ATYPE \
float
// C (output) matrix entry type
#define GB_CTYPE \
float
// aij = Ax [pA]: fetch one entry of A into a local scalar
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
// access the p-th entry of the output value array Cx
#define GB_CX(p) Cx [p]
// unary operator: z = |x|, single precision
#define GB_OP(z, x) \
z = fabsf (x) ;
// casting (identity cast here: A and C are both float)
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij): fetch, cast, apply, and store in one step
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = fabsf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
// (GxB_NO_ABS / GxB_NO_FP32 are compile-time opt-out switches; see GB_control.h)
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__abs_fp32_fp32)
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = fabsf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = fabsf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = op (cast (A')): the shared transpose kernel is pulled in by textual
// inclusion of GB_unop_transpose.c, which expands the GB_* macros above.
GrB_Info GB (_unop_tran__abs_fp32_fp32)
(
GrB_Matrix C,                       // output matrix
const GrB_Matrix A,                 // input matrix to transpose and apply op to
int64_t *restrict *Workspaces,      // workspaces for the transpose -- semantics defined in GB_unop_transpose.c
const int64_t *restrict A_slice,    // slicing of A across tasks -- see GB_unop_transpose.c
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mongodb_fmt_plug.c | /* Cracker for both MongoDB system and sniffed network hashes. Hacked together
* during November of 2012 by Dhiru Kholia <dhiru at openwall.com>.
*
* Based on https://github.com/cyberpunkych/attacking_mongodb
*
* Hash format for MongoDB system hashes: user:$mongodb$0$user$hash
* Hash format for MongoDB network hashes: user:$mongodb$1$user$salt$hash
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted. */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_mongodb;
#elif FMT_REGISTERS_H
john_register_one(&fmt_mongodb);
#else
#include "md5.h"
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#ifdef __MIC__
#define OMP_SCALE 512
#else
#define OMP_SCALE 16384 // Tuned on K8-dual HT
#endif // __MIC__
#endif // OMP_SCALE
#endif // _OPENMP
#include "memdbg.h"
/* Format parameters reported to the John the Ripper core. */
#define FORMAT_LABEL "MongoDB"
#define FORMAT_NAME "system / network"
#define FORMAT_TAG "$mongodb$"      /* ciphertext prefix identifying this format */
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)   /* tag length, excluding the NUL */
#define ALGORITHM_NAME "MD5 32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1         /* NOTE(review): -1 is the many-salts benchmark convention -- confirm */
#define PLAINTEXT_LENGTH 32         /* maximum candidate password length */
#define BINARY_SIZE 16              /* raw MD5 digest size in bytes */
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#define SALT_ALIGN sizeof(int)
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* Self-test vectors: "$mongodb$<type>$<user>[$<salt>]$<hash>", password last. */
static struct fmt_tests mongodb_tests[] = {
{"$mongodb$0$sa$75692b1d11c072c6c79332e248c4f699", "sa"},
{"$mongodb$1$sa$58d3229c83e3f87e$0c85e3f74adce5d037426791940c820a", "sa"},
/* Ettercap generated test vectors */
{"$mongodb$1$sa$10441db416a99ffc$797d7e18879446845f10ae9d519960b2", "longpassword"},
{"$mongodb$1$longusername$86336266301fb552$1abe48bac6ad0bf567ab51b094f026a9", "longpassword"},
/* Ettercap fixed salt MiTM attack generated test vectors */
{"$mongodb$1$longusername$0000000000000000$53257e018399a241849cb04c70ba8daa", "longpassword"},
{"$mongodb$1$longusername$0000000000000000$10290925d16d81e50db242c9f3572d91", "longpassword@12345678"},
{"$mongodb$1$eight18_characters$8c82aec197929775$5c414259f7f7a42f8c4d1b6ffb37913a", "123"},
{"$mongodb$1$Herman$9b90cf265f3194d7$a5ca2c517c06fdfb773144d53fb26f56", "123456789"},
{"$mongodb$1$sz110$be8fa52f0e64c250$441d6ece7356c67dcc69dd26e7e0501f", "passWOrd"},
{"$mongodb$1$jack$304b81adddfb4d6f$c95e106f1d9952c88044a0b21a6bd3fd", ""},
// https://jira.mongodb.org/browse/SERVER-9476
{"$mongodb$1$z$ce88504553b16752$6deb79af26ebcdd2b2c40438008cb7b0", "g"},
// https://github.com/mongodb/specifications/blob/master/source/auth/auth.rst
{"$mongodb$1$user$2375531c32080ae8$21742f26431831d5cfca035a08c5bdf6", "pencil"},
{NULL}
};
/* Per-candidate plaintext and computed digests; sized in init(). */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
/* Parsed salt: type 0 = system hash (no salt), type 1 = network hash. */
static struct custom_salt {
int type;
unsigned char salt[17];      /* 16 hex chars + NUL */
unsigned char username[128]; /* valid() caps username at 127 chars */
} *cur_salt;
/* Allocate per-candidate buffers; with OpenMP, scale the key-buffer count
 * by thread count times OMP_SCALE so each thread gets enough work. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
omp_t = omp_get_max_threads();
/* min scales by raw thread count, max also by OMP_SCALE (order matters) */
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
/* Release the buffers allocated in init() (reverse allocation order). */
static void done(void)
{
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}
/* Validate one ciphertext line.
 * Returns 1 iff it matches "$mongodb$<0|1>$<user>[$<hexsalt16>]$<hexhash32>";
 * the username must be at most 127 characters. Works on a strdup'd copy
 * because strtokm mutates its input. */
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ptr, *ctcopy, *keeptr;
int type, extra;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN))
return 0;
if (!(ctcopy = strdup(ciphertext)))
return 0;
keeptr = ctcopy;
ctcopy += FORMAT_TAG_LEN;
if (!(ptr = strtokm(ctcopy, "$"))) /* type */
goto error;
if (!isdec(ptr))
goto error;
type = atoi(ptr);
if (type != 0 && type != 1)
goto error;
if (!(ptr = strtokm(NULL, "$"))) /* username */
goto error;
if (strlen(ptr) > 127)
goto error;
if (type == 0) {
if (!(ptr = strtokm(NULL, "$"))) /* hash */
goto error;
if (hexlenl(ptr, &extra) != 32 || extra)
goto error;
} else {
if (!(ptr = strtokm(NULL, "$"))) /* salt */
goto error;
if (hexlenl(ptr, &extra) != 16 || extra)
goto error;
if (!(ptr = strtokm(NULL, "$"))) /* hash */
goto error;
if (hexlenl(ptr, &extra) != 32 || extra)
goto error;
}
MEM_FREE(keeptr);
return 1;
error:
MEM_FREE(keeptr);
return 0;
}
/* Parse the salt portion of a valid ciphertext into a static custom_salt.
 * Returns a pointer to static storage (the caller copies SALT_SIZE bytes).
 * valid() guarantees username <= 127 chars and salt == 16 hex chars, but we
 * bound the copies anyway instead of using raw strcpy into fixed buffers. */
static void *get_salt(char *ciphertext)
{
	char *ctcopy = strdup(ciphertext);
	char *keeptr = ctcopy;
	char *p;
	size_t len;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	ctcopy += FORMAT_TAG_LEN;	/* skip over "$mongodb$" */
	p = strtokm(ctcopy, "$");
	cs.type = atoi(p);
	p = strtokm(NULL, "$");
	/* bounded copy of the username (buffer is 128 bytes incl. NUL) */
	len = strlen(p);
	if (len > sizeof(cs.username) - 1)
		len = sizeof(cs.username) - 1;
	memcpy(cs.username, p, len);
	cs.username[len] = 0;
	if (cs.type == 1) {
		p = strtokm(NULL, "$");
		/* bounded copy of the 16-hex-char salt (buffer is 17 bytes) */
		len = strlen(p);
		if (len > sizeof(cs.salt) - 1)
			len = sizeof(cs.salt) - 1;
		memcpy(cs.salt, p, len);
		cs.salt[len] = 0;
	}
	MEM_FREE(keeptr);
	return (void *)&cs;
}
/* Decode the trailing 32-hex-char field of the ciphertext into 16 raw
 * bytes. Returns static storage aligned via the union's ARCH_WORD member. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		ARCH_WORD dummy;
	} buf;
	char *src = strrchr(ciphertext, '$') + 1;
	unsigned char *dst = buf.c;
	int i = 0;

	while (i < BINARY_SIZE) {
		dst[i] = (atoi16[ARCH_INDEX(src[0])] << 4) |
		         atoi16[ARCH_INDEX(src[1])];
		src += 2;
		i++;
	}
	return buf.c;
}
/* Hash-table bucket index helpers: successive bit-width masks of word 0. */
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
/* Select the salt crypt_all() will use; salt points at storage the core
 * copied from get_salt()'s result. */
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
/* Write the lowercase-hex expansion of str[0..len-1] into out (2*len
 * bytes, not NUL-terminated). */
static inline void hex_encode(unsigned char *str, int len, unsigned char *out)
{
	int i;
	unsigned char *q = out;

	for (i = 0; i < len; ++i) {
		*q++ = itoa16[str[i] >> 4];
		*q++ = itoa16[str[i] & 0x0F];
	}
}
/* Compute digests for all queued candidates against cur_salt.
 * type 0 (system): MD5(user ":mongo:" password)
 * type 1 (network): MD5(salt || user || hex(MD5(user ":mongo:" password)))
 *
 * Fix: the original placed the for statement itself inside #ifdef _OPENMP,
 * so a build without OpenMP only processed index 0. Only the pragma is
 * conditional now; the loop always runs over every candidate. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;

#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++) {
		if (cur_salt->type == 0) {
			MD5_CTX ctx;
			MD5_Init(&ctx);
			MD5_Update(&ctx, cur_salt->username, strlen((char*)cur_salt->username));
			MD5_Update(&ctx, ":mongo:", 7);
			MD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));
			MD5_Final((unsigned char*)crypt_out[index], &ctx);
		}
		else {
			unsigned char hexout[32];
			unsigned char out[32];
			MD5_CTX ctx;
			/* inner digest: same as the type-0 system hash */
			MD5_Init(&ctx);
			MD5_Update(&ctx, cur_salt->username, strlen((char*)cur_salt->username));
			MD5_Update(&ctx, ":mongo:", 7);
			MD5_Update(&ctx, saved_key[index], strlen(saved_key[index]));
			MD5_Final(out, &ctx);
			hex_encode(out, 16, hexout);
			/* outer digest over salt, username, and the hex inner digest */
			MD5_Init(&ctx);
			MD5_Update(&ctx, cur_salt->salt, 16);
			MD5_Update(&ctx, cur_salt->username, strlen((char*)cur_salt->username));
			MD5_Update(&ctx, hexout, 32);
			MD5_Final((unsigned char*)crypt_out[index], &ctx);
		}
	}
	return count;
}
/* Quick scan: does any computed digest match the candidate binary?
 * Compares only the first ARCH_SIZE bytes; cmp_one() does the full check.
 *
 * Fix: the original guarded the for statement with #ifdef _OPENMP, so a
 * non-OpenMP build compared only index 0. The loop is unconditional now. */
static int cmp_all(void *binary, int count)
{
	int index;

	for (index = 0; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}
/* Full BINARY_SIZE comparison for one candidate. */
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
/* No further verification needed beyond cmp_one (full digest compared). */
static int cmp_exact(char *source, int index)
{
return 1;
}
static void mongodb_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
/* Return the stored (possibly truncated) plaintext for this index. */
static char *get_key(int index)
{
return saved_key[index];
}
/*
* report salt type as first "tunable cost"
*/
static unsigned int mongodb_salt_type(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->type;
}
/* Format descriptor wiring the functions above into the JtR core. */
struct fmt_main fmt_mongodb = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD,
{
"salt type",
/* FIXME: report user name length as 2nd cost? */
},
{ FORMAT_TAG },
mongodb_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
/* tunable cost callbacks, parallel to the names above */
mongodb_salt_type,
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
mongodb_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
// BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
// Naive 3x3 stride-1 int8 convolution: for each output channel, accumulate
// the 3x3 dot product over every input channel into the int32 output plane.
// Assumes the caller padded bottom_blob so that w == outw + 2 (hence the
// fixed "r += 2" row step) -- TODO confirm against callers.
static void conv3x3s1_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const signed char *kernel = _kernel;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
out0.fill(0);
// 9 weights per (output, input) channel pair
const signed char *kernel0 = (const signed char *)kernel + p * inch * 9;
for (int q = 0; q < inch; q++)
{
int *outptr0 = out0;
const signed char *img0 = bottom_blob.channel(q);
// three consecutive input rows feeding one output row
const signed char *r0 = img0;
const signed char *r1 = img0 + w;
const signed char *r2 = img0 + w * 2;
for (int i = 0; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
int sum0 = 0;
sum0 += (int)r0[0] * kernel0[0];
sum0 += (int)r0[1] * kernel0[1];
sum0 += (int)r0[2] * kernel0[2];
sum0 += (int)r1[0] * kernel0[3];
sum0 += (int)r1[1] * kernel0[4];
sum0 += (int)r1[2] * kernel0[5];
sum0 += (int)r2[0] * kernel0[6];
sum0 += (int)r2[1] * kernel0[7];
sum0 += (int)r2[2] * kernel0[8];
*outptr0 += sum0; // accumulate across input channels
r0++;
r1++;
r2++;
outptr0++;
}
// skip the 2-pixel right border to reach the next row
r0 += 2;
r1 += 2;
r2 += 2;
}
kernel0 += 9;
}
}
}
// Winograd F(2,3) kernel transform: U = G * g * G^T per (outch, inch) pair,
// producing a 4x4 short tile. G here is the float G scaled by 2 so the
// arithmetic stays integral; the output transform compensates with >> 2.
static void conv3x3s1_winograd23_transform_kernel_int8_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
kernel_tm.create(4*4, inch, outch, 2ul);
// G
const short ktm[4][3] = {
{ 2, 0, 0},
{ 1, 1, 1},
{ 1, -1, 1},
{ 0, 0, 2}
};
#pragma omp parallel for
for (int p = 0; p<outch; p++)
{
for (int q = 0; q<inch; q++)
{
const signed char* kernel0 = (const signed char*)kernel + p*inch * 9 + q * 9;
short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);
// transform kernel
const signed char* k0 = kernel0;
const signed char* k1 = kernel0 + 3;
const signed char* k2 = kernel0 + 6;
// h = G * g (rows of the 3x3 kernel against each row of G)
short tmp[4][3];
for (int i=0; i<4; i++)
{
tmp[i][0] = (short)k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = (short)k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = (short)k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U = h * G^T
for (int j=0; j<4; j++)
{
short* tmpp = &tmp[j][0];
for (int i=0; i<4; i++)
{
kernel_tm0[j*4 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
}
// Winograd F(2,3) int8 convolution: pad input to even output tiles,
// transform 4x4 input tiles (B^T d B), do the elementwise dot against the
// pre-transformed kernels, then apply the output transform (A^T ... A) and
// crop back to the requested output size. The final >> 2 compensates for
// the doubled G used in the kernel transform.
static void conv3x3s1_winograd23_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 2n+2, winograd F(2,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 1) / 2 * 2;
outh = (outh + 1) / 2 * 2;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm/4; // may be the block num in Feathercnn
int nRowBlocks = w_tm/4;
const int tiles = nColBlocks * nRowBlocks;
bottom_blob_tm.create(4*4, tiles, inch, 2u, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {1.0f, 0.0f, -1.0f, 0.0f},
// {0.0f, 1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 1.00f, 0.0f},
// {0.0f, -1.0f, 0.00f, 1.0f}
// };
#pragma omp parallel for num_threads(opt.num_threads)
for (int q=0; q<inch; q++)
{
const signed char* img = bottom_blob_bordered.channel(q);
short* out_tm0 = bottom_blob_tm.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
// four input rows feeding one row of 4x4 tiles (stride 2 between tiles)
const signed char* r0 = img + w * j * 2;
const signed char* r1 = r0 + w;
const signed char* r2 = r1 + w;
const signed char* r3 = r2 + w;
for (int i = 0; i < nRowBlocks; i++)
{
short d0[4],d1[4],d2[4],d3[4];
short w0[4],w1[4],w2[4],w3[4];
short t0[4],t1[4],t2[4],t3[4];
// load
for (int n = 0; n < 4; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
}
// w = B_t * d
for (int n = 0; n < 4; n++)
{
w0[n] = d0[n] - d2[n];
w1[n] = d1[n] + d2[n];
w2[n] = d2[n] - d1[n];
w3[n] = d3[n] - d1[n];
}
// transpose d to d_t
{
t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3];
t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3];
t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3];
t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3];
}
// U = B_t * d_t
for (int n = 0; n < 4; n++)
{
d0[n] = t0[n] - t2[n];
d1[n] = t1[n] + t2[n];
d2[n] = t2[n] - t1[n];
d3[n] = t3[n] - t1[n];
}
// save to out_tm
for (int n = 0; n < 4; n++)
{
out_tm0[n ] = d0[n];
out_tm0[n+ 4] = d1[n];
out_tm0[n+ 8] = d2[n];
out_tm0[n+12] = d3[n];
}
r0 += 2;
r1 += 2;
r2 += 2;
r3 += 2;
out_tm0 += 16;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm/4; // may be the block num in Feathercnn
int nRowBlocks = w_tm/4;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(16, tiles, outch, 4u, opt.workspace_allocator);
// process output channels four at a time, remainder singly
int nn_outch = outch >> 2;
int remain_outch_start = nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 4;
Mat out0_tm = top_blob_tm.channel(p);
Mat out1_tm = top_blob_tm.channel(p+1);
Mat out2_tm = top_blob_tm.channel(p+2);
Mat out3_tm = top_blob_tm.channel(p+3);
const Mat kernel0_tm = kernel_tm.channel(p);
const Mat kernel1_tm = kernel_tm.channel(p+1);
const Mat kernel2_tm = kernel_tm.channel(p+2);
const Mat kernel3_tm = kernel_tm.channel(p+3);
for (int i=0; i<tiles; i++)
{
int* output0_tm = out0_tm.row<int>(i);
int* output1_tm = out1_tm.row<int>(i);
int* output2_tm = out2_tm.row<int>(i);
int* output3_tm = out3_tm.row<int>(i);
int sum0[16] = {0};
int sum1[16] = {0};
int sum2[16] = {0};
int sum3[16] = {0};
int q = 0;
// unrolled: four input channels per pass; the k pointers step by one
// 16-element row per input channel then rewind (k += 16 ... k -= 16*3)
for (; q+3<inch; q+=4)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* r1 = bottom_blob_tm.channel(q+1).row<short>(i);
const short* r2 = bottom_blob_tm.channel(q+2).row<short>(i);
const short* r3 = bottom_blob_tm.channel(q+3).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
const short* k1 = kernel1_tm.row<short>(q);
const short* k2 = kernel2_tm.row<short>(q);
const short* k3 = kernel3_tm.row<short>(q);
for (int n=0; n<16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
k0 += 16;
sum0[n] += (int)r1[n] * k0[n];
k0 += 16;
sum0[n] += (int)r2[n] * k0[n];
k0 += 16;
sum0[n] += (int)r3[n] * k0[n];
k0 -= 16 * 3;
sum1[n] += (int)r0[n] * k1[n];
k1 += 16;
sum1[n] += (int)r1[n] * k1[n];
k1 += 16;
sum1[n] += (int)r2[n] * k1[n];
k1 += 16;
sum1[n] += (int)r3[n] * k1[n];
k1 -= 16 * 3;
sum2[n] += (int)r0[n] * k2[n];
k2 += 16;
sum2[n] += (int)r1[n] * k2[n];
k2 += 16;
sum2[n] += (int)r2[n] * k2[n];
k2 += 16;
sum2[n] += (int)r3[n] * k2[n];
k2 -= 16 * 3;
sum3[n] += (int)r0[n] * k3[n];
k3 += 16;
sum3[n] += (int)r1[n] * k3[n];
k3 += 16;
sum3[n] += (int)r2[n] * k3[n];
k3 += 16;
sum3[n] += (int)r3[n] * k3[n];
k3 -= 16 * 3;
}
}
// remainder input channels
for (; q<inch; q++)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
const short* k1 = kernel1_tm.row<short>(q);
const short* k2 = kernel2_tm.row<short>(q);
const short* k3 = kernel3_tm.row<short>(q);
for (int n=0; n<16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
sum1[n] += (int)r0[n] * k1[n];
sum2[n] += (int)r0[n] * k2[n];
sum3[n] += (int)r0[n] * k3[n];
}
}
for (int n=0; n<16; n++)
{
output0_tm[n] = sum0[n];
output1_tm[n] = sum1[n];
output2_tm[n] = sum2[n];
output3_tm[n] = sum3[n];
}
}
}
// remaining output channels, one at a time
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int i=0; i<tiles; i++)
{
int* output0_tm = out0_tm.row<int>(i);
int sum0[16] = {0};
int q = 0;
for (; q+3<inch; q+=4)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* r1 = bottom_blob_tm.channel(q+1).row<short>(i);
const short* r2 = bottom_blob_tm.channel(q+2).row<short>(i);
const short* r3 = bottom_blob_tm.channel(q+3).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
const short* k1 = kernel0_tm.row<short>(q+1);
const short* k2 = kernel0_tm.row<short>(q+2);
const short* k3 = kernel0_tm.row<short>(q+3);
for (int n=0; n<16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
sum0[n] += (int)r1[n] * k1[n];
sum0[n] += (int)r2[n] * k2[n];
sum0[n] += (int)r3[n] * k3[n];
}
}
for (; q<inch; q++)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
for (int n=0; n<16; n++)
{
sum0[n] += (int)r0[n] * k0[n];
}
}
for (int n=0; n<16; n++)
{
output0_tm[n] = sum0[n];
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// AT
// const float itm[2][4] = {
// {1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 1.0f}
// };
int w_tm = outw / 2 * 4;
int h_tm = outh / 2 * 4;
int nColBlocks = h_tm/4; // may be the block num in Feathercnn
int nRowBlocks = w_tm/4;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out_tm = top_blob_tm.channel(p);
Mat out = top_blob_bordered.channel(p);
for (int j=0; j<nColBlocks; j++)
{
int* outRow0 = out.row<int>(j*2);
int* outRow1 = out.row<int>(j*2+1);
for(int i=0; i<nRowBlocks; i++)
{
int* out_tile = out_tm.row<int>(j*nRowBlocks + i);
int s0[4],s1[4],s2[4],s3[4];
int w0[4],w1[4];
int d0[2],d1[2],d2[2],d3[2];
int o0[2],o1[2];
// load
for (int n = 0; n < 4; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n+ 4];
s2[n] = out_tile[n+ 8];
s3[n] = out_tile[n+12];
}
// w = A_T * W
for (int n = 0; n < 4; n++)
{
w0[n] = s0[n] + s1[n] + s2[n];
w1[n] = s1[n] - s2[n] + s3[n];
}
// transpose w to w_t
{
d0[0] = w0[0]; d0[1] = w1[0];
d1[0] = w0[1]; d1[1] = w1[1];
d2[0] = w0[2]; d2[1] = w1[2];
d3[0] = w0[3]; d3[1] = w1[3];
}
// Y = A_T * w_t
for (int n = 0; n < 2; n++)
{
o0[n] = d0[n] + d1[n] + d2[n];
o1[n] = d1[n] - d2[n] + d3[n];
}
// save to top blob tm,why right 2,because the G' = G*2
outRow0[0] = o0[0] >> 2;
outRow0[1] = o0[1] >> 2;
outRow1[0] = o1[0] >> 2;
outRow1[1] = o1[1] >> 2;
outRow0 += 2;
outRow1 += 2;
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Winograd F(4,3) kernel transform: U = G * g * G^T per (outch, inch) pair,
// producing a 6x6 short tile. The integer G is the float G scaled by 24;
// the output transform divides by 576 (= 24*24) to compensate.
static void conv3x3s1_winograd43_transform_kernel_int8_sse(const Mat& kernel, Mat& kernel_tm, int inch, int outch)
{
kernel_tm.create(6*6, inch, outch, 2ul);
// G
// const float ktm[6][3] = {
// { 1.0f/4, 0.0f, 0.0f},
// { -1.0f/6, -1.0f/6, -1.0f/6},
// { -1.0f/6, 1.0f/6, -1.0f/6},
// { 1.0f/24, 1.0f/12, 1.0f/6},
// { 1.0f/24, -1.0f/12, 1.0f/6},
// { 0.0f, 0.0f, 1.0f}
// };
const short ktm[6][3] = {
{ 6, 0, 0},
{ -4, -4, -4},
{ -4, 4, -4},
{ 1, 2, 4},
{ 1, -2, 4},
{ 0, 0, 24}
};
#pragma omp parallel for
for (int p = 0; p<outch; p++)
{
for (int q = 0; q<inch; q++)
{
const signed char* kernel0 = (const signed char*)kernel + p*inch * 9 + q * 9;
short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);
// transform kernel
const signed char* k0 = kernel0;
const signed char* k1 = kernel0 + 3;
const signed char* k2 = kernel0 + 6;
// h = G * g
short tmp[6][3];
for (int i=0; i<6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U = h * G^T
for (int j=0; j<6; j++)
{
short* tmpp = &tmp[j][0];
for (int i=0; i<6; i++)
{
kernel_tm0[j*6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
}
// Winograd F(4,3) int8 convolution: pad the input to whole 4x4 output
// tiles, transform overlapping 6x6 input tiles (B^T d B), dot against the
// pre-transformed kernels, apply the output transform (A^T ... A), and
// crop. The /576 undoes the 24x scaling baked into the integer G.
static void conv3x3s1_winograd43_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2, winograd F(4,3)
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
Option opt_b = opt;
opt_b.blob_allocator = opt.workspace_allocator;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, 0, 0.f, opt_b);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm/6; // may be the block num in Feathercnn
int nRowBlocks = w_tm/6;
const int tiles = nColBlocks * nRowBlocks;
bottom_blob_tm.create(6*6, tiles, inch, 2u, opt.workspace_allocator);
// BT
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r03 + r04
// 2 = 4 * (r01 - r02) - r03 + r04
// 3 = -2 * r01 - r02 + 2 * r03 + r04
// 4 = 2 * r01 - r02 - 2 * r03 + r04
// 5 = 4 * r01 - 5 * r03 + r05
#pragma omp parallel for num_threads(opt.num_threads)
for (int q=0; q<inch; q++)
{
const signed char* img = bottom_blob_bordered.channel(q);
short* out_tm0 = bottom_blob_tm.channel(q);
for (int j = 0; j < nColBlocks; j++)
{
// six input rows feed one row of 6x6 tiles (tiles advance by 4)
const signed char* r0 = img + w * j * 4;
const signed char* r1 = r0 + w;
const signed char* r2 = r1 + w;
const signed char* r3 = r2 + w;
const signed char* r4 = r3 + w;
const signed char* r5 = r4 + w;
for (int i = 0; i < nRowBlocks; i++)
{
short d0[6],d1[6],d2[6],d3[6],d4[6],d5[6];
short w0[6],w1[6],w2[6],w3[6],w4[6],w5[6];
short t0[6],t1[6],t2[6],t3[6],t4[6],t5[6];
// load
for (int n = 0; n < 6; n++)
{
d0[n] = r0[n];
d1[n] = r1[n];
d2[n] = r2[n];
d3[n] = r3[n];
d4[n] = r4[n];
d5[n] = r5[n];
}
// w = B_t * d
for (int n = 0; n < 6; n++)
{
w0[n] = 4*d0[n] - 5*d2[n] + d4[n];
w1[n] = -4*d1[n] - 4*d2[n] + d3[n] + d4[n];
w2[n] = 4*d1[n] - 4*d2[n] - d3[n] + d4[n];
w3[n] = -2*d1[n] - d2[n] + 2*d3[n] + d4[n];
w4[n] = 2*d1[n] - d2[n] - 2*d3[n] + d4[n];
w5[n] = 4*d1[n] - 5*d3[n] + d5[n];
}
// transpose d to d_t
{
t0[0]=w0[0]; t1[0]=w0[1]; t2[0]=w0[2]; t3[0]=w0[3]; t4[0]=w0[4]; t5[0]=w0[5];
t0[1]=w1[0]; t1[1]=w1[1]; t2[1]=w1[2]; t3[1]=w1[3]; t4[1]=w1[4]; t5[1]=w1[5];
t0[2]=w2[0]; t1[2]=w2[1]; t2[2]=w2[2]; t3[2]=w2[3]; t4[2]=w2[4]; t5[2]=w2[5];
t0[3]=w3[0]; t1[3]=w3[1]; t2[3]=w3[2]; t3[3]=w3[3]; t4[3]=w3[4]; t5[3]=w3[5];
t0[4]=w4[0]; t1[4]=w4[1]; t2[4]=w4[2]; t3[4]=w4[3]; t4[4]=w4[4]; t5[4]=w4[5];
t0[5]=w5[0]; t1[5]=w5[1]; t2[5]=w5[2]; t3[5]=w5[3]; t4[5]=w5[4]; t5[5]=w5[5];
}
// d = B_t * d_t
for (int n = 0; n < 6; n++)
{
d0[n] = 4*t0[n] - 5*t2[n] + t4[n];
d1[n] = - 4*t1[n] - 4*t2[n] + t3[n] + t4[n];
d2[n] = 4*t1[n] - 4*t2[n] - t3[n] + t4[n];
d3[n] = - 2*t1[n] - t2[n] + 2*t3[n] + t4[n];
d4[n] = 2*t1[n] - t2[n] - 2*t3[n] + t4[n];
d5[n] = 4*t1[n] - 5*t3[n] + t5[n];
}
// save to out_tm
for (int n = 0; n < 6; n++)
{
out_tm0[n ] = d0[n];
out_tm0[n+ 6] = d1[n];
out_tm0[n+12] = d2[n];
out_tm0[n+18] = d3[n];
out_tm0[n+24] = d4[n];
out_tm0[n+30] = d5[n];
}
r0 += 4;
r1 += 4;
r2 += 4;
r3 += 4;
r4 += 4;
r5 += 4;
out_tm0 += 36;
}
}
}
}
bottom_blob_bordered = Mat();
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm/6; // may be the block num in Feathercnn
int nRowBlocks = w_tm/6;
const int tiles = nColBlocks * nRowBlocks;
top_blob_tm.create(36, tiles, outch, 4u, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int i=0; i<tiles; i++)
{
int* output0_tm = out0_tm.row<int>(i);
// elementwise 36-point product, accumulated over input channels
int sum0[36] = {0};
for (int q=0; q<inch; q++)
{
const short* r0 = bottom_blob_tm.channel(q).row<short>(i);
const short* k0 = kernel0_tm.row<short>(q);
for (int n=0; n<36; n++)
{
sum0[n] += (int)r0[n] * k0[n];
}
}
for (int n=0; n<36; n++)
{
output0_tm[n] = sum0[n];
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
top_blob_bordered.create(outw, outh, outch, 4u, opt.workspace_allocator);
{
// AT
// const float itm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + r01 + r02 + r03 + r04
// 1 = r01 - r02 + 2 * (r03 - r04)
// 2 = r01 + r02 + 4 * (r03 + r04)
// 3 = r01 - r02 + 8 * (r03 - r04) + r05
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
int nColBlocks = h_tm/6; // may be the block num in Feathercnn
int nRowBlocks = w_tm/6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out_tm = top_blob_tm.channel(p);
Mat out = top_blob_bordered.channel(p);
for (int j=0; j<nColBlocks; j++)
{
int* outRow0 = out.row<int>(j*4);
int* outRow1 = out.row<int>(j*4+1);
int* outRow2 = out.row<int>(j*4+2);
int* outRow3 = out.row<int>(j*4+3);
for(int i=0; i<nRowBlocks; i++)
{
int* out_tile = out_tm.row<int>(j*nRowBlocks + i);
int s0[6],s1[6],s2[6],s3[6],s4[6],s5[6];
int w0[6],w1[6],w2[6],w3[6];
int d0[4],d1[4],d2[4],d3[4],d4[4],d5[4];
int o0[4],o1[4],o2[4],o3[4];
// load
for (int n = 0; n < 6; n++)
{
s0[n] = out_tile[n];
s1[n] = out_tile[n+ 6];
s2[n] = out_tile[n+12];
s3[n] = out_tile[n+18];
s4[n] = out_tile[n+24];
s5[n] = out_tile[n+30];
}
// w = A_T * W
for (int n = 0; n < 6; n++)
{
w0[n] = s0[n] + s1[n] + s2[n] + s3[n] + s4[n];
w1[n] = s1[n] - s2[n] + 2*s3[n] - 2*s4[n];
w2[n] = s1[n] + s2[n] + 4*s3[n] + 4*s4[n];
w3[n] = s1[n] - s2[n] + 8*s3[n] - 8*s4[n] + s5[n];
}
// transpose w to w_t
{
d0[0] = w0[0]; d0[1] = w1[0]; d0[2] = w2[0]; d0[3] = w3[0];
d1[0] = w0[1]; d1[1] = w1[1]; d1[2] = w2[1]; d1[3] = w3[1];
d2[0] = w0[2]; d2[1] = w1[2]; d2[2] = w2[2]; d2[3] = w3[2];
d3[0] = w0[3]; d3[1] = w1[3]; d3[2] = w2[3]; d3[3] = w3[3];
d4[0] = w0[4]; d4[1] = w1[4]; d4[2] = w2[4]; d4[3] = w3[4];
d5[0] = w0[5]; d5[1] = w1[5]; d5[2] = w2[5]; d5[3] = w3[5];
}
// Y = A_T * w_t
for (int n = 0; n < 4; n++)
{
o0[n] = d0[n] + d1[n] + d2[n] + d3[n] + d4[n];
o1[n] = d1[n] - d2[n] + 2*d3[n] - 2*d4[n];
o2[n] = d1[n] + d2[n] + 4*d3[n] + 4*d4[n];
o3[n] = d1[n] - d2[n] + 8*d3[n] - 8*d4[n] + d5[n];
}
// save to top blob tm
for (int n = 0; n < 4; n++)
{
outRow0[n] = o0[n] / 576;
outRow1[n] = o1[n] / 576;
outRow2[n] = o2[n] / 576;
outRow3[n] = o3[n] / 576;
}
outRow0 += 4;
outRow1 += 4;
outRow2 += 4;
outRow3 += 4;
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
// Naive 3x3 stride-2 int8 convolution. Input pointers advance by 2 per
// output pixel; tailstep = w - 2*outw + w skips the unused tail of the
// current row pair and lands on the next row pair.
static void conv3x3s2_int8_sse(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
int w = bottom_blob.w;
int inch = bottom_blob.c;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
const int tailstep = w - 2 * outw + w;
const signed char *kernel = _kernel;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out0 = top_blob.channel(p);
out0.fill(0);
const signed char *kernel0 = (const signed char *)kernel + p * inch * 9;
for (int q = 0; q < inch; q++)
{
int *outptr0 = out0;
const signed char *img0 = bottom_blob.channel(q);
const signed char *r0 = img0;
const signed char *r1 = img0 + w;
const signed char *r2 = img0 + w * 2;
for (int i = 0; i < outh; i++)
{
int remain = outw;
for (; remain > 0; remain--)
{
int sum0 = 0;
sum0 += (int)r0[0] * kernel0[0];
sum0 += (int)r0[1] * kernel0[1];
sum0 += (int)r0[2] * kernel0[2];
sum0 += (int)r1[0] * kernel0[3];
sum0 += (int)r1[1] * kernel0[4];
sum0 += (int)r1[2] * kernel0[5];
sum0 += (int)r2[0] * kernel0[6];
sum0 += (int)r2[1] * kernel0[7];
sum0 += (int)r2[2] * kernel0[8];
*outptr0 += sum0; // accumulate across input channels
r0 += 2;
r1 += 2;
r2 += 2;
outptr0++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
kernel0 += 9;
}
}
}
/* An example code
*
* */
#include <stdio.h>
#include <math.h>
#include <omp.h>
void driver();
void initialize();
void jacobi();
void error_check();
#define MSIZE 200
/* Grid dimensions, iteration cap, and solver tolerance (set in main). */
int n;
int m;
int mits;
double tol;
double relax = 1.0;    /* relaxation factor omega */
double alpha = 0.0543; /* Helmholtz equation coefficient */
/* Solution, right-hand side, and previous-iteration copy of the grid. */
double u[200][200];
double f[200][200];
double uold[200][200];
/* Grid spacings in x and y (set in initialize()/error_check()). */
double dx;
double dy;
/* Entry point: set problem parameters and run the solver.
 * Fix: return 0 on success -- the original returned 1, which signals
 * failure to the shell/caller by C convention. */
int main()
{
	// float toler;
	/* printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE);
	   scanf ("%d",&n);
	   scanf ("%d",&m);
	   printf("Input tol - error tolerance for iterative solver\n");
	   scanf("%f",&toler);
	   tol=(double)toler;
	   printf("Input mits - Maximum iterations for solver\n");
	   scanf("%d",&mits);
	 */
	n = 200;
	m = 200;
	tol = 0.0000000001;
	mits = 1000;
	driver();
	return 0;
}
/*************************************************************
* Subroutine driver ()
* This is where the arrays are allocated and initialzed.
*
* Working varaibles/arrays
* dx - grid spacing in x direction
* dy - grid spacing in y direction
*************************************************************/
void driver()
{
initialize();
/* Solve Helmholtz equation */
jacobi();
/* error_check (n,m,alpha,dx,dy,u,f) */
error_check();
}
/* subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
/* Set grid spacing, zero the initial solution, and build the RHS f from
 * the assumed exact solution u(x,y) = (1-x^2)(1-y^2). */
void initialize()
{
int i;
int j;
int xx;
int yy;
// double PI = 3.1415926;
// -->dx@112:2
dx = 2.0 / (n - 1);
//-->dy@113:2
dy = 2.0 / (m - 1);
/* Initialize initial condition and RHS */
//#pragma omp parallel for private(i,j,xx,yy)
/* NOTE(review): ROSE emitted a nested "parallel for" on the inner loop;
 * nested parallelism is usually disabled by default, so the inner pragma
 * is likely a no-op -- confirm before removing. */
#pragma omp parallel for private (xx,yy,i,j) firstprivate (n,m)
for (i = 0; i <= n - 1; i += 1) {
#pragma omp parallel for private (xx,yy,j) firstprivate (alpha,dx,dy)
for (j = 0; j <= m - 1; j += 1) {
/* -1 < x < 1 */
xx = ((int )(- 1.0 + dx * (i - 1)));
/* -1 < y < 1 */
yy = ((int )(- 1.0 + dy * (j - 1)));
u[i][j] = 0.0;
f[i][j] = - 1.0 * alpha * (1.0 - (xx * xx)) * (1.0 - (yy * yy)) - 2.0 * (1.0 - (xx * xx)) - 2.0 * (1.0 - (yy * yy));
}
}
}
/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlect boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* maxit Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
/* Jacobi iteration for the Helmholtz problem on the global grids u/f/uold.
 * Iterates until the residual norm drops below tol or mits iterations run.
 *
 * Fix: the iteration-counter increment "k = k + 1;" had been commented
 * out, so the while condition "k <= mits" never advanced -- the loop could
 * only terminate if the error dropped below tol, and would otherwise spin
 * forever. The increment is restored. */
void jacobi()
{
	double omega;
	int i;
	int j;
	int k;
	double error;
	double resid;
	double ax;
	double ay;
	double b;
	omega = relax;
	/*
	 * Initialize coefficients */
	/* X-direction coef */
	ax = 1.0 / (dx * dx);
	/* Y-direction coef */
	ay = 1.0 / (dy * dy);
	/* Central coeff */
	b = - 2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha;
	error = 10.0 * tol;
	k = 1;
	while(k <= mits && error > tol){
		error = 0.0;
		/* Copy new solution into old */
		//#pragma omp parallel
		{
			//#pragma omp for private(i,j)
			#pragma omp parallel for private (i,j)
			for (i = 0; i <= n - 1; i += 1) {
				#pragma omp parallel for private (j)
				for (j = 0; j <= m - 1; j += 1) {
					uold[i][j] = u[i][j];
				}
			}
			//#pragma omp for private(i,j,resid) reduction(+:error) nowait
			for (i = 1; i <= n - 1 - 1; i += 1) {
				for (j = 1; j <= m - 1 - 1; j += 1) {
					resid = (ax * (uold[i - 1][j] + uold[i + 1][j]) + ay * (uold[i][j - 1] + uold[i][j + 1]) + b * uold[i][j] - f[i][j]) / b;
					u[i][j] = uold[i][j] - omega * resid;
					error = error + resid * resid;
				}
			}
		}
		/* omp end parallel */
		/* Error check */
		k = k + 1;
		error = sqrt(error) / (n * m);
		/* End iteration loop */
	}
	printf("Total Number of Iterations:%d\n",k);
	printf("Residual:%E\n",error);
}
/* Compare the computed solution against the exact solution
 * u(x,y) = (1-x^2)(1-y^2) and print the RMS-style error. */
void error_check()
{
int i;
int j;
double xx;
double yy;
double temp;
double error;
dx = 2.0 / (n - 1);
dy = 2.0 / (m - 1);
error = 0.0;
//#pragma omp parallel for private(i,j,xx,yy,temp) reduction(+:error)
#pragma omp parallel for private (xx,yy,temp,i,j) reduction (+:error)
for (i = 0; i <= n - 1; i += 1) {
#pragma omp parallel for private (xx,yy,temp,j) reduction (+:error) firstprivate (dx,dy)
for (j = 0; j <= m - 1; j += 1) {
xx = - 1.0 + dx * (i - 1);
yy = - 1.0 + dy * (j - 1);
temp = u[i][j] - (1.0 - xx * xx) * (1.0 - yy * yy);
error = error + temp * temp;
}
}
error = sqrt(error) / (n * m);
printf("Solution Error :%E \n",error);
}
|
archive_blake2sp_ref.c | /*
BLAKE2 reference source code package - reference C implementations
Copyright 2012, Samuel Neves <sneves@dei.uc.pt>. You may use this under the
terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at
your option. The terms of these licenses can be found at:
- CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0
- OpenSSL license : https://www.openssl.org/source/license.html
- Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0
More information about the BLAKE2 hash function can be found at
https://blake2.net.
*/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "archive_platform.h"
#include "archive_blake2.h"
#include "archive_blake2_impl.h"
#define PARALLELISM_DEGREE 8
/*
blake2sp_init_param defaults to setting the expecting output length
from the digest_length parameter block field.
In some cases, however, we do not want this, as the output length
of these instances is given by inner_length instead.
*/
/* Initialize a leaf state from P, then override the expected output length:
   a leaf's true digest size is the tree's inner hash length, not the
   digest_length field of the parameter block. */
static int blake2sp_init_leaf_param( blake2s_state *S, const blake2s_param *P )
{
  const int rc = blake2s_init_param( S, P );
  S->outlen = P->inner_length;
  return rc;
}
/* Build the parameter block for leaf number `offset` of the blake2sp tree
   (fanout 8, depth 2, node_depth 0) and initialize its state. */
static int blake2sp_init_leaf( blake2s_state *S, size_t outlen, size_t keylen, uint32_t offset )
{
  blake2s_param P[1];
  /* Start from an all-zero block (covers leaf_length, xof_length,
     node_depth, salt and personal), then fill in the non-zero fields. */
  memset( P, 0, sizeof( P ) );
  P->digest_length = (uint8_t)outlen;
  P->key_length    = (uint8_t)keylen;
  P->fanout        = PARALLELISM_DEGREE;
  P->depth         = 2;
  store32( &P->node_offset, offset );
  P->inner_length  = BLAKE2S_OUTBYTES;
  return blake2sp_init_leaf_param( S, P );
}
/* Build the parameter block for the root node of the blake2sp tree
   (fanout 8, depth 2, node_depth 1) and initialize its state. */
static int blake2sp_init_root( blake2s_state *S, size_t outlen, size_t keylen )
{
  blake2s_param P[1];
  /* Zero first (leaf_length, node_offset, xof_length, salt, personal),
     then set the fields that differ from zero. */
  memset( P, 0, sizeof( P ) );
  P->digest_length = (uint8_t)outlen;
  P->key_length    = (uint8_t)keylen;
  P->fanout        = PARALLELISM_DEGREE;
  P->depth         = 2;
  P->node_depth    = 1;
  P->inner_length  = BLAKE2S_OUTBYTES;
  return blake2s_init_param( S, P );
}
/* Initialize an unkeyed blake2sp state producing `outlen` bytes.
   Returns 0 on success, -1 on an invalid output length or init failure. */
int blake2sp_init( blake2sp_state *S, size_t outlen )
{
  size_t i;
  if( outlen == 0 || outlen > BLAKE2S_OUTBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;
  S->outlen = outlen;

  /* Root node combines the eight leaf digests. */
  if( blake2sp_init_root( S->R, outlen, 0 ) < 0 ) return -1;

  /* One leaf per lane, numbered by its offset in the tree. */
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    if( blake2sp_init_leaf( S->S[i], outlen, 0, (uint32_t)i ) < 0 ) return -1;
  }

  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;
  return 0;
}
/* Initialize a keyed blake2sp state: set up root and leaves, then absorb
   the key (zero-padded to one block) into every leaf.
   Returns 0 on success, -1 on invalid parameters or init failure. */
int blake2sp_init_key( blake2sp_state *S, size_t outlen, const void *key, size_t keylen )
{
  size_t i;
  if( outlen == 0 || outlen > BLAKE2S_OUTBYTES ) return -1;
  if( key == NULL || keylen == 0 || keylen > BLAKE2S_KEYBYTES ) return -1;

  memset( S->buf, 0, sizeof( S->buf ) );
  S->buflen = 0;
  S->outlen = outlen;

  if( blake2sp_init_root( S->R, outlen, keylen ) < 0 ) return -1;
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    if( blake2sp_init_leaf( S->S[i], outlen, keylen, (uint32_t)i ) < 0 ) return -1;
  }

  S->R->last_node = 1;
  S->S[PARALLELISM_DEGREE - 1]->last_node = 1;

  {
    /* Every leaf absorbs the key as a full zero-padded block. */
    uint8_t keyblock[BLAKE2S_BLOCKBYTES];
    memset( keyblock, 0, BLAKE2S_BLOCKBYTES );
    memcpy( keyblock, key, keylen );
    for( i = 0; i < PARALLELISM_DEGREE; ++i )
    {
      blake2s_update( S->S[i], keyblock, BLAKE2S_BLOCKBYTES );
    }
    secure_zero_memory( keyblock, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
  }
  return 0;
}
/* Absorb `inlen` bytes into the blake2sp state.  Input is striped across
   the 8 leaves in BLAKE2S_BLOCKBYTES units; a partial stripe is kept in
   S->buf until enough data arrives.  Returns 0 (cannot fail). */
int blake2sp_update( blake2sp_state *S, const void *pin, size_t inlen )
{
  const unsigned char * in = (const unsigned char *)pin;
  size_t left = S->buflen;                 /* bytes already buffered */
  size_t fill = sizeof( S->buf ) - left;   /* room left in the stripe buffer */
  size_t i;
  /* If buffered data plus new input completes a full stripe, flush it:
     one block per leaf, in lane order. */
  if( left && inlen >= fill )
  {
    memcpy( S->buf + left, in, fill );
    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES );
    in += fill;
    inlen -= fill;
    left = 0;
  }
/* With OpenMP each of the 8 threads takes one lane; without it the same
   body runs sequentially with `i` as the loop variable. */
#if defined(_OPENMP)
#pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE)
#else
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
  {
#if defined(_OPENMP)
    size_t i = omp_get_thread_num();  /* lane index = thread number */
#endif
    size_t inlen__ = inlen;
    const unsigned char *in__ = ( const unsigned char * )in;
    in__ += i * BLAKE2S_BLOCKBYTES;   /* this lane's first block in the stripe */
    /* Consume every complete 8-block stripe; lane i takes block i of each. */
    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
    {
      blake2s_update( S->S[i], in__, BLAKE2S_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
    }
  }
  /* Stash the trailing partial stripe for the next update/final call. */
  in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES );
  inlen %= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
  if( inlen > 0 )
    memcpy( S->buf + left, in, inlen );
  S->buflen = left + inlen;
  return 0;
}
/* Finish the hash: flush each lane's share of the buffered tail, finalize
   the 8 leaves, feed their digests into the root, and emit the root digest.
   Returns -1 on a NULL/short output buffer, else blake2s_final's result. */
int blake2sp_final( blake2sp_state *S, void *out, size_t outlen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
  size_t i;

  if( out == NULL || outlen < S->outlen )
  {
    return -1;
  }

  for( i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    /* Lane i owns bytes [i*BLOCK, (i+1)*BLOCK) of the stripe buffer. */
    const size_t lane_off = i * BLAKE2S_BLOCKBYTES;
    if( S->buflen > lane_off )
    {
      size_t remaining = S->buflen - lane_off;
      if( remaining > BLAKE2S_BLOCKBYTES ) remaining = BLAKE2S_BLOCKBYTES;
      blake2s_update( S->S[i], S->buf + lane_off, remaining );
    }
    blake2s_final( S->S[i], hash[i], BLAKE2S_OUTBYTES );
  }

  /* Root hashes the concatenation of the leaf digests, in lane order. */
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
  {
    blake2s_update( S->R, hash[i], BLAKE2S_OUTBYTES );
  }
  return blake2s_final( S->R, out, S->outlen );
}
/* One-shot blake2sp: hash `inlen` bytes of `in` (optionally keyed) into
   `outlen` bytes at `out`.  Returns 0 on success, -1 on bad parameters. */
int blake2sp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen )
{
  uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES];
  blake2s_state S[PARALLELISM_DEGREE][1];  /* one state per leaf lane */
  blake2s_state FS[1];                     /* root state */
  size_t i;
  /* Verify parameters */
  if ( NULL == in && inlen > 0 ) return -1;
  if ( NULL == out ) return -1;
  if ( NULL == key && keylen > 0) return -1;
  if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1;
  if( keylen > BLAKE2S_KEYBYTES ) return -1;
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    if( blake2sp_init_leaf( S[i], outlen, keylen, (uint32_t)i ) < 0 ) return -1;
  S[PARALLELISM_DEGREE - 1]->last_node = 1; /* mark last node */
  /* A key is absorbed by every leaf as a full zero-padded block. */
  if( keylen > 0 )
  {
    uint8_t block[BLAKE2S_BLOCKBYTES];
    memset( block, 0, BLAKE2S_BLOCKBYTES );
    memcpy( block, key, keylen );
    for( i = 0; i < PARALLELISM_DEGREE; ++i )
      blake2s_update( S[i], block, BLAKE2S_BLOCKBYTES );
    secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */
  }
/* With OpenMP each thread processes one lane; otherwise loop over lanes. */
#if defined(_OPENMP)
#pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE)
#else
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
#endif
  {
#if defined(_OPENMP)
    size_t i = omp_get_thread_num();  /* lane index = thread number */
#endif
    size_t inlen__ = inlen;
    const unsigned char *in__ = ( const unsigned char * )in;
    in__ += i * BLAKE2S_BLOCKBYTES;  /* lane i starts at block i of each stripe */
    /* Full 8-block stripes: lane i consumes block i of every stripe. */
    while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES )
    {
      blake2s_update( S[i], in__, BLAKE2S_BLOCKBYTES );
      in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
      inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES;
    }
    /* Trailing partial stripe: lane i takes at most one (possibly short)
       block if the tail reaches its offset. */
    if( inlen__ > i * BLAKE2S_BLOCKBYTES )
    {
      const size_t left = inlen__ - i * BLAKE2S_BLOCKBYTES;
      const size_t len = left <= BLAKE2S_BLOCKBYTES ? left : BLAKE2S_BLOCKBYTES;
      blake2s_update( S[i], in__, len );
    }
    blake2s_final( S[i], hash[i], BLAKE2S_OUTBYTES );
  }
  /* Root pass: hash the concatenated leaf digests. */
  if( blake2sp_init_root( FS, outlen, keylen ) < 0 )
    return -1;
  FS->last_node = 1;
  for( i = 0; i < PARALLELISM_DEGREE; ++i )
    blake2s_update( FS, hash[i], BLAKE2S_OUTBYTES );
  return blake2s_final( FS, out, outlen );
}
#if defined(BLAKE2SP_SELFTEST)
#include <string.h>
#include "blake2-kat.h"
/* Self-test: verify keyed blake2sp against the known-answer tables in
   blake2-kat.h, via both the one-shot API and the streaming API (the
   latter with every chunk size from 1 to one block). */
int main( void )
{
  uint8_t key[BLAKE2S_KEYBYTES];
  uint8_t buf[BLAKE2_KAT_LENGTH];
  size_t i, step;
  /* Deterministic key and message bytes 0,1,2,... as used by the KAT. */
  for( i = 0; i < BLAKE2S_KEYBYTES; ++i )
    key[i] = ( uint8_t )i;
  for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
    buf[i] = ( uint8_t )i;
  /* Test simple API */
  for( i = 0; i < BLAKE2_KAT_LENGTH; ++i )
  {
    uint8_t hash[BLAKE2S_OUTBYTES];
    blake2sp( hash, BLAKE2S_OUTBYTES, buf, i, key, BLAKE2S_KEYBYTES );
    if( 0 != memcmp( hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES ) )
    {
      goto fail;
    }
  }
  /* Test streaming API */
  for(step = 1; step < BLAKE2S_BLOCKBYTES; ++step) {
    for (i = 0; i < BLAKE2_KAT_LENGTH; ++i) {
      uint8_t hash[BLAKE2S_OUTBYTES];
      blake2sp_state S;
      uint8_t * p = buf;
      size_t mlen = i;
      int err = 0;
      if( (err = blake2sp_init_key(&S, BLAKE2S_OUTBYTES, key, BLAKE2S_KEYBYTES)) < 0 ) {
        goto fail;
      }
      /* Feed the message in `step`-byte chunks, then the remainder. */
      while (mlen >= step) {
        if ( (err = blake2sp_update(&S, p, step)) < 0 ) {
          goto fail;
        }
        mlen -= step;
        p += step;
      }
      if ( (err = blake2sp_update(&S, p, mlen)) < 0) {
        goto fail;
      }
      if ( (err = blake2sp_final(&S, hash, BLAKE2S_OUTBYTES)) < 0) {
        goto fail;
      }
      if (0 != memcmp(hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES)) {
        goto fail;
      }
    }
  }
  puts( "ok" );
  return 0;
fail:
  puts("error");
  return -1;
}
#endif
|
GB_unaryop__ainv_int32_fp32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_int32_fp32
// op(A') function: GB_tran__ainv_int32_fp32
// C type: int32_t
// A type: float
// cast: int32_t cij ; GB_CAST_SIGNED(cij,aij,32)
// unaryop: cij = -aij
#define GB_ATYPE \
float
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
int32_t z ; GB_CAST_SIGNED(z,x,32) ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_INT32 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
/* Cx [p] = -(int32_t) Ax [p] for p in [0, anz): cast each float entry to
   int32 (via GB_CAST_SIGNED inside GB_CAST_OP) and negate it, using a
   static OpenMP schedule over `nthreads` threads.  Returns GrB_NO_VALUE
   when this kernel is compiled out by GB_DISABLE, else GrB_SUCCESS. */
GrB_Info GB_unop__ainv_int32_fp32
(
    int32_t *restrict Cx,       /* output array, anz entries */
    const float *restrict Ax,   /* input array, anz entries */
    int64_t anz,                /* number of entries */
    int nthreads                /* number of OpenMP threads to use */
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    /* Entries are independent, so a static partition is load-balanced. */
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
/* C = -(int32_t) A': transpose A, cast each entry to int32, and negate.
   The actual loop lives in the shared template GB_unaryop_transpose.c,
   driven by the GB_* macros defined above; this wrapper only selects
   phase 2 of the two-phase transpose.  Returns GrB_NO_VALUE when the
   kernel is compiled out by GB_DISABLE, else GrB_SUCCESS. */
GrB_Info GB_tran__ainv_int32_fp32
(
    GrB_Matrix C,                        /* output matrix */
    const GrB_Matrix A,                  /* input matrix */
    int64_t **Rowcounts,                 /* per-slice row counts from phase 1 */
    GBI_single_iterator Iter,            /* iterator over the vectors of A */
    const int64_t *restrict A_slice,     /* slice boundaries of A */
    int naslice                          /* number of slices */
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
GB_dense_ewise3_noaccum_template.c | //------------------------------------------------------------------------------
// GB_dense_ewise3_noaccum_template: C = A+B where all 3 matrices are dense
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
#include "GB_unused.h"
/* Template body (included by the generated ewise kernels): computes
   C = A op B where A, B, and C are all dense and op is the GB_BINOP macro.
   Aliased cases (C==A, C==B) are dispatched to in-place forms; the
   plus/minus real cases can use CBLAS axpy when available. */
{
    //--------------------------------------------------------------------------
    // get A, B, and C
    //--------------------------------------------------------------------------
    // any matrix may be aliased to any other (C==A, C==B, and/or A==B)
    GB_ATYPE *Ax = A->x ;
    GB_BTYPE *Bx = B->x ;
    GB_CTYPE *Cx = C->x ;
    const int64_t cnz = GB_NNZ (C) ;
    ASSERT (GB_is_dense (A)) ;
    ASSERT (GB_is_dense (B)) ;
    ASSERT (GB_is_dense (C)) ;
    int64_t p ;
    //--------------------------------------------------------------------------
    // C = A+B where all 3 matrices are dense
    //--------------------------------------------------------------------------
    if (C == B)
    {
        //----------------------------------------------------------------------
        // C = A+C where A and C are dense
        //----------------------------------------------------------------------
        // in-place update of C; CBLAS axpy when op is +/- on real types
        #if GB_HAS_CBLAS & GB_OP_IS_PLUS_REAL
        GB_CBLAS_AXPY (cnz, (GB_CTYPE) 1, Ax, Cx, nthreads) ;  // C += A
        #elif GB_HAS_CBLAS & GB_OP_IS_MINUS_REAL
        GB_CBLAS_AXPY (cnz, (GB_CTYPE) -1, Ax, Cx, nthreads) ; // C -= A
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < cnz ; p++)
        {
            GB_GETA (aij, Ax, p) ;                  // aij = Ax [p]
            GB_BINOP (GB_CX (p), aij, GB_CX (p)) ;  // Cx [p] = aij + Cx [p]
        }
        #endif
    }
    else if (C == A)
    {
        //----------------------------------------------------------------------
        // C = C+B where B and C are dense
        //----------------------------------------------------------------------
        #if GB_HAS_CBLAS & GB_OP_IS_PLUS_REAL
        GB_CBLAS_AXPY (cnz, (GB_CTYPE) 1, Bx, Cx, nthreads) ;  // C += B
        #elif GB_HAS_CBLAS & GB_OP_IS_MINUS_REAL
        GB_CBLAS_AXPY (cnz, (GB_CTYPE) -1, Bx, Cx, nthreads) ; // C -= B
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < cnz ; p++)
        {
            GB_GETB (bij, Bx, p) ;                  // bij = Bx [p]
            GB_BINOP (GB_CX (p), GB_CX (p), bij) ;  // Cx [p] += bij
        }
        #endif
    }
    else
    {
        //----------------------------------------------------------------------
        // C = A+B where all 3 matrices are dense
        //----------------------------------------------------------------------
        // note that A and B may still be aliased to each other
        #if GB_HAS_CBLAS && GB_OP_IS_PLUS_REAL
        GB_memcpy (Cx, Ax, cnz * sizeof (GB_CTYPE), nthreads) ;  // C = A
        GB_CBLAS_AXPY (cnz, (GB_CTYPE) 1, Bx, Cx, nthreads) ;    // C += B
        #elif GB_HAS_CBLAS && GB_OP_IS_MINUS_REAL
        GB_memcpy (Cx, Ax, cnz * sizeof (GB_CTYPE), nthreads) ;  // C = A
        GB_CBLAS_AXPY (cnz, (GB_CTYPE) -1, Bx, Cx, nthreads) ;   // C -= B
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < cnz ; p++)
        {
            GB_GETA (aij, Ax, p) ;                  // aij = Ax [p]
            GB_GETB (bij, Bx, p) ;                  // bij = Bx [p]
            GB_BINOP (GB_CX (p), aij, bij) ;        // Cx [p] = aij + bij
        }
        #endif
    }
}
|
ten_tusscher_2004_epi_S3_11.c | //Original Ten Tusscher
#include <assert.h>
#include <stdlib.h>
#include "ten_tusscher_2004_epi_S3_11.h"
/* Report the model's resting potential and ODE system size, filling only
   the fields the caller asked for via the two boolean flags. */
GET_CELL_MODEL_DATA(init_cell_model_data) {
    assert(cell_model);
    if(get_initial_v) {
        cell_model->initial_v = INITIAL_V;
    }
    if(get_neq) {
        cell_model->number_of_ode_equations = NEQ;
    }
}
//TODO: this should be called only once for the whole mesh, like in the GPU code
//TODO: this should be called only once for the whole mesh, like in the GPU code
/* Load Elnaz's precomputed steady-state values into the state vector sv
   (order: V, M, H, J, Xr1, Xr2, Xs, S, R, D, F, FCa, G, Cai, CaSR, Nai, Ki),
   replacing the model's textbook default initial conditions. */
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
    real sv_sst[]={-86.3538982161893,0.00135046428174639,0.774395108378593,0.774193037406772,0.000180284593212202,0.482859468371359,0.00298553552627847,0.999998275725340,2.00383173002088e-08,1.94589509598028e-05,0.999770495567014,1.00670747236685,0.999986210307579,5.41917138483173e-05,0.573163916600714,10.5571119802004,138.986127120946};
    uint32_t eq = 0;
    while (eq < NEQ) {
        sv[eq] = sv_sst[eq];
        eq++;
    }
}
/* Advance every requested cell by num_steps explicit-Euler steps.
   cells_to_solve (when non-NULL) maps loop index -> state-vector id;
   cells are independent, so the outer loop runs in parallel. */
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
    uint32_t sv_id;
    int i;
    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        sv_id = cells_to_solve ? cells_to_solve[i] : (uint32_t)i;
        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}
/* One explicit time step: snapshot the current state, evaluate the model's
   update rule (RHS_cpu returns the new state, not derivatives), and write
   the result back into sv in place. */
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
    assert(sv);
    real state[NEQ], next_state[NEQ];
    int k;
    for (k = 0; k < NEQ; k++) {
        state[k] = sv[k];
    }
    RHS_cpu(state, next_state, stim_current, dt);
    for (k = 0; k < NEQ; k++) {
        sv[k] = next_state[k];
    }
}
/* Evaluate one time step of the ten Tusscher 2004 epicardial ventricular
   cell model (with Elnaz's fitted parameter set overriding the published
   conductances).  Despite the name, rDY_ receives the UPDATED state after
   a step of size dt (Rush-Larsen for the gates, forward Euler for V and
   the concentrations), not time derivatives.
   sv: current state (17 entries), rDY_: next state (17 entries),
   stim_current: applied stimulus, dt: step size. */
void RHS_cpu(const real *sv, real *rDY_, real stim_current, real dt) {
    // State variables
    real svolt = sv[0];
    real sm = sv[1];
    real sh = sv[2];
    real sj = sv[3];
    real sxr1 = sv[4];
    real sxr2 = sv[5];
    real sxs = sv[6];
    real ss = sv[7];
    real sr = sv[8];
    real sd = sv[9];
    real sf = sv[10];
    real sfca = sv[11];
    real sg = sv[12];
    real Cai = sv[13];
    real CaSR = sv[14];
    real Nai = sv[15];
    real Ki = sv[16];
    //External concentrations
    real Ko=5.4;
    real Cao=2.0;
    real Nao=140.0;
    //Intracellular volumes
    real Vc=0.016404;
    real Vsr=0.001094;
    //Calcium dynamics
    real Bufc=0.15f;
    real Kbufc=0.001f;
    real Bufsr=10.f;
    real Kbufsr=0.3f;
    real taufca=2.f;
    real taug=2.f;
    real Vmaxup=0.000425f;
    real Kup=0.00025f;
    //Constants
    const real R = 8314.472f;
    const real F = 96485.3415f;
    const real T =310.0f;
    real RTONF =(R*T)/F;
    //Cellular capacitance
    real CAPACITANCE=0.185;
    //Parameters for currents
    //Parameters for IKr
    real Gkr=0.096;
    //Parameters for Iks
    real pKNa=0.03;
    ///#ifdef EPI
    real Gks=0.245;
    ///#endif
    ///#ifdef ENDO
    ///   real Gks=0.245;
    ///#endif
    ///#ifdef MCELL
    ///  real Gks=0.062;
    ///#endif
    //Parameters for Ik1
    real GK1=5.405;
    //Parameters for Ito
    //#ifdef EPI
    real Gto=0.294;
    //#endif
    // #ifdef ENDO
    //  real Gto=0.073;
    //#endif
    //#ifdef MCELL
    //  real Gto=0.294;
    ///#endif
    //Parameters for INa
    real GNa=14.838;
    //Parameters for IbNa
    real GbNa=0.00029;
    //Parameters for INaK
    real KmK=1.0;
    real KmNa=40.0;
    real knak=1.362;
    //Parameters for ICaL
    real GCaL=0.000175;
    //Parameters for IbCa
    real GbCa=0.000592;
    //Parameters for INaCa
    real knaca=1000;
    real KmNai=87.5;
    real KmCa=1.38;
    real ksat=0.1;
    real n=0.35;
    //Parameters for IpCa
    real GpCa=0.825;
    real KpCa=0.0005;
    //Parameters for IpK;
    real GpK=0.0146;
    // Fitted parameter set: overrides the published conductances above.
    real parameters []={14.5839110617731,7.41569871280206e-05,0.000142618003872546,0.000440201846466352,0.251484740046766,0.161220069616880,0.198425398975174,4.79810054951093,0.0150171852869386,1.46819606412675,1095.86075597311,0.000432497879172209,0.0916427631596964,0.0180337533245990,0.00326818952620740,4.71618903185368e-05};
    GNa=parameters[0];
    GbNa=parameters[1];
    GCaL=parameters[2];
    GbCa=parameters[3];
    Gto=parameters[4];
    Gkr=parameters[5];
    Gks=parameters[6];
    GK1=parameters[7];
    GpK=parameters[8];
    knak=parameters[9];
    knaca=parameters[10];
    Vmaxup=parameters[11];
    GpCa=parameters[12];
    real arel=parameters[13];
    real crel=parameters[14];
    real Vleak=parameters[15];
    // Working variables: membrane currents, concentration updates,
    // gate steady states and time constants.
    real IKr;
    real IKs;
    real IK1;
    real Ito;
    real INa;
    real IbNa;
    real ICaL;
    real IbCa;
    real INaCa;
    real IpCa;
    real IpK;
    real INaK;
    real Irel;
    real Ileak;
    real dNai;
    real dKi;
    real dCai;
    real dCaSR;
    real A;
    //  real BufferFactorc;
    //  real BufferFactorsr;
    real SERCA;
    real Caisquare;
    real CaSRsquare;
    real CaCurrent;
    real CaSRCurrent;
    real fcaold;
    real gold;
    real Ek;
    real Ena;
    real Eks;
    real Eca;
    real CaCSQN;
    real bjsr;
    real cjsr;
    real CaBuf;
    real bc;
    real cc;
    real Ak1;
    real Bk1;
    real rec_iK1;
    real rec_ipK;
    real rec_iNaK;
    real AM;
    real BM;
    real AH_1;
    real BH_1;
    real AH_2;
    real BH_2;
    real AJ_1;
    real BJ_1;
    real AJ_2;
    real BJ_2;
    real M_INF;
    real H_INF;
    real J_INF;
    real TAU_M;
    real TAU_H;
    real TAU_J;
    real axr1;
    real bxr1;
    real axr2;
    real bxr2;
    real Xr1_INF;
    real Xr2_INF;
    real TAU_Xr1;
    real TAU_Xr2;
    real Axs;
    real Bxs;
    real Xs_INF;
    real TAU_Xs;
    real R_INF;
    real TAU_R;
    real S_INF;
    real TAU_S;
    real Ad;
    real Bd;
    real Cd;
    real TAU_D;
    real D_INF;
    real TAU_F;
    real F_INF;
    real FCa_INF;
    real G_INF;
    real inverseVcF2=1/(2*Vc*F);
    real inverseVcF=1./(Vc*F);
    real Kupsquare=Kup*Kup;
    //  real BufcKbufc=Bufc*Kbufc;
    //  real Kbufcsquare=Kbufc*Kbufc;
    //  real Kbufc2=2*Kbufc;
    //  real BufsrKbufsr=Bufsr*Kbufsr;
    //  const real Kbufsrsquare=Kbufsr*Kbufsr;
    //  const real Kbufsr2=2*Kbufsr;
    const real exptaufca=exp(-dt/taufca);
    const real exptaug=exp(-dt/taug);
    real sItot;
    //Needed to compute currents: Nernst/reversal potentials
    Ek=RTONF*(log((Ko/Ki)));
    Ena=RTONF*(log((Nao/Nai)));
    Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
    Eca=0.5*RTONF*(log((Cao/Cai)));
    Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
    Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
         exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
    rec_iK1=Ak1/(Ak1+Bk1);
    rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
    rec_ipK=1./(1.+exp((25-svolt)/5.98));
    //Compute currents
    INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
    ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
         (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
    Ito=Gto*sr*ss*(svolt-Ek);
    IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
    IKs=Gks*sxs*sxs*(svolt-Eks);
    IK1=GK1*rec_iK1*(svolt-Ek);
    INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
          (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
          (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
           exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
    INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
    IpCa=GpCa*Cai/(KpCa+Cai);
    IpK=GpK*rec_ipK*(svolt-Ek);
    IbNa=GbNa*(svolt-Ena);
    IbCa=GbCa*(svolt-Eca);
    //Determine total current
    (sItot) = IKr +
              IKs +
              IK1 +
              Ito +
              INa +
              IbNa +
              ICaL +
              IbCa +
              INaK +
              INaCa +
              IpCa +
              IpK +
              stim_current;
    //update concentrations (analytic buffering for Cai and CaSR)
    Caisquare=Cai*Cai;
    CaSRsquare=CaSR*CaSR;
    CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
    ///A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
    A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
    Irel=A*sd*sg;
    ///Ileak=0.00008f*(CaSR-Cai);
    Ileak=Vleak*(CaSR-Cai);
    SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
    CaSRCurrent=SERCA-Irel-Ileak;
    CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
    dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
    bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
    cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
    CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
    CaBuf=Bufc*Cai/(Cai+Kbufc);
    dCai=dt*(CaCurrent-CaSRCurrent);
    bc=Bufc-CaBuf-dCai-Cai+Kbufc;
    cc=Kbufc*(CaBuf+dCai+Cai);
    Cai=(sqrt(bc*bc+4*cc)-bc)/2;
    dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
    Nai+=dt*dNai;
    dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
    Ki+=dt*dKi;
    //compute steady state values and time constants
    AM=1./(1.+exp((-60.-svolt)/5.));
    BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
    TAU_M=AM*BM;
    M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
    // h gate: different rate formulas above/below -40 mV
    if (svolt>=-40.)
    {
        AH_1=0.;
        BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
        TAU_H= 1.0/(AH_1+BH_1);
    }
    else
    {
        AH_2=(0.057*exp(-(svolt+80.)/6.8));
        BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
        TAU_H=1.0/(AH_2+BH_2);
    }
    H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
    // j gate: same voltage split as h
    if(svolt>=-40.)
    {
        AJ_1=0.;
        BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
        TAU_J= 1.0/(AJ_1+BJ_1);
    }
    else
    {
        AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
               exp(-0.04391*svolt))*(svolt+37.78)/
              (1.+exp(0.311*(svolt+79.23))));
        BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
        TAU_J= 1.0/(AJ_2+BJ_2);
    }
    J_INF=H_INF;
    Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
    axr1=450./(1.+exp((-45.-svolt)/10.));
    bxr1=6./(1.+exp((svolt-(-30.))/11.5));
    TAU_Xr1=axr1*bxr1;
    Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
    axr2=3./(1.+exp((-60.-svolt)/20.));
    bxr2=1.12/(1.+exp((svolt-60.)/20.));
    TAU_Xr2=axr2*bxr2;
    Xs_INF=1./(1.+exp((-5.-svolt)/14.));
    Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
    Bxs=1./(1.+exp((svolt-60.)/20.));
    TAU_Xs=Axs*Bxs;
    // r and s gates: cell-type-specific formulations
#ifdef EPI
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+28)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
    R_INF=1./(1.+exp((20-svolt)/6.));
    S_INF=1./(1.+exp((svolt+20)/5.));
    TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
    TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
    D_INF=1./(1.+exp((-5-svolt)/7.5));
    Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
    Bd=1.4/(1.+exp((svolt+5)/5));
    Cd=1./(1.+exp((50-svolt)/20));
    TAU_D=Ad*Bd+Cd;
    F_INF=1./(1.+exp((svolt+20)/7));
    TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
    FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
             0.1/(1.+exp((Cai-0.0005)/0.0001))+
             0.20/(1.+exp((Cai-0.00075)/0.0008))+
             0.23 )/1.46;
    if(Cai<0.00035)
        G_INF=1./(1.+pow((Cai/0.00035),6));
    else
        G_INF=1./(1.+pow((Cai/0.00035),16));
    //Update gates (Rush-Larsen exponential integration)
    rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
    rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
    rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
    rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
    rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
    rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
    rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
    rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
    rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
    rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
    // fCa and g gates may only relax toward steady state when V <= -37 mV
    fcaold= sfca;
    sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
    if(sfca>fcaold && (svolt)>-37.0)
        sfca = fcaold;
    gold = sg;
    sg = G_INF-(G_INF-sg)*exptaug;
    if(sg>gold && (svolt)>-37.0)
        sg=gold;
    //update voltage (forward Euler)
    rDY_[0] = svolt + dt*(-sItot);
    rDY_[11] = sfca;
    rDY_[12] = sg;
    rDY_[13] = Cai;
    rDY_[14] = CaSR;
    rDY_[15] = Nai;
    rDY_[16] = Ki;
}
|
atomic.c | /* Copyright (C) 2005-2017 Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@redhat.com>.
This file is part of the GNU Offloading and Multi Processing Library
(libgomp).
Libgomp is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.
You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* This file contains helpers for the ATOMIC construct. */
#include "libgomp.h"
/* This mutex is used when atomic operations don't exist for the target
in the mode requested. The result is not globally atomic, but works so
long as all parallel references are within #pragma omp atomic directives.
According to responses received from omp@openmp.org, appears to be within
spec. Which makes sense, since that's how several other compilers
handle this situation as well. */
static gomp_mutex_t atomic_lock;
/* Enter the process-wide critical section used to emulate an atomic
   operation the target cannot perform natively.  Paired with
   GOMP_atomic_end.  */
void
GOMP_atomic_start (void)
{
  gomp_mutex_lock (&atomic_lock);
}
/* Leave the critical section entered by GOMP_atomic_start.  */
void
GOMP_atomic_end (void)
{
  gomp_mutex_unlock (&atomic_lock);
}
#if !GOMP_MUTEX_INIT_0
/* On targets whose mutexes are not all-zero when initialized, construct
   the atomic lock before main() runs.  */
static void __attribute__((constructor))
initialize_atomic (void)
{
  gomp_mutex_init (&atomic_lock);
}
#endif
|
simulation.h | //! \file simulation.h
//! \brief Variables/functions related to a running simulation
#ifndef OPENMC_SIMULATION_H
#define OPENMC_SIMULATION_H
#include "openmc/particle.h"
#include <cstdint>
#include <vector>
namespace openmc {
constexpr int STATUS_EXIT_NORMAL {0};
constexpr int STATUS_EXIT_MAX_BATCH {1};
constexpr int STATUS_EXIT_ON_TRIGGER {2};
//==============================================================================
// Global variable declarations
//==============================================================================
namespace simulation {
extern "C" int current_batch; //!< current batch
extern "C" int current_gen; //!< current fission generation
extern "C" int64_t current_work; //!< index in source bank of current particle
extern "C" bool initialized; //!< has simulation been initialized?
extern "C" double keff; //!< average k over batches
extern "C" double keff_std; //!< standard deviation of average k
extern "C" double k_col_abs; //!< sum over batches of k_collision * k_absorption
extern "C" double k_col_tra; //!< sum over batches of k_collision * k_tracklength
extern "C" double k_abs_tra; //!< sum over batches of k_absorption * k_tracklength
extern double log_spacing; //!< lethargy spacing for energy grid searches
extern "C" int n_lost_particles; //!< cumulative number of lost particles
extern "C" bool need_depletion_rx; //!< need to calculate depletion rx?
extern "C" int restart_batch; //!< batch at which a restart job resumed
extern "C" bool satisfy_triggers; //!< have tally triggers been satisfied?
extern "C" int total_gen; //!< total number of generations simulated
extern double total_weight; //!< Total source weight in a batch
extern int64_t work_per_rank; //!< number of particles per MPI rank
extern std::vector<double> k_generation;
extern std::vector<int64_t> work_index;
// Threadprivate variables
extern "C" bool trace; //!< flag to show debug information
#pragma omp threadprivate(current_work, trace)
} // namespace simulation
//==============================================================================
// Functions
//==============================================================================
//! Allocate space for source and fission banks
void allocate_banks();
//! Determine number of particles to transport per process
void calculate_work();
//! Initialize a batch
void initialize_batch();
//! Initialize a fission generation
void initialize_generation();
void initialize_history(Particle* p, int64_t index_source);
//! Finalize a batch
//!
//! Handles synchronization and accumulation of tallies, calculation of Shannon
//! entropy, getting single-batch estimate of keff, and turning on tallies when
//! appropriate
void finalize_batch();
//! Finalize a fission generation
void finalize_generation();
//! Determine overall generation number
extern "C" int overall_generation();
#ifdef OPENMC_MPI
void broadcast_results();
#endif
void free_memory_simulation();
} // namespace openmc
#endif // OPENMC_SIMULATION_H
|
pragma_example.c | /* To compile this program on Linux, try:
make CFLAGS='-std=c99 -Wall' pragma_example
To run:
./pragma_example; echo $?
It should print 0 if OK.
You can even compile it to run on multicore SMP for free with
make CFLAGS='-std=c99 -fopenmp -Wall' pragma_example
To verify there are really some clone() system calls that create the threads:
strace -f ./pragma_example ; echo $?
You can notice that the #pragma smecy are ignored (the project is
on-going :-) ) but that the program produces already correct results in
sequential execution and parallel OpenMP execution.
Enjoy!
Remi.Barrere@thalesgroup.com
Ronan.Keryell@hpc-project.com
for ARTEMIS SMECY European project.
*/
#include <stdbool.h>
/* function Gen
Example of old C89 array use-case where the size is unknown. Note that
this implies some nasty access linearization with array with more than
1 dimension.
*/
/* Zero-fill the first `size` ints at `out`.  C89-style interface: the
   buffer arrives as a bare pointer plus an element count. */
void Gen(int *out, int size) {
  // Every iteration writes a distinct element, so the loop is parallel-safe.
  #pragma omp parallel for
  for (int idx = 0; idx < size; idx++) {
    out[idx] = 0;
  }
}
/* function Add
Nice C99 array with dynamic size definition. Note this implies having
array size given first
*/
/* Element-wise increment: out[i] = in[i] + 1 for the first `size` entries.
   C99 variable-length-array parameters, so the size comes first. */
void Add(int size, int in[size], int out[size]) {
  // Independent per-element work: safe to execute in parallel.
  #pragma omp parallel for
  for (int idx = 0; idx < size; idx++) {
    out[idx] = in[idx] + 1;
  }
}
/* function Test */
/* Return true iff every one of the first `size` elements of `in` equals 2.
   Using a &&-reduction instead of calling exit() inside the loop keeps the
   body safe for parallel execution (no thread may terminate the others). */
bool Test(int size, int in[size]) {
  bool all_match = true;
  /* Each thread starts from the incoming value of all_match; at loop exit
     the per-thread results are combined with &&. */
  #pragma omp parallel for reduction(&&:all_match)
  for (int idx = 0; idx < size; idx++) {
    all_match = all_match && (in[idx] == 2);
  }
  // False if at least one element differs from 2:
  return all_match;
}
/* main */
int main(int argc, char* argv[]) {
  int matrix[6][200];

  /* Gen runs on GPP 0 and writes its first argument, viewed as a
     linearized int[6][200]. Handing a 2D array to Gen() through an
     int* is bad programming style, but it shows the model copes with
     linearization :-) */
#pragma smecy map(GPP, 0) arg(1, [6][200], out)
  Gen((int *) matrix, 200*6);

  /* Run two independent pieces of work concurrently: */
#pragma omp parallel sections
  {
    /* First half of the array... */
#pragma omp section
    {
      /* Mapped to PE 0: arg 2 comes in as an int[3][200], and after
         the call arg 3 goes out as an int[3][200]. The two arguments
         alias each other on purpose, just to show aliasing is
         handled. */
#pragma smecy map(PE, 0) arg(2, [3][200], in) arg(3, [3][200], out)
      Add(200*3, (int *) matrix, (int *) matrix);
    }
    /* ...and, in parallel, the second half. */
#pragma omp section
    {
      /* Mapped to PE 1: same shape, but starting at &matrix[3][0],
         i.e. the second half of the array, again with the two last
         arguments aliased. */
#pragma smecy map(PE, 1) arg(2, [3][200], in) \
                          arg(3, [3][200], out)
      Add(200*3, &matrix[3][0], &matrix[3][0]);
    }
  }

  /* A second concurrent round, this time splitting the array into
     three thirds mapped to PEs 2, 3 and 4: */
#pragma omp parallel sections
  {
#pragma omp section
    {
#pragma smecy map(PE, 2) arg(2, [2][200], in) arg(3, [2][200], out)
      Add(200*2, (int *) matrix, (int *) matrix);
    }
#pragma omp section
    {
#pragma smecy map(PE, 3) arg(2, [2][200], in) arg(3, [2][200], out)
      Add(200*2, &matrix[2][0], &matrix[2][0]);
    }
#pragma omp section
    {
#pragma smecy map(PE, 4) arg(2, [2][200], in) arg(3, [2][200], out)
      Add(200*2, &matrix[4][0], &matrix[4][0]);
    }
  }

  /* Here arg 2 is implicitly used as a whole: */
#pragma smecy map(GPP, 0) arg(2, in)
  bool result = Test(200*6, (int *) matrix);

  /* Exit status is non-zero iff the computation went wrong: */
  return !result;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.