source
stringlengths
3
92
c
stringlengths
26
2.25M
mandel_par.c
#include <stdlib.h> #include <stdio.h> #include <unistd.h> #include <time.h> #include <sys/time.h> #include "pngwriter.h" #include "consts.h" #include <omp.h> int main (int argc, char** argv) { png_data* pPng = png_create (IMAGE_WIDTH, IMAGE_HEIGHT); double x, y, x2, y2, cx, cy; cy = MIN_Y; double fDeltaX = (MAX_X - MIN_X) / (double) IMAGE_WIDTH; double fDeltaY = (MAX_Y - MIN_Y) / (double) IMAGE_HEIGHT; long nTotalIterationsCount = 0; unsigned long nTimeStart = get_time (); long i, j, n; n = 0; #pragma omp parallel for private(cx,cy,x,y,x2,y2,n,i,j) for (j = 0; j < IMAGE_HEIGHT; j++) { cy = MIN_Y + fDeltaY * j; cx = MIN_X; for (i = 0; i < IMAGE_WIDTH; i++){ x = cx; y = cy; x2 = x * x; y2 = y * y; // compute the orbit z, f(z), f²(z), f³(z), ... // count the iterations until the orbit leaves the circle |z|=2. // stop if the number of iterations exceeds the bound MAX_ITERS. for (n = 0; (n < MAX_ITERS) && (x2 + y2 < 4); n++) { y = 2 * x * y + cy; x = x2 - y2 + cx; x2 = x * x; y2 = y * y; } // n indicates if the point belongs to the mandelbrot set // plot the number of iterations at point (i, j) int c = ((long) n * 255) / MAX_ITERS; png_plot (pPng, i, j, c, c, c); nTotalIterationsCount++; cx += fDeltaX; } } unsigned long nTimeEnd = get_time (); // print benchmark data printf ("Total time: %g ms\n", (nTimeEnd - nTimeStart) / 1000.0); printf ("Image size: %ld x %ld = %ld Pixels\n", (long) IMAGE_WIDTH, (long) IMAGE_HEIGHT, (long) (IMAGE_WIDTH * IMAGE_HEIGHT)); printf ("Total number of iterations: %ld\n", nTotalIterationsCount); printf ("Avg. time per pixel: %g µs\n", (nTimeEnd - nTimeStart) / (double) (IMAGE_WIDTH * IMAGE_HEIGHT)); printf ("Avg. 
time per iteration: %g µs\n", (nTimeEnd - nTimeStart) / (double) nTotalIterationsCount); printf ("Iterations/second: %g\n", nTotalIterationsCount / (double) (nTimeEnd - nTimeStart) * 1e6); // assume there are 8 floating point operations per iteration printf ("MFlop/s: %g\n", nTotalIterationsCount * 8.0 / (double) (nTimeEnd - nTimeStart)); png_write (pPng, "mandel.png"); return 0; }
cpd.c
/**
* @file cpd.c
* @brief Tensor factorization with the CPD model using AO-ADMM.
* @author Shaden Smith <shaden@cs.umn.edu>
* @version 2.0.0
* @date 2016-05-14
*/



/******************************************************************************
 * INCLUDES
 *****************************************************************************/

#include <math.h>

#include "cpd.h"
#include "admm.h"

#include "../csf.h"
#include "../sptensor.h"
#include "../mttkrp.h"
#include "../timer.h"
#include "../util.h"



/******************************************************************************
 * PRIVATE FUNCTIONS
 *****************************************************************************/



/******************************************************************************
 * API FUNCTIONS
 *****************************************************************************/

/* Top-level CPD driver: fills in defaults for any NULL option struct,
 * allocates a workspace, runs the AO-ADMM iteration, and cleans up.
 * Only options this function itself allocated are freed on exit. */
splatt_error_type splatt_cpd(
    splatt_csf const * const tensor,
    splatt_idx_t rank,
    splatt_cpd_opts const * const cpd_options,
    splatt_global_opts const * const global_options,
    splatt_kruskal * factored)
{
  /* allocate default options if they were not supplied */
  splatt_global_opts * global_opts = (splatt_global_opts *) global_options;
  if(global_options == NULL) {
    global_opts = splatt_alloc_global_opts();
  }
  splatt_cpd_opts * cpd_opts = (splatt_cpd_opts *) cpd_options;
  if(cpd_options == NULL) {
    cpd_opts = splatt_alloc_cpd_opts();
  }

  splatt_omp_set_num_threads(global_opts->num_threads);

  /* allocate workspace */
  cpd_ws * ws = cpd_alloc_ws(tensor, rank, cpd_opts, global_opts);

  /* perform the factorization! */
  cpd_iterate(tensor, rank, ws, cpd_opts, global_opts, factored);

  /* clean up workspace */
  cpd_free_ws(ws);

  /* free options if we had to allocate them */
  if(global_options == NULL) {
    splatt_free_global_opts(global_opts);
  }
  if(cpd_options == NULL) {
    splatt_free_cpd_opts(cpd_opts);
  }

  return SPLATT_SUCCESS;
}


/* Allocate a CPD options struct populated with default tolerances,
 * iteration limits, and unconstrained (closed-form) constraints. */
splatt_cpd_opts * splatt_alloc_cpd_opts(void)
{
  splatt_cpd_opts * opts = splatt_malloc(sizeof(*opts));

  /* defaults */
  opts->tolerance = 1e-5;
  opts->max_iterations = 200;

  opts->inner_tolerance = 1e-2;
  opts->max_inner_iterations = 50;

  for(idx_t m=0; m < MAX_NMODES; ++m) {
    opts->chunk_sizes[m] = 50;
    opts->constraints[m] = splatt_alloc_constraint(SPLATT_CON_CLOSEDFORM);
  }

  return opts;
}


/* Free a CPD options struct and the per-mode constraints it owns. */
void splatt_free_cpd_opts(
    splatt_cpd_opts * opts)
{
  /* free constraints */
  for(idx_t m=0; m < MAX_NMODES; ++m) {
    splatt_free_constraint(opts->constraints[m]);
  }

  /* free options pointer */
  splatt_free(opts);
}


/* Allocate a Kruskal model sized for 'csf' with random factor matrices
 * and unit column weights (lambda). Caller frees with splatt_free_cpd(). */
splatt_kruskal * splatt_alloc_cpd(
    splatt_csf const * const csf,
    splatt_idx_t rank)
{
  splatt_kruskal * cpd = splatt_malloc(sizeof(*cpd));

  cpd->nmodes = csf->nmodes;
  cpd->rank = rank;
  cpd->lambda = splatt_malloc(rank * sizeof(*cpd->lambda));
  for(idx_t m=0; m < csf->nmodes; ++m) {
    cpd->dims[m] = csf->dims[m];
    cpd->factors[m] = splatt_malloc(csf->dims[m] * rank *
        sizeof(**cpd->factors));

    /* TODO: allow custom initialization including NUMA aware */
    fill_rand(cpd->factors[m], csf->dims[m] * rank);
  }

  /* initialize lambda in case it is not modified */
  for(idx_t r=0; r < rank; ++r) {
    cpd->lambda[r] = 1.;
  }

  return cpd;
}


/* Free a Kruskal model allocated by splatt_alloc_cpd(). */
void splatt_free_cpd(
    splatt_kruskal * factored)
{
  splatt_free(factored->lambda);
  for(idx_t m=0; m < factored->nmodes; ++m) {
    splatt_free(factored->factors[m]);
  }
  splatt_free(factored);
}



/******************************************************************************
 * PUBLIC FUNCTIONS
 *****************************************************************************/

/* Main AO-ADMM loop. Alternates over modes: MTTKRP, then an ADMM solve for
 * the constrained factor, then refreshes that mode's Gram matrix (aTa).
 * Returns the final relative squared error; also stores fit in 'factored'. */
double cpd_iterate(
    splatt_csf const * const tensor,
    idx_t const rank,
    cpd_ws * const ws,
    splatt_cpd_opts * const cpd_opts,
    splatt_global_opts const * const global_opts,
    splatt_kruskal * factored)
{
  idx_t const nmodes = tensor->nmodes;

  /* XXX: fix MTTKRP interface */
  /*
   * The matrices used for MTTKRP. When using MPI, these may be larger than
   * the mats[:] matrices due to non-local indices. If the sizes are the same,
   * these are just aliases for mats[:].
   */
  matrix_t * mats[MAX_NMODES+1];
  matrix_t * mttkrp_mats[MAX_NMODES+1];
  for(idx_t m=0; m < tensor->nmodes; ++m) {
    mats[m] = mat_mkptr(factored->factors[m], tensor->dims[m], rank, 1);
#ifdef SPLATT_USE_MPI
    /* setup local MTTKRP matrices */
#else
    mttkrp_mats[m] = mats[m];
#endif
    mat_normalize(mats[m], factored->lambda);
  }
  mats[MAX_NMODES] = ws->mttkrp_buf;
  mttkrp_mats[MAX_NMODES] = ws->mttkrp_buf;

  /* allow constraints to initialize */
  cpd_init_constraints(cpd_opts, mats, nmodes);

  /* reset column weights */
  val_t * const restrict norms = factored->lambda;
  for(idx_t r=0; r < rank; ++r) {
    norms[r] = 1.;
  }

  /* initialize aTa values (mode 0 is computed after its first update) */
  for(idx_t m=1; m < nmodes; ++m) {
    mat_aTa(mats[m], ws->aTa[m]);
  }

  /* XXX TODO: CSF opts */
  double * opts = splatt_default_opts();
  /* MTTKRP ws */
  splatt_mttkrp_ws * mttkrp_ws = splatt_mttkrp_alloc_ws(tensor, rank, opts);

  /* for tracking convergence */
  double olderr = 1.;
  double err = 0.;
  double const ttnormsq = csf_frobsq(tensor);

  /* timers */
  sp_timer_t itertime;
  sp_timer_t modetime[MAX_NMODES];
  timer_start(&timers[TIMER_CPD]);

  val_t inner_its[MAX_NMODES];

  /* foreach outer iteration */
  for(idx_t it=0; it < cpd_opts->max_iterations; ++it) {
    timer_fstart(&itertime);

    /* foreach AO step */
    for(idx_t m=0; m < nmodes; ++m) {
      timer_fstart(&modetime[m]);
      mttkrp_csf(tensor, mttkrp_mats, m, ws->thds, mttkrp_ws, global_opts);
#ifdef SPLATT_USE_MPI
      /* exchange partial MTTKRP results */
#endif

      /* ADMM solve for constraints */
      inner_its[m] = admm(m, mats, norms, ws, cpd_opts, global_opts);

#ifdef SPLATT_USE_MPI
      /* exchange updated factor rows */
#endif

      /* prepare aTa for next mode */
#ifdef SPLATT_USE_MPI
      /* XXX use real comm */
      mat_aTa_mpi(mats[m], ws->aTa[m], MPI_COMM_WORLD);
#else
      mat_aTa(mats[m], ws->aTa[m]);
#endif
      timer_stop(&modetime[m]);
    } /* foreach mode */

    /* calculate outer convergence */
    double const norm = cpd_norm(ws, norms);
    double const inner = cpd_innerprod(nmodes-1, ws, mats, norms);
    double const residual = ttnormsq + norm - (2 * inner);
    err = residual / ttnormsq;
    /* NOTE(review): debug-build-only monotonicity check; floating-point
     * round-off can make err exceed olderr marginally — confirm intended. */
    assert(err <= olderr);
    timer_stop(&itertime);

    /* print progress */
    if(global_opts->verbosity > SPLATT_VERBOSITY_NONE) {
      printf(" its = %4"SPLATT_PF_IDX" (%0.3"SPLATT_PF_VAL"s) "
          "rel-errsq = %0.5"SPLATT_PF_VAL" delta = %+0.4e\n",
          it+1, itertime.seconds, err, err - olderr);
      if(global_opts->verbosity > SPLATT_VERBOSITY_LOW) {
        for(idx_t m=0; m < nmodes; ++m) {
          printf(" mode = %1"SPLATT_PF_IDX" (%0.3fs)", m+1,
              modetime[m].seconds);
          if(inner_its[m] > 0) {
            printf(" [%4.1"SPLATT_PF_VAL" ADMM its per row]", inner_its[m]);
          }
          printf("\n");
        }
      }
    }

    /* terminate if converged */
    if(it > 0 && fabs(olderr - err) < cpd_opts->tolerance) {
      break;
    }
    olderr = err;
  }

  /* absorb into lambda if no constraints/regularizations */
  if(ws->unconstrained) {
    cpd_post_process(mats, norms, ws, cpd_opts, global_opts);
  } else {
    cpd_finalize_constraints(cpd_opts, mats, nmodes);
  }

  splatt_free(opts);
  for(idx_t m=0; m < tensor->nmodes; ++m) {
    /* free matrix memory if not an alias */
    if(mttkrp_mats[m] != mats[m]) {
      mat_free(mttkrp_mats[m]);
    }
    /* only free ptr */
    splatt_free(mats[m]);
  }
  splatt_mttkrp_free_ws(mttkrp_ws);

  timer_stop(&timers[TIMER_CPD]);

  factored->fit = 1 - err;
  return err;
}


/* Normalize each factor matrix and fold the per-column norms into
 * 'column_weights' (lambda). Used for unconstrained factorizations. */
void cpd_post_process(
    matrix_t * * mats,
    val_t * const column_weights,
    cpd_ws * const ws,
    splatt_cpd_opts const * const cpd_opts,
    splatt_global_opts const * const global_opts)
{
  idx_t const rank = mats[0]->J;

  val_t * tmp = splatt_malloc(rank * sizeof(*tmp));

  /* normalize each matrix and adjust lambda */
  for(idx_t m=0; m < ws->nmodes; ++m) {
    mat_normalize(mats[m], tmp);
    for(idx_t f=0; f < rank; ++f) {
      column_weights[f] *= tmp[f];
    }
  }

  splatt_free(tmp);
}


/* Allocate the CPD workspace sized for 'tensor': Gram matrices, thread
 * scratch, the MTTKRP buffer, and ADMM duals/auxiliary matrices for any
 * mode whose constraint lacks a closed-form solve. */
cpd_ws * cpd_alloc_ws(
    splatt_csf const * const tensor,
    idx_t rank,
    splatt_cpd_opts const * const cpd_opts,
    splatt_global_opts const * const global_opts)
{
  idx_t const nmodes = tensor->nmodes;

  cpd_ws * ws = splatt_malloc(sizeof(*ws));

  ws->nmodes = nmodes;
  for(idx_t m=0; m < nmodes; ++m) {
    ws->aTa[m] = mat_alloc(rank, rank);
  }
  ws->aTa_buf = mat_alloc(rank, rank);
  ws->gram = mat_alloc(rank, rank);

  ws->nthreads = global_opts->num_threads;
  ws->thds = thd_init(ws->nthreads, 3,
      (rank * rank * sizeof(val_t)) + 64,
      0,
      (nmodes * rank * sizeof(val_t)) + 64);

  /* MTTKRP space */
  idx_t const maxdim = tensor->dims[argmax_elem(tensor->dims, nmodes)];
  ws->mttkrp_buf = mat_alloc(maxdim, rank);

  /* Setup structures needed for constraints. */
  ws->unconstrained = true;
  for(idx_t m=0; m < nmodes; ++m) {
    /* allocate duals if we need to perform ADMM */
    if(cpd_opts->constraints[m]->solve_type != SPLATT_CON_CLOSEDFORM) {
      ws->duals[m] = mat_zero(tensor->dims[m], rank);
    } else {
      ws->duals[m] = NULL;
    }

    if(strcmp(cpd_opts->constraints[m]->description, "UNCONSTRAINED") != 0) {
      ws->unconstrained = false;
    }
  }

  if(ws->unconstrained) {
    ws->auxil = NULL;
    ws->mat_init = NULL;
  } else {
    ws->auxil = mat_alloc(maxdim, rank);
    ws->mat_init = mat_alloc(maxdim, rank);
  }

  return ws;
}


/* Allocate a workspace without a backing tensor (zeroed Gram matrices,
 * no MTTKRP buffer, no duals). Presumably for callers that fill these in
 * later — confirm against call sites. */
cpd_ws * cpd_alloc_ws_empty(
    idx_t const nmodes,
    idx_t const rank,
    splatt_cpd_opts const * const cpd_opts,
    splatt_global_opts const * const global_opts)
{
  cpd_ws * ws = splatt_malloc(sizeof(*ws));

  ws->nmodes = nmodes;
  for(idx_t m=0; m < nmodes; ++m) {
    ws->aTa[m] = mat_zero(rank, rank);
  }
  ws->aTa_buf = mat_zero(rank, rank);
  ws->gram = mat_zero(rank, rank);

  ws->nthreads = global_opts->num_threads;
  ws->thds = thd_init(ws->nthreads, 3,
      (rank * rank * sizeof(val_t)) + 64,
      0,
      (nmodes * rank * sizeof(val_t)) + 64);

  /* MTTKRP space */
  ws->mttkrp_buf = NULL;

  /* Setup structures needed for constraints. */
  ws->unconstrained = true;
  for(idx_t m=0; m < nmodes; ++m) {
    ws->duals[m] = NULL;

    if(strcmp(cpd_opts->constraints[m]->description, "UNCONSTRAINED") != 0) {
      ws->unconstrained = false;
    }
  }
  ws->auxil = NULL;
  ws->mat_init = NULL;

  return ws;
}


/* Free a CPD workspace and everything it owns (mat_free/NULL-safe). */
void cpd_free_ws(
    cpd_ws * const ws)
{
  mat_free(ws->mttkrp_buf);
  mat_free(ws->aTa_buf);
  mat_free(ws->gram);
  mat_free(ws->auxil);
  mat_free(ws->mat_init);
  for(idx_t m=0; m < ws->nmodes; ++m) {
    mat_free(ws->aTa[m]);
    mat_free(ws->duals[m]);
  }
  thd_free(ws->thds, ws->nthreads);
  splatt_free(ws);
}


/* Frobenius norm (squared, magnitude) of the current model, computed via the
 * Hadamard product of the cached Gram matrices (upper triangle only) and the
 * column weights. Clobbers ws->aTa_buf as scratch. */
val_t cpd_norm(
    cpd_ws const * const ws,
    val_t const * const restrict column_weights)
{
  idx_t const rank = ws->aTa[0]->J;
  val_t * const restrict scratch = ws->aTa_buf->vals;

  /* initialize scratch space */
  for(idx_t i=0; i < rank; ++i) {
    for(idx_t j=i; j < rank; ++j) {
      scratch[j + (i*rank)] = 1.;
    }
  }

  /* scratch = hada(aTa) */
  for(idx_t m=0; m < ws->nmodes; ++m) {
    val_t const * const restrict atavals = ws->aTa[m]->vals;
    for(idx_t i=0; i < rank; ++i) {
      for(idx_t j=i; j < rank; ++j) {
        scratch[j + (i*rank)] *= atavals[j + (i*rank)];
      }
    }
  }

  /* now compute weights^T * aTa[MAX_NMODES] * weights */
  val_t norm = 0;
  for(idx_t i=0; i < rank; ++i) {
    norm += scratch[i+(i*rank)] * column_weights[i] * column_weights[i];
    /* off-diagonal entries counted twice (symmetric matrix) */
    for(idx_t j=i+1; j < rank; ++j) {
      norm += scratch[j+(i*rank)] * column_weights[i] * column_weights[j] * 2;
    }
  }

  return fabs(norm);
}


/* Inner product <tensor, model>, reusing the MTTKRP output of 'lastmode'
 * so no extra tensor traversal is needed. Parallelized over rows. */
val_t cpd_innerprod(
    idx_t lastmode,
    cpd_ws const * const ws,
    matrix_t * * mats,
    val_t const * const restrict column_weights)
{
  idx_t const nrows = mats[lastmode]->I;
  idx_t const rank = mats[0]->J;

  val_t const * const newmat = mats[lastmode]->vals;
  val_t const * const mttkrp = ws->mttkrp_buf->vals;

  val_t myinner = 0;
  #pragma omp parallel reduction(+:myinner)
  {
    int const tid = splatt_omp_get_thread_num();
    val_t * const restrict accumF = ws->thds[tid].scratch[0];

    for(idx_t r=0; r < rank; ++r) {
      accumF[r] = 0.;
    }

    /* Hadamard product with newest factor and previous MTTKRP */
    #pragma omp for schedule(static)
    for(idx_t i=0; i < nrows; ++i) {
      val_t const * const restrict newmat_row = newmat + (i*rank);
      val_t const * const restrict mttkrp_row = mttkrp + (i*rank);
      for(idx_t r=0; r < rank; ++r) {
        accumF[r] += newmat_row[r] * mttkrp_row[r];
      }
    }

    /* accumulate everything into 'myinner' */
    for(idx_t r=0; r < rank; ++r) {
      myinner += accumF[r] * column_weights[r];
    }
  } /* end omp parallel -- reduce myinner */

  /* TODO AllReduce for MPI support */
  return myinner;
}


/* Frobenius norm (magnitude) of a stand-alone Kruskal model; like cpd_norm()
 * but computes each mode's Gram matrix on the fly instead of using a cache. */
val_t kruskal_norm(
    splatt_kruskal const * const kruskal)
{
  idx_t const rank = kruskal->rank;
  val_t * const scratch = (val_t *) splatt_malloc(rank * rank *
      sizeof(*scratch));
  matrix_t * ata = mat_zero(rank, rank);

  /* initialize scratch space */
  for(idx_t i=0; i < rank; ++i) {
    for(idx_t j=i; j < rank; ++j) {
      scratch[j + (i*rank)] = 1.;
    }
  }

  /* scratch = hada(aTa) */
  for(idx_t m=0; m < kruskal->nmodes; ++m) {
    matrix_t matptr;
    mat_fillptr(&matptr, kruskal->factors[m], kruskal->dims[m], rank, 1);
    mat_aTa(&matptr, ata);
    val_t const * const restrict atavals = ata->vals;
    for(idx_t i=0; i < rank; ++i) {
      for(idx_t j=i; j < rank; ++j) {
        scratch[j + (i*rank)] *= atavals[j + (i*rank)];
      }
    }
  }

  /* now compute weights^T * aTa[MAX_NMODES] * weights */
  val_t norm = 0;
  val_t const * const column_weights = kruskal->lambda;
  for(idx_t i=0; i < rank; ++i) {
    norm += scratch[i+(i*rank)] * column_weights[i] * column_weights[i];
    for(idx_t j=i+1; j < rank; ++j) {
      norm += scratch[j+(i*rank)] * column_weights[i] * column_weights[j] * 2;
    }
  }

  splatt_free(scratch);
  mat_free(ata);

  return fabs(norm);
}


/* Relative error of 'factored' against a coordinate-form tensor:
 * sqrt(|X|^2 + |Z|^2 - 2<X,Z>) / |X|. The MTTKRP is done on the smallest
 * mode to minimize the size of the temporary matrix. */
double cpd_error(
    sptensor_t const * const tensor,
    splatt_kruskal const * const factored)
{
  timer_start(&timers[TIMER_FIT]);

  /* find the smallest mode for MTTKRP */
  idx_t const smallest_mode = argmin_elem(tensor->dims, tensor->nmodes);
  idx_t const nrows = tensor->dims[smallest_mode];
  idx_t const rank = factored->rank;

  /*
   * MTTKRP
   */
  matrix_t * mat_ptrs[MAX_NMODES+1];
  for(idx_t m=0; m < factored->nmodes; ++m) {
    mat_ptrs[m] = mat_mkptr(factored->factors[m], factored->dims[m], rank, 1);
  }
  mat_ptrs[MAX_NMODES] = mat_alloc(nrows, rank);
  mttkrp_stream(tensor, mat_ptrs, smallest_mode);

  val_t const * const smallmat = factored->factors[smallest_mode];
  val_t const * const mttkrp = mat_ptrs[MAX_NMODES]->vals;

  /*
   * inner product between tensor and factored
   */
  double inner = 0;
  #pragma omp parallel reduction(+:inner)
  {
    int const tid = splatt_omp_get_thread_num();
    val_t * const restrict accumF = splatt_malloc(rank * sizeof(*accumF));

    for(idx_t r=0; r < rank; ++r) {
      accumF[r] = 0.;
    }

    /* Hadamard product with newest factor and previous MTTKRP */
    #pragma omp for schedule(static)
    for(idx_t i=0; i < nrows; ++i) {
      val_t const * const restrict smallmat_row = smallmat + (i*rank);
      val_t const * const restrict mttkrp_row = mttkrp + (i*rank);
      for(idx_t r=0; r < rank; ++r) {
        accumF[r] += smallmat_row[r] * mttkrp_row[r];
      }
    }

    /* accumulate everything into 'inner' */
    for(idx_t r=0; r < rank; ++r) {
      inner += accumF[r] * factored->lambda[r];
    }
    splatt_free(accumF);
  } /* end omp parallel -- reduce myinner */

  double const Xnormsq = tt_normsq(tensor);
  double const Znormsq = kruskal_norm(factored);
  double const residual = sqrt(Xnormsq + Znormsq - (2 * inner));
  double const err = residual / sqrt(Xnormsq);

#if 0
  printf("\n");
  printf("Xnormsq: %e Znormsq: %e inner: %e\n", Xnormsq, Znormsq, inner);
#endif

  /* cleanup */
  mat_free(mat_ptrs[MAX_NMODES]);
  for(idx_t m=0; m < factored->nmodes; ++m) {
    /* just the ptr */
    splatt_free(mat_ptrs[m]);
  }

  timer_stop(&timers[TIMER_FIT]);

  return err;
}
openmp-macro-expansion.c
// RUN: %clang_cc1 -fopenmp -E -o - %s 2>&1 | FileCheck %s // This is to make sure the pragma name is not expanded! #define omp (0xDEADBEEF) #define N 2 #define M 1 #define E N> #define map_to_be_expanded(x) map(tofrom:x) #define sched_to_be_expanded(x,s) schedule(x,s) #define reda_to_be_expanded(x) reduction(+:x) #define redb_to_be_expanded(x,op) reduction(op:x) void foo(int *a, int *b) { //CHECK: omp target map(a[0:2]) map(tofrom:b[0:2*1]) #pragma omp target map(a[0:N]) map_to_be_expanded(b[0:2*M]) { int reda; int redb; //CHECK: omp parallel for schedule(static,2> >1) reduction(+:reda) reduction(*:redb) #pragma omp parallel for sched_to_be_expanded(static, E>1) \ reda_to_be_expanded(reda) redb_to_be_expanded(redb,*) for (int i = 0; i < N; ++i) { reda += a[i]; redb += b[i]; } a[0] = reda; b[0] = redb; } }
mpi_io.c
/****************************************************************************** * INCLUDES *****************************************************************************/ #include "../splatt_mpi.h" #include "../io.h" #include "../timer.h" #include "../util.h" /****************************************************************************** * API FUNCTONS *****************************************************************************/ int splatt_mpi_csf_load( char const * const fname, splatt_idx_t * nmodes, splatt_csf ** tensors, double const * const options, MPI_Comm comm) { sptensor_t * tt = NULL; int rank, npes; MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &npes); return SPLATT_SUCCESS; } int splatt_mpi_coord_load( char const * const fname, splatt_idx_t * nmodes, splatt_idx_t * nnz, splatt_idx_t *** inds, splatt_val_t ** vals, double const * const options, MPI_Comm comm) { sptensor_t * tt = mpi_simple_distribute(fname, comm); if(tt == NULL) { *nmodes = 0; *nnz = 0; *vals = NULL; *inds = NULL; return SPLATT_ERROR_BADINPUT; } *nmodes = tt->nmodes; *nnz = tt->nnz; /* copy to output */ *vals = tt->vals; *inds = splatt_malloc(tt->nmodes * sizeof(**inds)); for(idx_t m=0; m < tt->nmodes; ++m) { (*inds)[m] = tt->ind[m]; } free(tt); return SPLATT_SUCCESS; } /****************************************************************************** * PRIVATE FUNCTONS *****************************************************************************/ /** * @brief Fill buf with the next 'nnz_to_read' tensor values. * * @param fin The file to read from. * @param buf The sptensor buffer to fill. * @param nnz_to_read The number of nonzeros to read. 
*/ static void p_fill_tt_nnz( FILE * fin, sptensor_t * const buf, idx_t const * const offset, idx_t const nnz_to_read) { idx_t const nmodes = buf->nmodes; char * ptr = NULL; char * line = NULL; ssize_t read; size_t len = 0; idx_t nnzread = 0; while(nnzread < nnz_to_read && (read = getline(&line, &len, fin)) != -1) { /* skip empty and commented lines */ if(read > 1 && line[0] != '#') { ptr = line; for(idx_t m=0; m < nmodes; ++m) { idx_t ind = strtoull(ptr, &ptr, 10); buf->ind[m][nnzread] = ind - offset[m]; } val_t const v = strtod(ptr, &ptr); buf->vals[nnzread++] = v; } } } static int * p_distribute_parts( sptensor_t * const ttbuf, char const * const pfname, rank_info * const rinfo) { /* root may have more than target_nnz */ idx_t const target_nnz = rinfo->global_nnz / rinfo->npes; int * parts = (int *) splatt_malloc(SS_MAX(ttbuf->nnz, target_nnz) * sizeof(int)); if(rinfo->rank == 0) { int ret; FILE * fin = open_f(pfname, "r"); /* send to all other ranks */ for(int p=1; p < rinfo->npes; ++p) { /* read into buffer */ for(idx_t n=0; n < target_nnz; ++n) { if((ret = fscanf(fin, "%d", &(parts[n]))) == 0) { fprintf(stderr, "SPLATT ERROR: not enough elements in '%s'\n", pfname); exit(1); } } MPI_Send(parts, target_nnz, MPI_INT, p, 0, rinfo->comm_3d); } /* now read my own part info */ for(idx_t n=0; n < ttbuf->nnz; ++n) { if((ret = fscanf(fin, "%d", &(parts[n]))) == 0) { fprintf(stderr, "SPLATT ERROR: not enough elements in '%s'\n", pfname); exit(1); } } fclose(fin); } else { /* receive part info */ MPI_Recv(parts, ttbuf->nnz, MPI_INT, 0, 0, rinfo->comm_3d, &(rinfo->status)); } return parts; } static void p_find_my_slices_1d( idx_t ** const ssizes, idx_t const nmodes, idx_t const nnz, rank_info * const rinfo) { idx_t const * const dims = rinfo->global_dims; /* find start/end slices for my partition */ for(idx_t m=0; m < nmodes; ++m) { /* current processor */ int currp = 0; idx_t lastn = 0; idx_t nnzcnt = 0; idx_t pnnz = nnz / rinfo->npes; rinfo->layer_starts[m] = 0; 
rinfo->layer_ends[m] = dims[m]; rinfo->mat_start[m] = 0; rinfo->mat_end[m] = dims[m]; for(idx_t s=0; s < dims[m]; ++s) { if(nnzcnt >= lastn + pnnz) { /* choose this slice or the previous, whichever is closer */ if(s > 0) { idx_t const thisdist = nnzcnt - (lastn + pnnz); idx_t const prevdist = (lastn + pnnz) - (nnzcnt - ssizes[m][s-1]); if(prevdist < thisdist) { lastn = nnzcnt - ssizes[m][s-1]; } else { lastn = nnzcnt; } } else { lastn = nnzcnt; } ++currp; /* adjust target nnz based on what is left */ pnnz = (nnz - lastn) / SS_MAX(1, rinfo->npes - currp); if(currp == rinfo->rank) { rinfo->mat_start[m] = s; } else if(currp == rinfo->rank+1 && currp != rinfo->npes) { /* only set mat_end if we aren't at the end of the tensor */ rinfo->mat_end[m] = s; break; } } nnzcnt += ssizes[m][s]; if(rinfo->rank == rinfo->npes-1) { assert(rinfo->mat_end[m] == rinfo->global_dims[m]); } } /* it is possible to have a very small dimension and too many ranks */ if(rinfo->npes > 1 && rinfo->mat_start[m] == 0 && rinfo->mat_end[m] == dims[m]) { fprintf(stderr, "SPLATT: rank: %d too many MPI ranks for mode %"\ SPLATT_PF_IDX".\n", rinfo->rank, m+1); rinfo->mat_start[m] = dims[m]; rinfo->mat_end[m] = dims[m]; } } } /** * @brief Count the nonzero values in a partition of X. * * @param fname The name of the file containing X. * @param nmodes The number of modes of X. * * @return The number of nonzeros in the intersection of all sstarts and sends. 
*/ static idx_t p_count_my_nnz_1d( char const * const fname, idx_t const nmodes, idx_t const * const sstarts, idx_t const * const sends) { FILE * fin = open_f(fname, "r"); char * ptr = NULL; char * line = NULL; ssize_t read; size_t len = 0; /* count nnz in my partition */ idx_t mynnz = 0; while((read = getline(&line, &len, fin)) != -1) { /* skip empty and commented lines */ if(read > 1 && line[0] != '#') { int mine = 0; ptr = line; for(idx_t m=0; m < nmodes; ++m) { idx_t ind = strtoull(ptr, &ptr, 10) - 1; /* I own the nnz if it falls in any of my slices */ if(ind >= sstarts[m] && ind < sends[m]) { mine = 1; break; } } if(mine) { ++mynnz; } /* skip over tensor val */ strtod(ptr, &ptr); } } fclose(fin); free(line); return mynnz; } /** * @brief Read a partition of X into tt. * * @param fname The file containing X. * @param tt The tensor structure (must be pre-allocated). * @param sstarts Array of starting slices, inclusive (one for each mode). * @param sends Array of ending slices, exclusive (one for each mode). */ static void p_read_tt_part_1d( char const * const fname, sptensor_t * const tt, idx_t const * const sstarts, idx_t const * const sends) { idx_t const nnz = tt->nnz; idx_t const nmodes = tt->nmodes; char * ptr = NULL; char * line = NULL; ssize_t read; size_t len = 0; FILE * fin = open_f(fname, "r"); idx_t nnzread = 0; while(nnzread < nnz && (read = getline(&line, &len, fin)) != -1) { /* skip empty and commented lines */ if(read > 1 && line[0] != '#') { int mine = 0; ptr = line; for(idx_t m=0; m < nmodes; ++m) { idx_t ind = strtoull(ptr, &ptr, 10) - 1; tt->ind[m][nnzread] = ind; if(ind >= sstarts[m] && ind < sends[m]) { mine = 1; } } tt->vals[nnzread] = strtod(ptr, &ptr); if(mine) { ++nnzread; } } } fclose(fin); free(line); } /** * @brief Read my portion of X from a file. * * @param fname The file containing X. * @param ssizes The nonzero counts in each slice. * @param nmodes The number of modes in X. * @param rinfo MPI information (nnz, 3D comm, etc.). 
* * @return My portion of the sparse tensor read from fname. */ static sptensor_t * p_read_tt_1d( char const * const fname, idx_t ** const ssizes, idx_t const nmodes, rank_info * const rinfo) { int const rank = rinfo->rank; idx_t const nnz = rinfo->global_nnz; idx_t const * const dims = rinfo->global_dims; /* find start/end slices for my partition */ p_find_my_slices_1d(ssizes, nmodes, nnz, rinfo); /* count nnz in my partition and allocate */ idx_t const mynnz = p_count_my_nnz_1d(fname, nmodes, rinfo->mat_start, rinfo->mat_end); sptensor_t * tt = tt_alloc(mynnz, nmodes); /* now actually load values */ p_read_tt_part_1d(fname, tt, rinfo->mat_start, rinfo->mat_end); return tt; } /** * @brief Find the boundaries for a process layer. * * @param ssizes The number of nonzeros found in each index (of each mode). * ssizes[1][5] is the number of nonzeros in X(:,5,:). * @param mode Which mode to work on. * @param rinfo MPI rank information. */ static void p_find_layer_boundaries( idx_t ** const ssizes, idx_t const mode, rank_info * const rinfo) { idx_t const * const dims = rinfo->global_dims; idx_t const nnz = rinfo->global_nnz; idx_t const m = mode; /* find start/end slices for my partition */ int const layer_dim = rinfo->dims_3d[m]; idx_t pnnz = nnz / layer_dim; /* nnz in a layer */ /* current processor */ int currp = 0; idx_t lastn = 0; idx_t nnzcnt = ssizes[m][0]; /* initialize layer_ptrs */ rinfo->layer_ptrs[m] = splatt_malloc((layer_dim+1) * sizeof(**(rinfo->layer_ptrs))); rinfo->layer_ptrs[m][currp++] = 0; rinfo->layer_ptrs[m][layer_dim] = dims[m]; if(layer_dim == 1) { goto CLEANUP; return; } /* foreach slice */ for(idx_t s=1; s < dims[m]; ++s) { /* if we have passed the next layer boundary */ if(nnzcnt >= lastn + pnnz) { /* choose this slice or the previous, whichever is closer */ idx_t const thisdist = nnzcnt - (lastn + pnnz); idx_t const prevdist = (lastn + pnnz) - (nnzcnt - ssizes[m][s-1]); if(prevdist < thisdist) { lastn = nnzcnt - ssizes[m][s-1]; /* see below 
comment */ //rinfo->layer_ptrs[m][currp++] = s-1; } else { lastn = nnzcnt; //rinfo->layer_ptrs[m][currp++] = s; } /* Always choosing s but marking lastn with s-1 leads to better balance * and communication volume. This is totally a heuristic. */ rinfo->layer_ptrs[m][currp++] = s; /* exit early if we placed the last rank */ if(currp == layer_dim) { break; } /* adjust target nnz based on what is left */ pnnz = (nnz - lastn) / SS_MAX(1, layer_dim - (currp-1)); } nnzcnt += ssizes[m][s]; } CLEANUP: /* store layer bounderies in layer_{starts, ends} */ rinfo->layer_starts[m] = rinfo->layer_ptrs[m][rinfo->coords_3d[m]]; rinfo->layer_ends[m] = rinfo->layer_ptrs[m][rinfo->coords_3d[m] + 1]; /* it is possible to have a very small dimension and too many ranks */ if(rinfo->dims_3d[m] > 1 && rinfo->layer_ends[m] - rinfo->layer_starts[m] == dims[m]) { fprintf(stderr, "SPLATT: rank: %d too many MPI ranks for mode %"\ SPLATT_PF_IDX".\n", rinfo->rank, m+1); rinfo->layer_starts[m] = dims[m]; rinfo->layer_ends[m] = dims[m]; } } /** * @brief Rearrange nonzeros according to a medium-grained decomposition. * * @param ttbuf The tensor to rearrange. * @param ssizes The number of nonzeros found in each index. * @param rinfo MPI rank information. * * @return My owned tensor nonzeros. */ static sptensor_t * p_rearrange_medium( sptensor_t * const ttbuf, idx_t * * ssizes, rank_info * const rinfo) { #pragma omp parallel for schedule(static, 1) for(idx_t m=0; m < ttbuf->nmodes; ++m) { p_find_layer_boundaries(ssizes, m, rinfo); } /* create partitioning */ int * parts = splatt_malloc(ttbuf->nnz * sizeof(*parts)); #pragma omp parallel for schedule(static) for(idx_t n=0; n < ttbuf->nnz; ++n) { parts[n] = mpi_determine_med_owner(ttbuf, n, rinfo); } sptensor_t * tt = mpi_rearrange_by_part(ttbuf, parts, rinfo->comm_3d); splatt_free(parts); return tt; } /** * @brief Rearrange nonzeros according to a medium-grained decomposition. * * @param ttbuf The tensor to rearrange. 
* @param pfname The filename containing the partitioning information. * @param ssizes The number of nonzeros found in each index. * @param rinfo MPI rank information. * * @return My owned tensor nonzeros. */ static sptensor_t * p_rearrange_fine( sptensor_t * const ttbuf, char const * const pfname, idx_t * * ssizes, rank_info * const rinfo) { /* first distribute partitioning information */ int * parts = p_distribute_parts(ttbuf, pfname, rinfo); sptensor_t * tt = mpi_rearrange_by_part(ttbuf, parts, rinfo->comm_3d); free(parts); return tt; } /** * @brief Count the nonzeros in each slice of X. * * @param tt My subtensor. * @param ssizes A 2D array for counting slice 'sizes'. * @param rinfo MPI information (containing global dims, nnz, etc.). */ static void p_fill_ssizes( sptensor_t const * const tt, idx_t ** const ssizes, rank_info const * const rinfo) { for(idx_t m=0; m < tt->nmodes; ++m) { idx_t const * const ind = tt->ind[m]; for(idx_t n=0; n < tt->nnz; ++n) { ssizes[m][ind[n]] += 1; } /* reduce to get total slice counts */ MPI_Allreduce(MPI_IN_PLACE, ssizes[m], (int) rinfo->global_dims[m], SPLATT_MPI_IDX, MPI_SUM, rinfo->comm_3d); } } /** * @brief Fill in the best MPI dimensions we can find. The truly optimal * solution should involve the tensor's sparsity pattern, but in general * this works as good (but usually better) than the hand-tuned dimensions * that we tried. * * @param rinfo MPI rank information. 
*/ static void p_get_best_mpi_dim( rank_info * const rinfo) { int nprimes = 0; int * primes = get_primes(rinfo->npes, &nprimes); idx_t total_size = 0; for(idx_t m=0; m < rinfo->nmodes; ++m) { total_size += rinfo->global_dims[m]; /* reset mpi dims */ rinfo->dims_3d[m] = 1; } idx_t target = total_size / (idx_t)rinfo->npes; long diffs[MAX_NMODES]; /* start from the largest prime */ for(int p = nprimes-1; p >= 0; --p) { int furthest = 0; /* find dim furthest from target */ for(idx_t m=0; m < rinfo->nmodes; ++m) { /* distance is current - target */ idx_t const curr = rinfo->global_dims[m] / rinfo->dims_3d[m]; /* avoid underflow */ diffs[m] = (curr > target) ? (curr - target) : 0; if(diffs[m] > diffs[furthest]) { furthest = m; } } /* assign p processes to furthest mode */ rinfo->dims_3d[furthest] *= primes[p]; } free(primes); } /** * @brief Read a sparse tensor in coordinate form from a text file and * and distribute among MPI ranks. * * @param fin The file to read from. * @param comm The MPI communicator to distribute among. * * @return The sparse tensor. 
*/ static sptensor_t * p_tt_mpi_read_file( FILE * fin, MPI_Comm comm) { int rank, npes; MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &npes); idx_t dims[MAX_NMODES]; idx_t offsets[MAX_NMODES]; idx_t global_nnz; idx_t nmodes; sptensor_t * tt = NULL; if(rank == 0) { /* send dimension info */ tt_get_dims(fin, &nmodes, &global_nnz, dims, offsets); rewind(fin); MPI_Bcast(&nmodes, 1, SPLATT_MPI_IDX, 0, comm); MPI_Bcast(&global_nnz, 1, SPLATT_MPI_IDX, 0, comm); } else { MPI_Bcast(&nmodes, 1, SPLATT_MPI_IDX, 0, comm); MPI_Bcast(&global_nnz, 1, SPLATT_MPI_IDX, 0, comm); } /* compute my even chunk of nonzeros -- root rank gets the extra amount */ idx_t const target_nnz = global_nnz / npes; idx_t my_nnz = target_nnz; if(rank == 0) { my_nnz = global_nnz - ((npes-1) * my_nnz); } /* read/send all chunks */ if(rank == 0) { sptensor_t * tt_buf = tt_alloc(target_nnz, nmodes); /* now send to everyone else */ for(int p=1; p < npes; ++p) { p_fill_tt_nnz(fin, tt_buf, offsets, target_nnz); for(idx_t m=0; m < tt_buf->nmodes; ++m) { MPI_Send(tt_buf->ind[m], target_nnz, SPLATT_MPI_IDX, p, m, comm); } MPI_Send(tt_buf->vals, target_nnz, SPLATT_MPI_VAL, p, nmodes, comm); } tt_free(tt_buf); /* load my own */ tt = tt_alloc(my_nnz, nmodes); p_fill_tt_nnz(fin, tt, offsets, my_nnz); } else { MPI_Status status; /* receive my chunk */ tt = tt_alloc(my_nnz, nmodes); for(idx_t m=0; m < tt->nmodes; ++m) { MPI_Recv(tt->ind[m], my_nnz, SPLATT_MPI_IDX, 0, m, comm, &status); } MPI_Recv(tt->vals, my_nnz, SPLATT_MPI_VAL, 0, nmodes, comm, &status); } return tt; } /** * @brief Read a sparse tensor in coordinate form from a binary file and * distribute among MPI ranks. * * @param fin The file to read from. * @param comm The MPI communicator to distribute among. * * @return The sparse tensor. 
*/ static sptensor_t * p_tt_mpi_read_binary_file( FILE * fin, MPI_Comm comm) { sptensor_t * tt = NULL; int rank, npes; MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &npes); idx_t global_nnz; idx_t nmodes; idx_t dims[MAX_NMODES]; /* get header and tensor stats */ bin_header header; if(rank == 0) { read_binary_header(fin, &header); fill_binary_idx(&nmodes, 1, &header, fin); fill_binary_idx(dims, nmodes, &header, fin); fill_binary_idx(&global_nnz, 1, &header, fin); } /* send dimension info */ if(rank == 0) { MPI_Bcast(&nmodes, 1, SPLATT_MPI_IDX, 0, comm); MPI_Bcast(&global_nnz, 1, SPLATT_MPI_IDX, 0, comm); } else { MPI_Bcast(&nmodes, 1, SPLATT_MPI_IDX, 0, comm); MPI_Bcast(&global_nnz, 1, SPLATT_MPI_IDX, 0, comm); } /* sanity check */ if(nmodes > MAX_NMODES) { if(rank == 0) { fprintf(stderr, "SPLATT ERROR: maximum %"SPLATT_PF_IDX" modes supported. " "Found %"SPLATT_PF_IDX". Please recompile with " "MAX_NMODES=%"SPLATT_PF_IDX".\n", MAX_NMODES, nmodes, nmodes); } return NULL; } /* compute my even chunk of nonzeros -- root rank gets the extra amount */ idx_t const target_nnz = global_nnz / npes; idx_t my_nnz = target_nnz; if(rank == 0) { my_nnz = global_nnz - ((npes-1)* target_nnz); } tt = tt_alloc(my_nnz, nmodes); /* read/send all chunks */ if(rank == 0) { /* handle inds */ idx_t * ibuf = splatt_malloc(target_nnz * sizeof(idx_t)); for(idx_t m=0; m < nmodes; ++m) { for(int p=1; p < npes; ++p) { fill_binary_idx(ibuf, target_nnz, &header, fin); MPI_Send(ibuf, target_nnz, SPLATT_MPI_IDX, p, m, comm); } /* load my own */ fill_binary_idx(tt->ind[m], my_nnz, &header, fin); } splatt_free(ibuf); /* now vals */ val_t * vbuf = splatt_malloc(target_nnz * sizeof(val_t)); for(int p=1; p < npes; ++p) { fill_binary_val(vbuf, target_nnz, &header, fin); MPI_Send(vbuf, target_nnz, SPLATT_MPI_VAL, p, nmodes, comm); } splatt_free(vbuf); /* finally, load my own vals */ fill_binary_val(tt->vals, my_nnz, &header, fin); } else { /* non-root ranks just recv */ MPI_Status status; /* receive my 
chunk */ for(idx_t m=0; m < tt->nmodes; ++m) { MPI_Recv(tt->ind[m], my_nnz, SPLATT_MPI_IDX, 0, m, comm, &status); } MPI_Recv(tt->vals, my_nnz, SPLATT_MPI_VAL, 0, nmodes, comm, &status); } return tt; } /****************************************************************************** * PUBLIC FUNCTONS *****************************************************************************/ sptensor_t * mpi_tt_read( char const * const ifname, char const * const pfname, rank_info * const rinfo) { timer_start(&timers[TIMER_IO]); /* first just make sure it exists */ FILE * fin; if((fin = fopen(ifname, "r")) == NULL) { if(rinfo->rank == 0) { fprintf(stderr, "SPLATT ERROR: failed to open '%s'\n", ifname); } return NULL; } fclose(fin); /* first naively distribute tensor nonzeros for analysis */ sptensor_t * ttbuf = mpi_simple_distribute(ifname, MPI_COMM_WORLD); rinfo->nmodes = ttbuf->nmodes; MPI_Allreduce(&(ttbuf->nnz), &(rinfo->global_nnz), 1, SPLATT_MPI_IDX, MPI_SUM, MPI_COMM_WORLD); MPI_Allreduce(ttbuf->dims, &(rinfo->global_dims), ttbuf->nmodes, SPLATT_MPI_IDX, MPI_MAX, MPI_COMM_WORLD); /* first compute MPI dimension if not specified by the user */ if(rinfo->decomp == DEFAULT_MPI_DISTRIBUTION) { rinfo->decomp = SPLATT_DECOMP_MEDIUM; p_get_best_mpi_dim(rinfo); } mpi_setup_comms(rinfo); /* count # nonzeros found in each index */ idx_t * ssizes[MAX_NMODES]; for(idx_t m=0; m < ttbuf->nmodes; ++m) { ssizes[m] = (idx_t *) calloc(rinfo->global_dims[m], sizeof(idx_t)); } p_fill_ssizes(ttbuf, ssizes, rinfo); /* actually parse tensor */ sptensor_t * tt = NULL; switch(rinfo->decomp) { case SPLATT_DECOMP_COARSE: tt = p_read_tt_1d(ifname, ssizes, ttbuf->nmodes, rinfo); /* now fix tt->dims */ for(idx_t m=0; m < tt->nmodes; ++m) { tt->dims[m] = 0; for(idx_t n=0; n < tt->nnz; ++n) { tt->dims[m] = SS_MAX(tt->dims[m], tt->ind[m][n] + 1); } } break; case SPLATT_DECOMP_MEDIUM: tt = p_rearrange_medium(ttbuf, ssizes, rinfo); /* now map tensor indices to local (layer) coordinates and fill in dims */ 
#pragma omp parallel for schedule(static, 1) for(idx_t m=0; m < ttbuf->nmodes; ++m) { tt->dims[m] = rinfo->layer_ends[m] - rinfo->layer_starts[m]; for(idx_t n=0; n < tt->nnz; ++n) { assert(tt->ind[m][n] >= rinfo->layer_starts[m]); assert(tt->ind[m][n] < rinfo->layer_ends[m]); tt->ind[m][n] -= rinfo->layer_starts[m]; } } break; case SPLATT_DECOMP_FINE: tt = p_rearrange_fine(ttbuf, pfname, ssizes, rinfo); /* now fix tt->dims */ for(idx_t m=0; m < tt->nmodes; ++m) { tt->dims[m] = rinfo->global_dims[m]; rinfo->layer_ends[m] = tt->dims[m]; } break; } for(idx_t m=0; m < ttbuf->nmodes; ++m) { free(ssizes[m]); } tt_free(ttbuf); timer_stop(&timers[TIMER_IO]); return tt; } void mpi_filter_tt_1d( idx_t const mode, sptensor_t const * const tt, sptensor_t * const ftt, idx_t start, idx_t end) { assert(ftt != NULL); for(idx_t m=0; m < ftt->nmodes; ++m) { ftt->dims[m] = tt->dims[m]; } idx_t const olds = start; idx_t const olde = end; /* Adjust start and end if tt has been compressed. */ assert(start != end); if(tt->indmap[mode] != NULL) { /* TODO: change this linear search into a binary one */ for(idx_t i=0; i < tt->dims[mode]; ++i) { if(tt->indmap[mode][i] == start) { start = i; } if(tt->indmap[mode][i]+1 == end) { end = i+1; break; } } } idx_t nnz = 0; for(idx_t n=0; n < tt->nnz; ++n) { /* Copy the nonzero if we own the slice. */ if(tt->ind[mode][n] >= start && tt->ind[mode][n] < end) { for(idx_t m=0; m < tt->nmodes; ++m) { ftt->ind[m][nnz] = tt->ind[m][n]; } ftt->vals[nnz++] = tt->vals[n]; } } /* update ftt dimensions and nnz */ ftt->nnz = nnz; ftt->dims[mode] = end - start; /* now map mode coords to [0, end-start) */ for(idx_t n=0; n < ftt->nnz; ++n) { assert(ftt->ind[mode][n] >= start); assert(ftt->ind[mode][n] < end); ftt->ind[mode][n] -= start; } /* create new indmap for mode */ for(idx_t m=0; m < tt->nmodes; ++m) { if(tt->indmap[m] == NULL) { break; } ftt->indmap[m] = (idx_t *) realloc(ftt->indmap[m], ftt->dims[m] * sizeof(idx_t)); /* mode indices are shifted. 
otherwise just copy */ if(m == mode) { for(idx_t i=0; i < ftt->dims[mode]; ++i) { ftt->indmap[mode][i] = tt->indmap[mode][i+start]; } } else { par_memcpy(ftt->indmap[m], tt->indmap[m], tt->dims[m] * sizeof(idx_t)); } } /* sanity check */ for(idx_t i=0; i < ftt->dims[mode]; ++i) { assert(i + start < end); } for(idx_t n=0; n < ftt->nnz; ++n) { assert(ftt->ind[mode][n] < end - start); } } void mpi_write_mats( matrix_t ** mats, permutation_t const * const perm, rank_info const * const rinfo, char const * const basename, idx_t const nmodes) { char * fname; idx_t const nfactors = mats[0]->J; MPI_Status status; idx_t maxdim = 0; idx_t maxlocaldim = 0; matrix_t * matbuf = NULL; val_t * vbuf = NULL; idx_t * loc_iperm = NULL; for(idx_t m=0; m < nmodes; ++m) { maxdim = SS_MAX(maxdim, rinfo->global_dims[m]); maxlocaldim = SS_MAX(maxlocaldim, mats[m]->I); } /* get the largest local dim */ if(rinfo->rank == 0) { MPI_Reduce(MPI_IN_PLACE, &maxlocaldim, 1, SPLATT_MPI_IDX, MPI_MAX, 0, rinfo->comm_3d); } else { MPI_Reduce(&maxlocaldim, NULL, 1, SPLATT_MPI_IDX, MPI_MAX, 0, rinfo->comm_3d); } if(rinfo->rank == 0) { matbuf = mat_alloc(maxdim, nfactors); loc_iperm = (idx_t *) splatt_malloc(maxdim * sizeof(idx_t)); vbuf = (val_t *) splatt_malloc(maxdim * nfactors * sizeof(val_t)); } for(idx_t m=0; m < nmodes; ++m) { /* root handles the writing */ if(rinfo->rank == 0) { asprintf(&fname, "%s%"SPLATT_PF_IDX".mat", basename, m+1); matbuf->I = rinfo->global_dims[m]; /* copy root's matrix to buffer */ for(idx_t i=0; i < mats[m]->I; ++i) { idx_t const gi = rinfo->layer_starts[m] + perm->iperms[m][i]; for(idx_t f=0; f < nfactors; ++f) { matbuf->vals[f + (gi*nfactors)] = mats[m]->vals[f+(i*nfactors)]; } } /* receive matrix from each rank */ for(int p=1; p < rinfo->npes; ++p) { idx_t layerstart; idx_t nrows; MPI_Recv(&layerstart, 1, SPLATT_MPI_IDX, p, 0, rinfo->comm_3d, &status); MPI_Recv(&nrows, 1, SPLATT_MPI_IDX, p, 0, rinfo->comm_3d, &status); MPI_Recv(vbuf, nrows * nfactors, SPLATT_MPI_VAL, p, 
0, rinfo->comm_3d, &status); MPI_Recv(loc_iperm, nrows, SPLATT_MPI_IDX, p, 0, rinfo->comm_3d, &status); /* permute buffer and copy into matbuf */ for(idx_t i=0; i < nrows; ++i) { idx_t const gi = layerstart + loc_iperm[i]; for(idx_t f=0; f < nfactors; ++f) { matbuf->vals[f + (gi*nfactors)] = vbuf[f+(i*nfactors)]; } } } /* write the factor matrix to disk */ mat_write(matbuf, fname); /* clean up */ free(fname); } else { /* send matrix to root */ MPI_Send(&(rinfo->layer_starts[m]), 1, SPLATT_MPI_IDX, 0, 0, rinfo->comm_3d); MPI_Send(&(mats[m]->I), 1, SPLATT_MPI_IDX, 0, 0, rinfo->comm_3d); MPI_Send(mats[m]->vals, mats[m]->I * mats[m]->J, SPLATT_MPI_VAL, 0, 0, rinfo->comm_3d); MPI_Send(perm->iperms[m] + rinfo->mat_start[m], mats[m]->I, SPLATT_MPI_IDX, 0, 0, rinfo->comm_3d); } } /* foreach mode */ if(rinfo->rank == 0) { mat_free(matbuf); free(vbuf); free(loc_iperm); } } void mpi_write_part( sptensor_t const * const tt, permutation_t const * const perm, rank_info const * const rinfo) { /* file name is <rank>.part */ char name[256]; sprintf(name, "%d.part", rinfo->rank); FILE * fout = open_f(name, "w"); for(idx_t n=0; n < tt->nnz; ++n) { for(idx_t m=0; m < tt->nmodes; ++m) { /* map idx to original global coordinate */ idx_t idx = tt->ind[m][n]; if(tt->indmap[m] != NULL) { idx = tt->indmap[m][idx]; } if(perm->iperms[m] != NULL) { idx = perm->iperms[m][idx]; } /* write index */ fprintf(fout, "%"SPLATT_PF_IDX" ", 1+idx); } fprintf(fout, "%"SPLATT_PF_VAL"\n", tt->vals[n]); } fclose(fout); } sptensor_t * mpi_simple_distribute( char const * const ifname, MPI_Comm comm) { int rank, npes; MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &npes); sptensor_t * tt = NULL; FILE * fin = NULL; if(rank == 0) { fin = open_f(ifname, "r"); } switch(get_file_type(ifname)) { case SPLATT_FILE_TEXT_COORD: tt = p_tt_mpi_read_file(fin, comm); break; case SPLATT_FILE_BIN_COORD: tt = p_tt_mpi_read_binary_file(fin, comm); break; } if(rank == 0) { fclose(fin); } /* set dims info */ #pragma omp parallel 
for schedule(static, 1) for(idx_t m=0; m < tt->nmodes; ++m) { idx_t const * const inds = tt->ind[m]; idx_t dim = 1 +inds[0]; for(idx_t n=1; n < tt->nnz; ++n) { dim = SS_MAX(dim, 1 + inds[n]); } tt->dims[m] = dim; } return tt; } matrix_t * mpi_mat_rand( idx_t const mode, idx_t const nfactors, permutation_t const * const perm, rank_info * const rinfo) { idx_t const localdim = rinfo->mat_end[mode] - rinfo->mat_start[mode]; matrix_t * mymat = mat_alloc(localdim, nfactors); MPI_Status status; /* figure out buffer sizes */ idx_t maxlocaldim = localdim; if(rinfo->rank == 0) { MPI_Reduce(MPI_IN_PLACE, &maxlocaldim, 1, SPLATT_MPI_IDX, MPI_MAX, 0, rinfo->comm_3d); } else { MPI_Reduce(&maxlocaldim, NULL, 1, SPLATT_MPI_IDX, MPI_MAX, 0, rinfo->comm_3d); } /* root rank does the heavy lifting */ if(rinfo->rank == 0) { /* allocate buffers */ idx_t * loc_perm = splatt_malloc(maxlocaldim * sizeof(*loc_perm)); val_t * vbuf = splatt_malloc(maxlocaldim * nfactors * sizeof(*vbuf)); /* allocate initial factor */ matrix_t * full_factor = mat_rand(rinfo->global_dims[mode], nfactors); /* copy root's own matrix to output */ #pragma omp parallel for schedule(static) for(idx_t i=0; i < localdim; ++i) { idx_t const gi = rinfo->mat_start[mode] + perm->iperms[mode][i]; for(idx_t f=0; f < nfactors; ++f) { mymat->vals[f + (i*nfactors)] = full_factor->vals[f+(gi*nfactors)]; } } /* communicate! 
*/ for(int p=1; p < rinfo->npes; ++p) { /* first receive layer start and permutation info */ idx_t layerstart; idx_t nrows; MPI_Recv(&layerstart, 1, SPLATT_MPI_IDX, p, 0, rinfo->comm_3d, &status); MPI_Recv(&nrows, 1, SPLATT_MPI_IDX, p, 1, rinfo->comm_3d, &status); MPI_Recv(loc_perm, nrows, SPLATT_MPI_IDX, p, 2, rinfo->comm_3d, &status); /* fill buffer */ #pragma omp parallel for schedule(static) for(idx_t i=0; i < nrows; ++i) { idx_t const gi = layerstart + loc_perm[i]; for(idx_t f=0; f < nfactors; ++f) { vbuf[f + (i*nfactors)] = full_factor->vals[f+(gi*nfactors)]; } } /* send to rank p */ MPI_Send(vbuf, nrows * nfactors, SPLATT_MPI_VAL, p, 3, rinfo->comm_3d); } mat_free(full_factor); splatt_free(loc_perm); splatt_free(vbuf); /* other ranks just send/recv */ } else { /* send permutation info to root */ MPI_Send(&(rinfo->layer_starts[mode]), 1, SPLATT_MPI_IDX, 0, 0, rinfo->comm_3d); MPI_Send(&localdim, 1, SPLATT_MPI_IDX, 0, 1, rinfo->comm_3d); MPI_Send(perm->iperms[mode] + rinfo->mat_start[mode], localdim, SPLATT_MPI_IDX, 0, 2, rinfo->comm_3d); /* receive factor */ MPI_Recv(mymat->vals, mymat->I * mymat->J, SPLATT_MPI_VAL, 0, 3, rinfo->comm_3d, &status); } return mymat; } sptensor_t * mpi_rearrange_by_part( sptensor_t const * const ttbuf, int const * const parts, MPI_Comm comm) { int rank, npes; MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &npes); /* count how many to send to each process */ int * nsend = calloc(npes, sizeof(*nsend)); int * nrecv = calloc(npes, sizeof(*nrecv)); for(idx_t n=0; n < ttbuf->nnz; ++n) { nsend[parts[n]] += 1; } MPI_Alltoall(nsend, 1, MPI_INT, nrecv, 1, MPI_INT, comm); idx_t send_total = 0; idx_t recv_total = 0; for(int p=0; p < npes; ++p) { send_total += nsend[p]; recv_total += nrecv[p]; } assert(send_total = ttbuf->nnz); /* how many nonzeros I'll own */ idx_t const nowned = recv_total; int * send_disp = splatt_malloc((npes+1) * sizeof(*send_disp)); int * recv_disp = splatt_malloc((npes+1) * sizeof(*recv_disp)); /* recv_disp is const 
so we'll just fill it out once */ recv_disp[0] = 0; for(int p=1; p <= npes; ++p) { recv_disp[p] = recv_disp[p-1] + nrecv[p-1]; } /* allocate my tensor and send buffer */ sptensor_t * tt = tt_alloc(nowned, ttbuf->nmodes); idx_t * isend_buf = splatt_malloc(ttbuf->nnz * sizeof(*isend_buf)); /* rearrange into sendbuf and send one mode at a time */ for(idx_t m=0; m < ttbuf->nmodes; ++m) { /* prefix sum to make disps */ send_disp[0] = send_disp[1] = 0; for(int p=2; p <= npes; ++p) { send_disp[p] = send_disp[p-1] + nsend[p-2]; } idx_t const * const ind = ttbuf->ind[m]; for(idx_t n=0; n < ttbuf->nnz; ++n) { idx_t const index = send_disp[parts[n]+1]++; isend_buf[index] = ind[n]; } /* exchange indices */ MPI_Alltoallv(isend_buf, nsend, send_disp, SPLATT_MPI_IDX, tt->ind[m], nrecv, recv_disp, SPLATT_MPI_IDX, comm); } splatt_free(isend_buf); /* lastly, rearrange vals */ val_t * vsend_buf = splatt_malloc(ttbuf->nnz * sizeof(*vsend_buf)); send_disp[0] = send_disp[1] = 0; for(int p=2; p <= npes; ++p) { send_disp[p] = send_disp[p-1] + nsend[p-2]; } val_t const * const vals = ttbuf->vals; for(idx_t n=0; n < ttbuf->nnz; ++n) { idx_t const index = send_disp[parts[n]+1]++; vsend_buf[index] = vals[n]; } /* exchange vals */ MPI_Alltoallv(vsend_buf, nsend, send_disp, SPLATT_MPI_VAL, tt->vals, nrecv, recv_disp, SPLATT_MPI_VAL, comm); splatt_free(vsend_buf); splatt_free(send_disp); splatt_free(recv_disp); /* allocated with calloc */ free(nsend); free(nrecv); return tt; } int mpi_determine_med_owner( sptensor_t * const ttbuf, idx_t const n, rank_info * const rinfo) { int coords[MAX_NMODES]; assert(rinfo->decomp == SPLATT_DECOMP_MEDIUM); /* determine the coordinates of the owner rank */ for(idx_t m=0; m < ttbuf->nmodes; ++m) { idx_t const id = ttbuf->ind[m][n]; /* silly linear scan over each layer. 
* TODO: do a binary search */ for(int l=0; l <= rinfo->dims_3d[m]; ++l) { if(id < rinfo->layer_ptrs[m][l]) { coords[m] = l-1; break; } } } /* translate that to an MPI rank */ int owner; MPI_Cart_rank(rinfo->comm_3d, coords, &owner); return owner; }
omp_sections_reduction.c
<ompts:test>
<ompts:testdescription>Test which checks the omp sections reduction directive with all its options.</ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp sections reduction</ompts:directive>
<ompts:testcode>
#include <stdio.h>
#include <math.h>
#include "omp_testsuite.h"

/* Exercises every reduction operator (+ - * && || & | ^) on an
 * 'omp sections' construct, each split across three sections.
 * Returns 1 on success, 0 if any sub-test failed. */
int <ompts:testcode:functionname>omp_sections_reduction</ompts:testcode:functionname>(FILE * logFile){
	<ompts:orphan:vars>
	int sum;
	int known_sum;
	double dpt,dsum;
	double dknown_sum;
	double dt=0.5;			/* base of geometric row for + and - test*/
	double rounding_error= 1.E-9;	/* tolerance for the floating-point checks */
	int diff;
	double ddiff;
	int product;
	int known_product;
	int logic_and;
	int bit_and;
	int logic_or;
	int bit_or;
	int exclusiv_bit_or;
	int logics[1000];
	int i;
	int result;			/* counts failed sub-tests */
	</ompts:orphan:vars>
	/* int my_islarger;*/
	/*int is_larger=1;*/

	sum =7;
	dpt =1;
	dsum=0;
	product =1;
	logic_and=1;
	bit_and=1;
	logic_or=0;
	bit_or=0;
	exclusiv_bit_or=0;
	result = 0;
	dt = 1./3.;
	/* Gauss sum of 1..999 plus the initial value 7 */
	known_sum = (999*1000)/2+7;
	<ompts:orphan>
#pragma omp parallel
	{
#pragma omp sections private(i) <ompts:check>reduction(+:sum)</ompts:check><ompts:crosscheck></ompts:crosscheck>
		{
#pragma omp section
			{ for (i=1;i<300;i++) { sum=sum+i; } }
#pragma omp section
			{ for (i=300;i<700;i++) { sum=sum+i; } }
#pragma omp section
			{ for (i=700;i<1000;i++) { sum=sum+i; } }
		}
	}
	if(known_sum!=sum) {
		++result;
		fprintf(logFile,"Error in sum with integers: Result was %d instead of %d\n", sum,known_sum);
	}

	/* subtraction reduction: start at the full sum, subtract 1..999 */
	diff = (999*1000)/2;
#pragma omp parallel
	{
#pragma omp sections private(i) <ompts:check>reduction(-:diff)</ompts:check><ompts:crosscheck></ompts:crosscheck>
		{
#pragma omp section
			{ for (i=1;i<300;i++) { diff=diff-i; } }
#pragma omp section
			{ for (i=300;i<700;i++) { diff=diff-i; } }
#pragma omp section
			{ for (i=700;i<1000;i++) { diff=diff-i; } }
		}
	}
	if(diff != 0) {
		result++;
		fprintf(logFile,"Error in Difference with integers: Result was %d instead of 0.\n",diff);
	}

	/* closed form of the geometric series sum_{i=0}^{19} dt^i */
	for (i=0;i<20;++i) { dpt*=dt; }
	dknown_sum = (1-dpt)/(1-dt);
#pragma omp parallel
	{
#pragma omp sections private(i) <ompts:check>reduction(+:dsum)</ompts:check><ompts:crosscheck></ompts:crosscheck>
		{
#pragma omp section
			{ for (i=0;i<6;++i) { dsum += pow(dt,i); } }
#pragma omp section
			{ for (i=6;i<12;++i) { dsum += pow(dt,i); } }
#pragma omp section
			{ for (i=12;i<20;++i) { dsum += pow(dt,i); } }
		}
	}
	if( fabs(dsum-dknown_sum) > rounding_error ) {
		result++;
		fprintf(logFile,"Error in sum with doubles: Result was %f instead of %f (Difference: %E)\n",dsum,dknown_sum, dsum-dknown_sum);
	}

	dpt=1;
	for (i=0;i<20;++i) { dpt*=dt; }
	fprintf(logFile,"\n");
	ddiff = (1-dpt)/(1-dt);
#pragma omp parallel
	{
#pragma omp sections private(i) <ompts:check>reduction(-:ddiff)</ompts:check><ompts:crosscheck></ompts:crosscheck>
		{
#pragma omp section
			{ for (i=0;i<6;++i) { ddiff -= pow(dt,i); } }
#pragma omp section
			{ for (i=6;i<12;++i) { ddiff -= pow(dt,i); } }
#pragma omp section
			{ for (i=12;i<20;++i) { ddiff -= pow(dt,i); } }
		}
	}
	if( fabs(ddiff) > rounding_error) {
		result++;
		fprintf(logFile,"Error in Difference with doubles: Result was %E instead of 0.0\n",ddiff);
	}

	/* 10! split over three sections */
	known_product = 3628800;
#pragma omp parallel
	{
#pragma omp sections private(i) <ompts:check>reduction(*:product)</ompts:check><ompts:crosscheck></ompts:crosscheck>
		{
#pragma omp section
			{ for(i=1;i<3;i++) { product *= i; } }
#pragma omp section
			{ for(i=3;i<7;i++) { product *= i; } }
#pragma omp section
			{ for(i=7;i<11;i++) { product *= i; } }
		}
	}
	if(known_product != product) {
		result++;
		fprintf(logFile,"Error in Product with integers: Result was %d instead of %d\n",product,known_product);
	}

	/* logical AND: all-ones stays true (part 1), a single 0 flips it (part 2) */
	for(i=0;i<1000;i++) { logics[i]=1; }
#pragma omp parallel
	{
#pragma omp sections private(i) <ompts:check>reduction(&&:logic_and)</ompts:check><ompts:crosscheck></ompts:crosscheck>
		{
#pragma omp section
			{ for (i=1;i<300;i++) { logic_and = (logic_and && logics[i]); } }
#pragma omp section
			{ for (i=300;i<700;i++) { logic_and = (logic_and && logics[i]); } }
#pragma omp section
			{ for (i=700;i<1000;i++) { logic_and = (logic_and && logics[i]); } }
		}
	}
	if(!logic_and) {
		result++;
		fprintf(logFile,"Error in logic AND part 1\n");
	}

	logic_and = 1;
	logics[501] = 0;
#pragma omp parallel
	{
#pragma omp sections private(i) <ompts:check>reduction(&&:logic_and)</ompts:check><ompts:crosscheck></ompts:crosscheck>
		{
#pragma omp section
			{ for (i=1;i<300;i++) { logic_and = (logic_and && logics[i]); } }
#pragma omp section
			{ for (i=300;i<700;i++) { logic_and = (logic_and && logics[i]); } }
#pragma omp section
			{ for (i=700;i<1000;i++) { logic_and = (logic_and && logics[i]); } }
		}
	}
	if(logic_and) {
		result++;
		fprintf(logFile,"Error in logic AND part 2\n");
	}

	/* logical OR: all-zeros stays false (part 1), a single 1 flips it (part 2) */
	for(i=0;i<1000;i++) { logics[i]=0; }
#pragma omp parallel
	{
#pragma omp sections private(i) <ompts:check>reduction(||:logic_or)</ompts:check><ompts:crosscheck></ompts:crosscheck>
		{
#pragma omp section
			{ for (i=1;i<300;i++) { logic_or = (logic_or || logics[i]); } }
#pragma omp section
			{ for (i=300;i<700;i++) { logic_or = (logic_or || logics[i]); } }
#pragma omp section
			{ for (i=700;i<1000;i++) { logic_or = (logic_or || logics[i]); } }
		}
	}
	if(logic_or) {
		result++;
		fprintf(logFile,"\nError in logic OR part 1\n");
	}

	logic_or = 0;
	logics[501]=1;
#pragma omp parallel
	{
#pragma omp sections private(i) <ompts:check>reduction(||:logic_or)</ompts:check><ompts:crosscheck></ompts:crosscheck>
		{
#pragma omp section
			{ for (i=1;i<300;i++) { logic_or = (logic_or || logics[i]); } }
#pragma omp section
			{ for (i=300;i<700;i++) { logic_or = (logic_or || logics[i]); } }
#pragma omp section
			{ for (i=700;i<1000;i++) { logic_or = (logic_or || logics[i]); } }
		}
	}
	if(!logic_or) {
		result++;
		fprintf(logFile,"Error in logic OR part 2\n");
	}

	/* bitwise AND over 0/1 flags, same scheme as logical AND */
	for(i=0;i<1000;++i) { logics[i]=1; }
#pragma omp parallel
	{
#pragma omp sections private(i) <ompts:check>reduction(&:bit_and)</ompts:check><ompts:crosscheck></ompts:crosscheck>
		{
#pragma omp section
			{ for(i=0;i<300;++i) { bit_and = (bit_and & logics[i]); } }
#pragma omp section
			{ for(i=300;i<700;++i) { bit_and = (bit_and & logics[i]); } }
#pragma omp section
			{ for(i=700;i<1000;++i) { bit_and = (bit_and & logics[i]); } }
		}
	}
	if(!bit_and) {
		result++;
		fprintf(logFile,"Error in BIT AND part 1\n");
	}

	bit_and = 1;
	logics[501]=0;
#pragma omp parallel
	{
#pragma omp sections private(i) <ompts:check>reduction(&:bit_and)</ompts:check><ompts:crosscheck></ompts:crosscheck>
		{
#pragma omp section
			{ for(i=0;i<300;++i) { bit_and = bit_and & logics[i]; } }
#pragma omp section
			{ for(i=300;i<700;++i) { bit_and = bit_and & logics[i]; } }
#pragma omp section
			{ for(i=700;i<1000;++i) { bit_and = bit_and & logics[i]; } }
		}
	}
	if(bit_and) {
		result++;
		fprintf(logFile,"Error in BIT AND part 2\n");
	}

	/* bitwise OR over 0/1 flags, same scheme as logical OR */
	for(i=0;i<1000;i++) { logics[i]=0; }
#pragma omp parallel
	{
#pragma omp sections private(i) <ompts:check>reduction(|:bit_or)</ompts:check><ompts:crosscheck></ompts:crosscheck>
		{
#pragma omp section
			{ for(i=0;i<300;++i) { bit_or = bit_or | logics[i]; } }
#pragma omp section
			{ for(i=300;i<700;++i) { bit_or = bit_or | logics[i]; } }
#pragma omp section
			{ for(i=700;i<1000;++i) { bit_or = bit_or | logics[i]; } }
		}
	}
	if(bit_or) {
		result++;
		fprintf(logFile,"Error in BIT OR part 1\n");
	}

	bit_or = 0;
	logics[501]=1;
#pragma omp parallel
	{
#pragma omp sections private(i) <ompts:check>reduction(|:bit_or)</ompts:check><ompts:crosscheck></ompts:crosscheck>
		{
#pragma omp section
			{ for(i=0;i<300;++i) { bit_or = bit_or | logics[i]; } }
#pragma omp section
			{ for(i=300;i<700;++i) { bit_or = bit_or | logics[i]; } }
#pragma omp section
			{ for(i=700;i<1000;++i) { bit_or = bit_or | logics[i]; } }
		}
	}
	if(!bit_or) {
		result++;
		fprintf(logFile,"Error in BIT OR part 2\n");
	}

	/* bitwise XOR: all zeros yields 0 (part 1); exactly one 1 yields 1 (part 2) */
	for(i=0;i<1000;i++) { logics[i]=0; }
#pragma omp parallel
	{
#pragma omp sections private(i) <ompts:check>reduction(^:exclusiv_bit_or)</ompts:check><ompts:crosscheck></ompts:crosscheck>
		{
#pragma omp section
			{ for(i=0;i<300;++i) { exclusiv_bit_or = exclusiv_bit_or ^ logics[i]; } }
#pragma omp section
			{ for(i=300;i<700;++i) { exclusiv_bit_or = exclusiv_bit_or ^ logics[i]; } }
#pragma omp section
			{ for(i=700;i<1000;++i) { exclusiv_bit_or = exclusiv_bit_or ^ logics[i]; } }
		}
	}
	if(exclusiv_bit_or) {
		result++;
		fprintf(logFile,"Error in EXCLUSIV BIT OR part 1\n");
	}

	exclusiv_bit_or = 0;
	logics[501]=1;
#pragma omp parallel
	{
#pragma omp sections private(i) <ompts:check>reduction(^:exclusiv_bit_or)</ompts:check><ompts:crosscheck></ompts:crosscheck>
		{
#pragma omp section
			{ for(i=0;i<300;++i) { exclusiv_bit_or = exclusiv_bit_or ^ logics[i]; } }
#pragma omp section
			{ for(i=300;i<700;++i) { exclusiv_bit_or = exclusiv_bit_or ^ logics[i]; } }
#pragma omp section
			{ for(i=700;i<1000;++i) { exclusiv_bit_or = exclusiv_bit_or ^ logics[i]; } }
		}
	}
	if(!exclusiv_bit_or) {
		result++;
		fprintf(logFile,"Error in EXCLUSIV BIT OR part 2\n");
	}
	</ompts:orphan>
	/*printf("\nResult:%d\n",result);*/
	return (result==0);
}
</ompts:testcode>
</ompts:test>
HelloOpenMP_fix4.c
#include <stdio.h>
#include <omp.h>

/* Spawns an OpenMP parallel region; only the master thread reports the
 * team size and its own thread id. */
int main(int argc, char *argv[]){
#pragma omp parallel
    {
        /* queried inside the region so the team is actually active */
        int team_size = omp_get_num_threads();
        int tid = omp_get_thread_num();
#pragma omp master
        {
            printf("Goodbye slow serial world and Hello OpenMP!\n");
            printf(" I have %d thread(s) and my thread id is %d\n",team_size,tid);
        }
    }
    return 0;
}
YAKL_reductions.h
#pragma once template <class T, int myMem> class ParallelMin; template <class T, int myMem> class ParallelMax; template <class T, int myMem> class ParallelSum; #ifdef YAKL_ARCH_HIP template <class T> class ParallelMin<T,memDevice> { void *tmp; // Temporary storage size_t nTmp; // Size of temporary storage int nItems; // Number of items in the array that will be reduced T *rsltP; // Device pointer for reduction result public: ParallelMin() { tmp = NULL; } ParallelMin(int const nItems) { tmp = NULL; setup(nItems); } ~ParallelMin() { finalize(); } void setup(int const nItems) { finalize(); rsltP = (T *) yaklAllocDevice(sizeof(T),""); // Allocate device pointer for result // Get the amount of temporary storage needed (call with NULL storage pointer) hipcub::DeviceReduce::Min(tmp, nTmp, rsltP , rsltP , nItems ); tmp = yaklAllocDevice(nTmp,""); // Allocate temporary storage this->nItems = nItems; } void finalize() { if (tmp != NULL) { yaklFreeDevice(rsltP,""); yaklFreeDevice(tmp,""); } tmp = NULL; } T operator() (T *data) { T rslt; hipcub::DeviceReduce::Min(tmp, nTmp, data , rsltP , nItems , 0 ); // Compute the reduction hipMemcpyAsync(&rslt,rsltP,sizeof(T),hipMemcpyDeviceToHost,0); // Copy result to host check_last_error(); fence(); return rslt; } void deviceReduce(T *data, T *devP) { hipcub::DeviceReduce::Min(tmp, nTmp, data , devP , nItems , 0 ); // Compute the reduction #if defined(YAKL_AUTO_FENCE) || defined(YAKL_DEBUG) fence(); #endif } }; template <class T> class ParallelMax<T,memDevice> { void *tmp; // Temporary storage size_t nTmp; // Size of temporary storage int nItems; // Number of items in the array that will be reduced T *rsltP; // Device pointer for reduction result public: ParallelMax() { tmp = NULL; } ParallelMax(int const nItems) { tmp = NULL; setup(nItems); } ~ParallelMax() { finalize(); } void setup(int const nItems) { finalize(); rsltP = (T *) yaklAllocDevice(sizeof(T),""); // Allocate device pointer for result // Get the amount of temporary storage 
needed (call with NULL storage pointer) hipcub::DeviceReduce::Max(tmp, nTmp, rsltP , rsltP , nItems ); tmp = yaklAllocDevice(nTmp,""); // Allocate temporary storage this->nItems = nItems; } void finalize() { if (tmp != NULL) { yaklFreeDevice(rsltP,""); yaklFreeDevice(tmp,""); } tmp = NULL; } T operator() (T *data) { T rslt; hipcub::DeviceReduce::Max(tmp, nTmp, data , rsltP , nItems , 0 ); // Compute the reduction hipMemcpyAsync(&rslt,rsltP,sizeof(T),hipMemcpyDeviceToHost,0); // Copy result to host check_last_error(); fence(); return rslt; } void deviceReduce(T *data, T *devP) { hipcub::DeviceReduce::Max(tmp, nTmp, data , devP , nItems , 0 ); // Compute the reduction #if defined(YAKL_AUTO_FENCE) || defined(YAKL_DEBUG) fence(); #endif } }; template <class T> class ParallelSum<T,memDevice> { void *tmp; // Temporary storage size_t nTmp; // Size of temporary storage int nItems; // Number of items in the array that will be reduced T *rsltP; // Device pointer for reduction result public: ParallelSum() { tmp = NULL; } ParallelSum(int const nItems) { tmp = NULL; setup(nItems); } ~ParallelSum() { finalize(); } void setup(int const nItems) { finalize(); rsltP = (T *) yaklAllocDevice(sizeof(T),""); // Allocate device pointer for result // Get the amount of temporary storage needed (call with NULL storage pointer) hipcub::DeviceReduce::Sum(tmp, nTmp, rsltP , rsltP , nItems ); tmp = yaklAllocDevice(nTmp,""); // Allocate temporary storage this->nItems = nItems; } void finalize() { if (tmp != NULL) { yaklFreeDevice(rsltP,""); yaklFreeDevice(tmp,""); } tmp = NULL; } T operator() (T *data) { T rslt; hipcub::DeviceReduce::Sum(tmp, nTmp, data , rsltP , nItems , 0 ); // Compute the reduction hipMemcpyAsync(&rslt,rsltP,sizeof(T),hipMemcpyDeviceToHost,0); // Copy result to host check_last_error(); fence(); return rslt; } void deviceReduce(T *data, T *devP) { hipcub::DeviceReduce::Sum(tmp, nTmp, data , devP , nItems , 0 ); // Compute the reduction #if defined(YAKL_AUTO_FENCE) || 
defined(YAKL_DEBUG) fence(); #endif } }; #elif defined(YAKL_ARCH_CUDA) template <class T> class ParallelMin<T,memDevice> { void *tmp; // Temporary storage size_t nTmp; // Size of temporary storage int nItems; // Number of items in the array that will be reduced T *rsltP; // Device pointer for reduction result public: ParallelMin() { tmp = NULL; } ParallelMin(int const nItems) { tmp = NULL; setup(nItems); } ~ParallelMin() { finalize(); } void setup(int const nItems) { finalize(); rsltP = (T *) yaklAllocDevice(sizeof(T),""); // Allocate device pointer for result // Get the amount of temporary storage needed (call with NULL storage pointer) cub::DeviceReduce::Min(tmp, nTmp, rsltP , rsltP , nItems ); tmp = yaklAllocDevice(nTmp,""); // Allocate temporary storage this->nItems = nItems; } void finalize() { if (tmp != NULL) { yaklFreeDevice(rsltP,""); yaklFreeDevice(tmp,""); } tmp = NULL; } T operator() (T *data) { T rslt; cub::DeviceReduce::Min(tmp, nTmp, data , rsltP , nItems , 0 ); // Compute the reduction cudaMemcpyAsync(&rslt,rsltP,sizeof(T),cudaMemcpyDeviceToHost,0); // Copy result to host check_last_error(); fence(); return rslt; } void deviceReduce(T *data, T *devP) { cub::DeviceReduce::Min(tmp, nTmp, data , devP , nItems , 0 ); // Compute the reduction #if defined(YAKL_AUTO_FENCE) || defined(YAKL_DEBUG) fence(); #endif } }; template <class T> class ParallelMax<T,memDevice> { void *tmp; // Temporary storage size_t nTmp; // Size of temporary storage int nItems; // Number of items in the array that will be reduced T *rsltP; // Device pointer for reduction result public: ParallelMax() { tmp = NULL; } ParallelMax(int const nItems) { tmp = NULL; setup(nItems); } ~ParallelMax() { finalize(); } void setup(int const nItems) { finalize(); rsltP = (T *) yaklAllocDevice(sizeof(T),""); // Allocate device pointer for result // Get the amount of temporary storage needed (call with NULL storage pointer) cub::DeviceReduce::Max(tmp, nTmp, rsltP , rsltP , nItems ); tmp = 
yaklAllocDevice(nTmp,""); // Allocate temporary storage this->nItems = nItems; } void finalize() { if (tmp != NULL) { yaklFreeDevice(rsltP,""); yaklFreeDevice(tmp,""); } tmp = NULL; } T operator() (T *data) { T rslt; cub::DeviceReduce::Max(tmp, nTmp, data , rsltP , nItems , 0 ); // Compute the reduction cudaMemcpyAsync(&rslt,rsltP,sizeof(T),cudaMemcpyDeviceToHost,0); // Copy result to host check_last_error(); fence(); return rslt; } void deviceReduce(T *data, T *devP) { cub::DeviceReduce::Max(tmp, nTmp, data , devP , nItems , 0 ); // Compute the reduction #if defined(YAKL_AUTO_FENCE) || defined(YAKL_DEBUG) fence(); #endif } }; template <class T> class ParallelSum<T,memDevice> { void *tmp; // Temporary storage size_t nTmp; // Size of temporary storage int nItems; // Number of items in the array that will be reduced T *rsltP; // Device pointer for reduction result public: ParallelSum() { tmp = NULL; } ParallelSum(int const nItems) { tmp = NULL; setup(nItems); } ~ParallelSum() { finalize(); } void setup(int const nItems) { finalize(); rsltP = (T *) yaklAllocDevice(sizeof(T),""); // Allocate device pointer for result // Get the amount of temporary storage needed (call with NULL storage pointer) cub::DeviceReduce::Sum(tmp, nTmp, rsltP , rsltP , nItems ); tmp = yaklAllocDevice(nTmp,""); // Allocate temporary storage this->nItems = nItems; } void finalize() { if (tmp != NULL) { yaklFreeDevice(rsltP,""); yaklFreeDevice(tmp,""); } tmp = NULL; } T operator() (T *data) { T rslt; cub::DeviceReduce::Sum(tmp, nTmp, data , rsltP , nItems , 0 ); // Compute the reduction cudaMemcpyAsync(&rslt,rsltP,sizeof(T),cudaMemcpyDeviceToHost,0); // Copy result to host check_last_error(); fence(); return rslt; } void deviceReduce(T *data, T *devP) { cub::DeviceReduce::Sum(tmp, nTmp, data , devP , nItems , 0 ); // Compute the reduction #if defined(YAKL_AUTO_FENCE) || defined(YAKL_DEBUG) fence(); #endif } }; #elif defined(YAKL_ARCH_SYCL) static inline size_t get_wg_size_for_reduction(size_t 
bytes_per_wi) {
  // The best work-group size depends on implementation details
  // We make the following assumptions, which aren't specific to DPC++:
  //  - Bigger work-groups are better
  //  - An implementation may reserve 1 element per work-item in shared memory
  // In practice, DPC++ seems to limit itself to 1/2 of this
  const size_t max_size  = sycl_default_stream().get_device().get_info<sycl::info::device::max_work_group_size>();
  const size_t local_mem = sycl_default_stream().get_device().get_info<sycl::info::device::local_mem_size>();
  return std::min(local_mem / bytes_per_wi, max_size) / 2;
}

// Round N up to the next multiple of `multiple`.
static inline size_t round_up(size_t N, size_t multiple) {
  return ((N + multiple - 1) / multiple) * multiple;
}

// Build an nd_range whose global size covers N items, rounded up to a whole
// number of work-groups. `reductionVars` is only used for its pointee size.
template <class T>
static inline sycl::nd_range<1> get_reduction_range(size_t N, T reductionVars) {
  size_t bytes_per_wi = sizeof( std::remove_pointer_t<T> );
  size_t L = get_wg_size_for_reduction(bytes_per_wi);
  size_t G = round_up(N, L);
  return sycl::nd_range<1>{G, L};
}

// SYCL backend: reductions use sycl::reduction with initialize_to_identity,
// accumulating into a device-resident result slot (rsltP).
template <class T> class ParallelMin<T,memDevice> {
  int nItems;  // Number of items in the array that will be reduced
  T *rsltP;    // Device pointer for reduction result
public:
  ParallelMin() { rsltP = nullptr; }
  ParallelMin(int const nItems) { rsltP = nullptr; setup(nItems); }
  ~ParallelMin() { finalize(); }
  // Allocate the device result slot for a reduction of nItems elements.
  void setup(int const nItems) {
    finalize();
    rsltP = (T *) yaklAllocDevice(sizeof(T),""); // Allocate device pointer for result
    this->nItems = nItems;
  }
  void finalize() {
    if(rsltP != nullptr) {
      yaklFreeDevice(rsltP,"");
    }
    rsltP = nullptr;
  }
  // Blocking reduction: returns min(data[0..nItems)) on the host.
  T operator() (T *data) {
    T rslt=0;
    sycl_default_stream().submit([&, nItems = this->nItems](sycl::handler &cgh) {
      cgh.parallel_for(get_reduction_range(nItems, rsltP),
                       sycl::reduction(rsltP, sycl::minimum<>(), sycl::property::reduction::initialize_to_identity{}),
                       [=](sycl::nd_item<1> idx, auto& min) {
        const int i = idx.get_global_linear_id();
        if (i < nItems) { min.combine(data[i]); }  // guard: range is rounded up past nItems
      });
    });
    sycl_default_stream().memcpy(&rslt,rsltP,sizeof(T)); // Copy result to host
    fence();  // wait for kernel + memcpy so rslt is valid on return
    return rslt;
  }
  // Asynchronous reduction into a caller-provided device pointer devP.
  void deviceReduce(T *data, T *devP) {
    sycl_default_stream().submit([&, nItems = this->nItems](sycl::handler &cgh) {
      cgh.parallel_for(get_reduction_range(nItems, devP),
                       sycl::reduction(devP, sycl::minimum<>(), sycl::property::reduction::initialize_to_identity{}),
                       [=](sycl::nd_item<1> idx, auto& min) {
        const int i = idx.get_global_linear_id();
        if (i < nItems) { min.combine(data[i]); }
      });
    });
    #if defined(YAKL_AUTO_FENCE) || defined(YAKL_DEBUG)
      fence();
    #endif
  }
};

// Same pattern as ParallelMin, using sycl::maximum<>.
template <class T> class ParallelMax<T,memDevice> {
  int nItems;  // Number of items in the array that will be reduced
  T *rsltP;    // Device pointer for reduction result
public:
  ParallelMax() { rsltP = nullptr; }
  ParallelMax(int const nItems) { rsltP = nullptr; setup(nItems); }
  ~ParallelMax() { finalize(); }
  void setup(int const nItems) {
    finalize();
    rsltP = (T *) yaklAllocDevice(sizeof(T),""); // Allocate device pointer for result
    this->nItems = nItems;
  }
  void finalize() {
    if(rsltP != nullptr) {
      yaklFreeDevice(rsltP,"");
    }
    rsltP = nullptr;
  }
  // Blocking reduction: returns max(data[0..nItems)) on the host.
  T operator() (T *data) {
    T rslt=0;
    sycl_default_stream().submit([&, nItems = this->nItems](sycl::handler &cgh) {
      cgh.parallel_for(get_reduction_range(nItems, rsltP),
                       sycl::reduction(rsltP, sycl::maximum<>(), sycl::property::reduction::initialize_to_identity{}),
                       [=](sycl::nd_item<1> idx, auto& max) {
        const int i = idx.get_global_linear_id();
        if (i < nItems) { max.combine(data[i]); }
      });
    });
    sycl_default_stream().memcpy(&rslt,rsltP,sizeof(T)); // Copy result to host
    fence();
    return rslt;
  }
  // Asynchronous reduction into a caller-provided device pointer devP.
  void deviceReduce(T *data, T *devP) {
    sycl_default_stream().submit([&, nItems = this->nItems](sycl::handler &cgh) {
      cgh.parallel_for(get_reduction_range(nItems, devP),
                       sycl::reduction(devP, sycl::maximum<>(), sycl::property::reduction::initialize_to_identity{}),
                       [=](sycl::nd_item<1> idx, auto& max) {
        const int i = idx.get_global_linear_id();
        if (i < nItems) { max.combine(data[i]); }
      });
    });
    #if defined(YAKL_AUTO_FENCE) || defined(YAKL_DEBUG)
      fence();
    #endif
  }
};
template <class T> class ParallelSum<T,memDevice> { int nItems; // Number of items in the array that will be reduced T *rsltP; // Device pointer for reduction result public: ParallelSum() { rsltP = nullptr; } ParallelSum(int const nItems) { rsltP = nullptr; setup(nItems); } ~ParallelSum() { finalize(); } void setup(int const nItems) { finalize(); rsltP = (T *) yaklAllocDevice(sizeof(T),""); // Allocate device pointer for result this->nItems = nItems; } void finalize() { if(rsltP != nullptr) { yaklFreeDevice(rsltP,""); } rsltP = nullptr; } T operator() (T *data) { T rslt=0; sycl_default_stream().submit([&, nItems = this->nItems](sycl::handler &cgh) { cgh.parallel_for(get_reduction_range(nItems, rsltP), sycl::reduction(rsltP, std::plus<>(), sycl::property::reduction::initialize_to_identity{}), [=](sycl::nd_item<1> idx, auto& sum) { const int i = idx.get_global_linear_id(); if (i < nItems) { sum.combine(data[i]); } }); }); sycl_default_stream().memcpy(&rslt,rsltP,sizeof(T)); fence(); return rslt; } void deviceReduce(T *data, T *devP) { sycl_default_stream().submit([&, nItems = this->nItems](sycl::handler &cgh) { cgh.parallel_for(get_reduction_range(nItems, rsltP), sycl::reduction(rsltP, std::plus<>(), sycl::property::reduction::initialize_to_identity{}), [=](sycl::nd_item<1> idx, auto& sum) { const int i = idx.get_global_linear_id(); if (i < nItems) { sum.combine(data[i]); } }); }); #if defined(YAKL_AUTO_FENCE) || defined(YAKL_DEBUG) fence(); #endif } }; #elif defined(YAKL_ARCH_OPENMP45) template <class T> class ParallelSum<T,memDevice> { int nItems; public: ParallelSum() {} ParallelSum(int const nItems) { this->nItems = nItems; } ~ParallelSum() { } T operator() (T *data) { T rslt = 0; #pragma omp target teams distribute parallel for simd reduction(+:rslt) is_device_ptr(data) for(int i=0; i<nItems; i++) { rslt += data[i]; } return rslt; } void deviceReduce(T *data, T *devP) { T rslt = 0; #pragma omp target teams distribute parallel for simd reduction(+:rslt) 
is_device_ptr(data) for (int i=0; i<nItems; i++) { rslt += data[i]; } omp_target_memcpy(devP,&rslt,sizeof(T),0,0,omp_get_default_device(),omp_get_initial_device()); #pragma omp taskwait check_last_error(); } }; template <class T> class ParallelMin<T,memDevice> { int nItems; public: ParallelMin() {} ParallelMin(int const nItems) { this->nItems = nItems; } ~ParallelMin() { } T operator() (T *data) { T rslt = std::numeric_limits<T>::max(); #pragma omp target teams distribute parallel for simd reduction(min:rslt) is_device_ptr(data) for(int i=0; i<nItems; i++) { rslt = data[i] < rslt ? data[i] : rslt; } return rslt; } void deviceReduce(T *data, T *devP) { T rslt = std::numeric_limits<T>::max(); #pragma omp target teams distribute parallel for simd reduction(min:rslt) is_device_ptr(data) for (int i=0; i<nItems; i++) { rslt = data[i] < rslt ? data[i] : rslt; } omp_target_memcpy(devP,&rslt,sizeof(T),0,0,omp_get_default_device(),omp_get_initial_device()); #pragma omp taskwait check_last_error(); } }; template <class T> class ParallelMax<T,memDevice> { int nItems; public: ParallelMax() {} ParallelMax(int const nItems) { this->nItems = nItems; } ~ParallelMax() { } T operator() (T *data) { T rslt = std::numeric_limits<T>::lowest(); #pragma omp target teams distribute parallel for simd reduction(max:rslt) is_device_ptr(data) for(int i=0; i<nItems; i++) { rslt = data[i] > rslt ? data[i] : rslt; } return rslt; } void deviceReduce(T *data, T *devP) { T rslt = std::numeric_limits<T>::lowest(); #pragma omp target teams distribute parallel for simd reduction(max:rslt) is_device_ptr(data) for (int i=0; i<nItems; i++) { rslt = data[i] > rslt ? 
data[i] : rslt; } omp_target_memcpy(devP,&rslt,sizeof(T),0,0,omp_get_default_device(),omp_get_initial_device()); #pragma omp taskwait check_last_error(); } }; #elif defined(YAKL_ARCH_OPENMP) template <class T> class ParallelSum<T,memDevice> { int nItems; public: ParallelSum() {} ParallelSum(int const nItems) { this->nItems = nItems; } ~ParallelSum() { } T operator() (T *data) { T rslt = 0; #pragma omp parallel for reduction(+:rslt) for(int i=0; i<nItems; i++) { rslt += data[i]; } return rslt; } void deviceReduce(T *data, T *devP) { T rslt = 0; #pragma omp parallel for reduction(+:rslt) for (int i=0; i<nItems; i++) { rslt += data[i]; } *devP = rslt; } }; template <class T> class ParallelMin<T,memDevice> { int nItems; public: ParallelMin() {} ParallelMin(int const nItems) { this->nItems = nItems; } ~ParallelMin() { } T operator() (T *data) { T rslt = std::numeric_limits<T>::max(); #pragma omp parallel for reduction(min:rslt) for(int i=0; i<nItems; i++) { rslt = data[i] < rslt ? data[i] : rslt; } return rslt; } void deviceReduce(T *data, T *devP) { T rslt = std::numeric_limits<T>::max(); #pragma omp parallel for reduction(min:rslt) for (int i=0; i<nItems; i++) { rslt = data[i] < rslt ? data[i] : rslt; } *devP = rslt; } }; template <class T> class ParallelMax<T,memDevice> { int nItems; public: ParallelMax() {} ParallelMax(int const nItems) { this->nItems = nItems; } ~ParallelMax() { } T operator() (T *data) { T rslt = std::numeric_limits<T>::lowest(); #pragma omp parallel for reduction(max:rslt) for(int i=0; i<nItems; i++) { rslt = data[i] > rslt ? data[i] : rslt; } return rslt; } void deviceReduce(T *data, T *devP) { T rslt = std::numeric_limits<T>::lowest(); #pragma omp parallel for reduction(max:rslt) for (int i=0; i<nItems; i++) { rslt = data[i] > rslt ? 
data[i] : rslt; } *devP = rslt; } }; #else template <class T> class ParallelMin<T,memDevice> { int nItems; // Number of items in the array that will be reduced public: ParallelMin() {} ParallelMin(int const nItems) { this->nItems = nItems; } ~ParallelMin() { } void setup(int nItems) { this->nItems = nItems; } T operator() (T *data) { T rslt = data[0]; for (int i=1; i<nItems; i++) { rslt = data[i] < rslt ? data[i] : rslt; } return rslt; } void deviceReduce(T *data, T *rslt) { *(rslt) = data[0]; for (int i=1; i<nItems; i++) { *(rslt) = data[i] < *(rslt) ? data[i] : rslt; } } }; template <class T> class ParallelMax<T,memDevice> { int nItems; // Number of items in the array that will be reduced public: ParallelMax() {} ParallelMax(int const nItems) { this->nItems = nItems; } ~ParallelMax() { } void setup(int nItems) { this->nItems = nItems; } T operator() (T *data) { T rslt = data[0]; for (int i=1; i<nItems; i++) { rslt = data[i] > rslt ? data[i] : rslt; } return rslt; } void deviceReduce(T *data, T *rslt) { *(rslt) = data[0]; for (int i=1; i<nItems; i++) { *(rslt) = data[i] > *(rslt) ? data[i] : rslt; } } }; template <class T> class ParallelSum<T,memDevice> { int nItems; // Number of items in the array that will be reduced public: ParallelSum() {} ParallelSum(int const nItems) { this->nItems = nItems; } ~ParallelSum() { } void setup(int nItems) { this->nItems = nItems; } T operator() (T *data) { T rslt = data[0]; for (int i=1; i<nItems; i++) { rslt += data[i]; } return rslt; } void deviceReduce(T *data, T *rslt) { *(rslt) = data[0]; for (int i=1; i<nItems; i++) { *(rslt) += data[i]; } } }; #endif template <class T> class ParallelMin<T,memHost> { int nItems; // Number of items in the array that will be reduced public: ParallelMin() {} ParallelMin(int const nItems) { this->nItems = nItems; } ~ParallelMin() { } void setup(int nItems) { this->nItems = nItems; } T operator() (T *data) { T rslt = data[0]; for (int i=1; i<nItems; i++) { rslt = data[i] < rslt ? 
data[i] : rslt; } return rslt; } void deviceReduce(T *data, T *rslt) { *(rslt) = data[0]; for (int i=1; i<nItems; i++) { *(rslt) = data[i] < *(rslt) ? data[i] : rslt; } } }; template <class T> class ParallelMax<T,memHost> { int nItems; // Number of items in the array that will be reduced public: ParallelMax() {} ParallelMax(int const nItems) { this->nItems = nItems; } ~ParallelMax() { } void setup(int nItems) { this->nItems = nItems; } T operator() (T *data) { T rslt = data[0]; for (int i=1; i<nItems; i++) { rslt = data[i] > rslt ? data[i] : rslt; } return rslt; } void deviceReduce(T *data, T *rslt) { *(rslt) = data[0]; for (int i=1; i<nItems; i++) { *(rslt) = data[i] > *(rslt) ? data[i] : rslt; } } }; template <class T> class ParallelSum<T,memHost> { int nItems; // Number of items in the array that will be reduced public: ParallelSum() {} ParallelSum(int const nItems) { this->nItems = nItems; } ~ParallelSum() { } void setup(int nItems) { this->nItems = nItems; } T operator() (T *data) { T rslt = data[0]; for (int i=1; i<nItems; i++) { rslt += data[i]; } return rslt; } void deviceReduce(T *data, T *rslt) { *(rslt) = data[0]; for (int i=1; i<nItems; i++) { *(rslt) += data[i]; } } };
plot.h
#ifndef OPENMC_PLOT_H
#define OPENMC_PLOT_H

#include <unordered_map>
#include <sstream>

#include "pugixml.hpp"
#include "xtensor/xarray.hpp"
#include "hdf5.h"

#include "openmc/position.h"
#include "openmc/constants.h"
#include "openmc/cell.h"
#include "openmc/error.h"
#include "openmc/geometry.h"
#include "openmc/particle.h"
#include "openmc/xml_interface.h"
#include "openmc/random_lcg.h"

namespace openmc {

//===============================================================================
// Global variables
//===============================================================================

class Plot;

namespace model {
extern std::unordered_map<int, int> plot_map; //!< map of plot ids to index
extern std::vector<Plot> plots; //!< Plot instance container
extern uint64_t plotter_prn_seeds[N_STREAMS]; // Random number seeds used for plotter
extern int plotter_stream; // Stream index used by the plotter
} // namespace model

//===============================================================================
// RGBColor holds color information for plotted objects
//===============================================================================

struct RGBColor {
  // Constructors
  RGBColor() : red(0), green(0), blue(0) { };
  RGBColor(const int v[3]) : red(v[0]), green(v[1]), blue(v[2]) { };
  RGBColor(int r, int g, int b) : red(r), green(g), blue(b) { };
  // Vector constructor; throws if the vector is not exactly 3 components.
  RGBColor(const std::vector<int> &v) {
    if (v.size() != 3) {
      throw std::out_of_range("Incorrect vector size for RGBColor.");
    }
    red = v[0];
    green = v[1];
    blue = v[2];
  }

  // Component-wise equality.
  bool operator ==(const RGBColor& other) {
    return red == other.red && green == other.green && blue == other.blue;
  }

  // Members (components stored as 8-bit channels)
  uint8_t red, green, blue;
};

// some default colors
const RGBColor WHITE {255, 255, 255};
const RGBColor RED {255, 0, 0};

// 2D raster of pixel colors
typedef xt::xtensor<RGBColor, 2> ImageData;

// Accumulates cell/material ids per pixel during slice plotting.
struct IdData {
  // Constructor
  IdData(size_t h_res, size_t v_res);

  // Methods
  void set_value(size_t y, size_t x, const Particle& p, int level);
  void set_overlap(size_t y, size_t x);

  // Members
  xt::xtensor<int32_t, 3> data_; //!< 2D array of cell & material ids
};

// Accumulates temperature/density values per pixel during slice plotting.
struct PropertyData {
  // Constructor
  PropertyData(size_t h_res, size_t v_res);

  // Methods
  void set_value(size_t y, size_t x, const Particle& p, int level);
  void set_overlap(size_t y, size_t x);

  // Members
  xt::xtensor<double, 3> data_; //!< 2D array of temperature & density data
};

enum class PlotType {
  slice = 1,
  voxel = 2
};

enum class PlotBasis {
  xy = 1,
  xz = 2,
  yz = 3
};

enum class PlotColorBy {
  cells = 0,
  mats = 1
};

//===============================================================================
// Plot class
//===============================================================================

class PlotBase {
public:
  // Rasterize this plot's slice: T is IdData or PropertyData (anything with
  // a (width, height) constructor plus set_value/set_overlap).
  template<class T> T get_map() const;

  // Members
public:
  Position origin_; //!< Plot origin in geometry
  Position width_; //!< Plot width in geometry
  PlotBasis basis_; //!< Plot basis (XY/XZ/YZ)
  std::array<size_t, 3> pixels_; //!< Plot size in pixels
  bool color_overlaps_; //!< Show overlapping cells?
  int level_; //!< Plot universe level
};

template<class T>
T PlotBase::get_map() const {
  size_t width = pixels_[0];
  size_t height = pixels_[1];

  // get pixel size
  double in_pixel = (width_[0])/static_cast<double>(width);
  double out_pixel = (width_[1])/static_cast<double>(height);

  // size data array
  T data(width, height);

  // setup basis indices and initial position centered on pixel
  int in_i, out_i;
  Position xyz = origin_;
  switch(basis_) {
  case PlotBasis::xy :
    in_i = 0;
    out_i = 1;
    break;
  case PlotBasis::xz :
    in_i = 0;
    out_i = 2;
    break;
  case PlotBasis::yz :
    in_i = 1;
    out_i = 2;
    break;
  default:
    UNREACHABLE();
  }

  // set initial position: left edge / top edge, offset to pixel centers
  xyz[in_i] = origin_[in_i] - width_[0] / 2. + in_pixel / 2.;
  xyz[out_i] = origin_[out_i] + width_[1] / 2. - out_pixel / 2.;

  // arbitrary direction (used only to give the tracking particle a heading)
  Direction dir = {0.7071, 0.7071, 0.0};

  #pragma omp parallel
  {
    // one tracking particle per thread, re-aimed at each pixel
    Particle p;
    p.r() = xyz;
    p.u() = dir;
    p.coord_[0].universe = model::root_universe;
    int level = level_;
    int j{};

    #pragma omp for
    for (int y = 0; y < height; y++) {
      // rows advance downward (decreasing out-of-plane coordinate)
      p.r()[out_i] = xyz[out_i] - out_pixel * y;
      for (int x = 0; x < width; x++) {
        p.r()[in_i] = xyz[in_i] + in_pixel * x;
        p.n_coord_ = 1;
        // local variables
        bool found_cell = find_cell(p, 0);
        j = p.n_coord_ - 1;
        if (level >= 0) { j = level; }  // user-requested universe level overrides
        if (found_cell) {
          data.set_value(y, x, p, j);
        }
        if (color_overlaps_ && check_cell_overlap(p, false)) {
          data.set_overlap(y, x);
        }
      } // inner for
    } // outer for
  } // omp parallel

  return data;
}

// A user-specified plot: geometry parameters (PlotBase) plus parsed XML options.
class Plot : public PlotBase {

public:
  // Constructor
  Plot(pugi::xml_node plot);

  // Methods
private:
  void set_id(pugi::xml_node plot_node);
  void set_type(pugi::xml_node plot_node);
  void set_output_path(pugi::xml_node plot_node);
  void set_bg_color(pugi::xml_node plot_node);
  void set_basis(pugi::xml_node plot_node);
  void set_origin(pugi::xml_node plot_node);
  void set_width(pugi::xml_node plot_node);
  void set_universe(pugi::xml_node plot_node);
  void set_default_colors(pugi::xml_node plot_node);
  void set_user_colors(pugi::xml_node plot_node);
  void set_meshlines(pugi::xml_node plot_node);
  void set_mask(pugi::xml_node plot_node);
  void set_overlap_color(pugi::xml_node plot_node);

  // Members
public:
  int id_; //!< Plot ID
  PlotType type_; //!< Plot type (Slice/Voxel)
  PlotColorBy color_by_; //!< Plot coloring (cell/material)
  int meshlines_width_; //!< Width of lines added to the plot
  int index_meshlines_mesh_ {-1}; //!< Index of the mesh to draw on the plot
  RGBColor meshlines_color_; //!< Color of meshlines on the plot
  RGBColor not_found_ {WHITE}; //!< Plot background color
  RGBColor overlap_color_ {RED}; //!< Plot overlap color
  std::vector<RGBColor> colors_; //!< Plot colors
  std::string path_plot_; //!< Plot output filename
};
//===============================================================================
// Non-member functions
//===============================================================================

//! Add mesh lines to image data of a plot object
//! \param[in] plot object
//! \param[out] image data associated with the plot object
void draw_mesh_lines(Plot pl, ImageData& data);

//! Write a ppm image to file using a plot object's image data
//! \param[in] plot object
//! \param[out] image data associated with the plot object
void output_ppm(Plot pl, const ImageData& data);

//! Initialize a voxel file
//! \param[in] id of an open hdf5 file
//! \param[in] dimensions of the voxel file (dx, dy, dz)
//! \param[out] dataspace pointer to voxel data
//! \param[out] dataset pointer to voxel data
//! \param[out] pointer to memory space of voxel data
void voxel_init(hid_t file_id, const hsize_t* dims, hid_t* dspace, hid_t* dset,
                hid_t* memspace);

//! Write a section of the voxel data to hdf5
//! \param[in] voxel slice
//! \param[out] dataspace pointer to voxel data
//! \param[out] dataset pointer to voxel data
//! \param[out] pointer to data to write
void voxel_write_slice(int x, hid_t dspace, hid_t dset, hid_t memspace,
                       void* buf);

//! Close voxel file entities
//! \param[in] data space to close
//! \param[in] dataset to close
//! \param[in] memory space to close
void voxel_finalize(hid_t dspace, hid_t dset, hid_t memspace);

//===============================================================================
// External functions
//===============================================================================

//! Read plot specifications from a plots.xml file
void read_plots_xml();

//! Create a ppm image for a plot object
//! \param[in] plot object
void create_ppm(Plot pl);

//! Create an hdf5 voxel file for a plot object
//! \param[in] plot object
void create_voxel(Plot pl);

//! Create a randomly generated RGB color
//! \return RGBColor with random value
RGBColor random_color();

} // namespace openmc
#endif // OPENMC_PLOT_H
flowinfo_ipv4_src.c
/* * Copyright 2014-2017 Nippon Telegraph and Telephone Corporation. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file flowinfo_ipv4_src.c * @brief Optimized flow database for dataplane, for ipv4_src */ #include <stdlib.h> #include "openflow.h" #include "lagopus/flowdb.h" #include "pktbuf.h" #include "packet.h" #include "lagopus/flowinfo.h" #define OXM_FIELD_TYPE(field) ((field) >> 1) #define IPV4_SRC_BITLEN (32) static lagopus_result_t add_flow_ipv4_src_mask(struct flowinfo *, struct flow *); static lagopus_result_t del_flow_ipv4_src_mask(struct flowinfo *, struct flow *); static struct flow * match_flow_ipv4_src_mask(struct flowinfo *, struct lagopus_packet *, int32_t *); static struct flow * find_flow_ipv4_src_mask(struct flowinfo *, struct flow *); static void destroy_flowinfo_ipv4_src_mask(struct flowinfo *); static lagopus_result_t add_flow_ipv4_src(struct flowinfo *, struct flow *); static lagopus_result_t del_flow_ipv4_src(struct flowinfo *, struct flow *); static struct flow * match_flow_ipv4_src(struct flowinfo *, struct lagopus_packet *, int32_t *); static struct flow * find_flow_ipv4_src(struct flowinfo *, struct flow *); static void destroy_flowinfo_ipv4_src(struct flowinfo *); static lagopus_result_t get_match_ipv4_src(const struct match_list *match_list, uint32_t *ipv4_src, uint32_t *mask) { const struct match *match; TAILQ_FOREACH(match, match_list, entry) { if (match->oxm_field == (OFPXMT_OFB_IPV4_SRC << 1) + 1) { OS_MEMCPY(ipv4_src, match->oxm_value, 
sizeof(*ipv4_src)); OS_MEMCPY(mask, &match->oxm_value[4], sizeof(*mask)); break; } if (OXM_FIELD_TYPE(match->oxm_field) == OFPXMT_OFB_IPV4_SRC) { OS_MEMCPY(ipv4_src, match->oxm_value, sizeof(*ipv4_src)); *mask = 0xffffffff; break; } } if (match == NULL) { return LAGOPUS_RESULT_NOT_FOUND; } return LAGOPUS_RESULT_OK; } struct flowinfo * new_flowinfo_ipv4_src_mask(void) { struct flowinfo *self; self = calloc(1, sizeof(struct flowinfo)); if (self != NULL) { self->nflow = 0; self->nnext = 0; self->next = malloc(1); self->misc = new_flowinfo_ipv4(); self->add_func = add_flow_ipv4_src_mask; self->del_func = del_flow_ipv4_src_mask; self->match_func = match_flow_ipv4_src_mask; self->find_func = find_flow_ipv4_src_mask; self->destroy_func = destroy_flowinfo_ipv4_src_mask; } return self; } static void destroy_flowinfo_ipv4_src_mask(struct flowinfo *self) { struct flowinfo *flowinfo; unsigned int i; for (i = 0; i < self->nnext; i++) { flowinfo = self->next[i]; flowinfo->destroy_func(flowinfo); } free(self->next); free(self); } static void freeup_flowinfo(void *val) { struct flowinfo *flowinfo; flowinfo = val; flowinfo->destroy_func(flowinfo); } struct flowinfo * new_flowinfo_ipv4_src(void) { struct flowinfo *self; self = calloc(1, sizeof(struct flowinfo)); if (self != NULL) { lagopus_hashmap_create(&self->hashmap, LAGOPUS_HASHMAP_TYPE_ONE_WORD, freeup_flowinfo); /* misc is not used */ self->add_func = add_flow_ipv4_src; self->del_func = del_flow_ipv4_src; self->match_func = match_flow_ipv4_src; self->find_func = find_flow_ipv4_src; self->destroy_func = destroy_flowinfo_ipv4_src; } return self; } static void destroy_flowinfo_ipv4_src(struct flowinfo *self) { lagopus_hashmap_destroy(&self->hashmap, true); free(self); } static lagopus_result_t add_flow_ipv4_src_mask(struct flowinfo *self, struct flow *flow) { struct flowinfo *flowinfo; uint32_t ipv4_src, mask; lagopus_result_t rv; unsigned int i; rv = get_match_ipv4_src(&flow->match_list, &ipv4_src, &mask); if (rv == 
LAGOPUS_RESULT_OK) { rv = LAGOPUS_RESULT_NOT_FOUND; for (i = 0; i < self->nnext; i++) { if (self->next[i]->userdata == mask) { flowinfo = self->next[i]; rv = LAGOPUS_RESULT_OK; break; } } if (rv == LAGOPUS_RESULT_NOT_FOUND) { /* new node. */ flowinfo = new_flowinfo_ipv4_src(); flowinfo->userdata = mask; self->next = realloc(self->next, (unsigned long)(self->nnext + 1) * sizeof(struct flowinfo *)); self->next[self->nnext] = flowinfo; self->nnext++; } rv = flowinfo->add_func(flowinfo, flow); } else { rv = self->misc->add_func(self->misc, flow); } if (rv == LAGOPUS_RESULT_OK) { self->nflow++; } return rv; } static lagopus_result_t del_flow_ipv4_src_mask(struct flowinfo *self, struct flow *flow) { struct flowinfo *flowinfo; uint32_t ipv4_src, mask; lagopus_result_t rv; unsigned int i; rv = get_match_ipv4_src(&flow->match_list, &ipv4_src, &mask); if (rv == LAGOPUS_RESULT_OK) { rv = LAGOPUS_RESULT_NOT_FOUND; for (i = 0; i < self->nnext; i++) { if (self->next[i]->userdata == mask) { flowinfo = self->next[i]; rv = LAGOPUS_RESULT_OK; break; } } if (rv == LAGOPUS_RESULT_NOT_FOUND) { return LAGOPUS_RESULT_NOT_FOUND; } rv = flowinfo->del_func(flowinfo, flow); if (flowinfo->nflow == 0) { flowinfo->destroy_func(flowinfo); self->nnext--; memmove(&self->next[i], &self->next[i + 1], (self->nnext - i) * sizeof(struct flowinfo **)); } } else { rv = self->misc->del_func(self->misc, flow); } if (rv == LAGOPUS_RESULT_OK) { self->nflow--; } return rv; } static struct flow * match_flow_ipv4_src_mask(struct flowinfo *self, struct lagopus_packet *pkt, int32_t *pri) { struct flowinfo *flowinfo; struct flow *flow[self->nnext], *matched, *alt_flow; struct flow mismatched = { .priority = 0, .flags = 0, .idle_timeout = 0, .hard_timeout = 0, .match_list = {NULL, NULL}, .instruction_list = {NULL, NULL}, .field_bits = 0 }; unsigned int i; matched = &mismatched; //#pragma omp parallel for for (i = 0; i < self->nnext; i++) { flowinfo = self->next[i]; flow[i] = flowinfo->match_func(flowinfo, pkt, 
pri); } for (i = 0; i < self->nnext; i++) { if (flow[i] != NULL && flow[i]->priority > matched->priority) { matched = flow[i]; } } alt_flow = self->misc->match_func(self->misc, pkt, pri); if (alt_flow != NULL) { matched = alt_flow; } if (matched == &mismatched) { matched = NULL; } return matched; } static struct flow * find_flow_ipv4_src_mask(struct flowinfo *self, struct flow *flow) { struct flowinfo *flowinfo; uint32_t ipv4_src, mask; lagopus_result_t rv; unsigned int i; rv = get_match_ipv4_src(&flow->match_list, &ipv4_src, &mask); if (rv == LAGOPUS_RESULT_OK) { rv = LAGOPUS_RESULT_NOT_FOUND; for (i = 0; i < self->nnext; i++) { if (self->next[i]->userdata == mask) { flowinfo = self->next[i]; rv = LAGOPUS_RESULT_OK; break; } } if (rv == LAGOPUS_RESULT_NOT_FOUND) { return NULL; } } else { flowinfo = self->misc; } return flowinfo->find_func(flowinfo, flow); } static lagopus_result_t add_flow_ipv4_src(struct flowinfo *self, struct flow *flow) { struct flowinfo *flowinfo; uint32_t ipv4_src, mask; lagopus_result_t rv; rv = get_match_ipv4_src(&flow->match_list, &ipv4_src, &mask); if (rv == LAGOPUS_RESULT_OK) { rv = lagopus_hashmap_find_no_lock(&self->hashmap, (void *)ipv4_src, (void *)&flowinfo); if (rv != LAGOPUS_RESULT_OK) { void *val; flowinfo = new_flowinfo_ipv4(); val = flowinfo; lagopus_hashmap_add_no_lock(&self->hashmap, (void *)ipv4_src, (void *)&val, false); } rv = flowinfo->add_func(flowinfo, flow); if (rv == LAGOPUS_RESULT_OK) { self->nflow++; } } return rv; } static lagopus_result_t del_flow_ipv4_src(struct flowinfo *self, struct flow *flow) { struct flowinfo *flowinfo; uint32_t ipv4_src, mask; lagopus_result_t rv; rv = get_match_ipv4_src(&flow->match_list, &ipv4_src, &mask); if (rv == LAGOPUS_RESULT_OK) { rv = lagopus_hashmap_find_no_lock(&self->hashmap, (void *)ipv4_src, (void *)&flowinfo); if (rv == LAGOPUS_RESULT_OK) { flowinfo->del_func(flowinfo, flow); } if (rv == LAGOPUS_RESULT_OK) { self->nflow--; } } out: return rv; } static struct flow * 
match_flow_ipv4_src(struct flowinfo *self, struct lagopus_packet *pkt, int32_t *pri) { struct flowinfo *flowinfo; uint32_t ipv4_src; struct flow *flow; lagopus_result_t rv; flow = NULL; ipv4_src = (pkt->ipv4->ip_src.s_addr & (uint32_t)self->userdata); rv = lagopus_hashmap_find_no_lock(&self->hashmap, (void *)ipv4_src, (void *)&flowinfo); if (rv == LAGOPUS_RESULT_OK) { flow = flowinfo->match_func(flowinfo, pkt, pri); } return flow; } static struct flow * find_flow_ipv4_src(struct flowinfo *self, struct flow *flow) { struct flowinfo *flowinfo; uint32_t ipv4_src, mask; lagopus_result_t rv; rv = get_match_ipv4_src(&flow->match_list, &ipv4_src, &mask); if (rv == LAGOPUS_RESULT_OK) { rv = lagopus_hashmap_find_no_lock(&self->hashmap, (void *)ipv4_src, (void *)&flowinfo); if (rv != LAGOPUS_RESULT_OK) { return NULL; } return flowinfo->find_func(flowinfo, flow); } else { return self->misc->find_func(self->misc, flow); } }
dct.c
/*****************************************************************************
 * dct.c: h264 encoder library
 *****************************************************************************
 * Copyright (C) 2003-2008 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *****************************************************************************/

#include "common.h"
#include <omp.h>

#ifdef HAVE_MMX
#   include "x86/dct.h"
#endif
#ifdef ARCH_PPC
#   include "ppc/dct.h"
#endif

int x264_dct4_weight2_zigzag[2][16];
int x264_dct8_weight2_zigzag[2][64];

/*
 * XXX For all dct dc : input could be equal to output so ...
 */

/* Forward 4x4 Hadamard transform of the DC coefficients, in place.
 * First pass runs over rows into tmp (transposed), second pass over tmp;
 * the final >>1 with +1 rounding normalizes the transform. */
static void dct4x4dc( int16_t d[4][4] )
{
    int16_t tmp[4][4];
    int s01, s23;
    int d01, d23;
    int i;

    for( i = 0; i < 4; i++ )
    {
        s01 = d[i][0] + d[i][1];
        d01 = d[i][0] - d[i][1];
        s23 = d[i][2] + d[i][3];
        d23 = d[i][2] - d[i][3];

        tmp[0][i] = s01 + s23;
        tmp[1][i] = s01 - s23;
        tmp[2][i] = d01 - d23;
        tmp[3][i] = d01 + d23;
    }

    for( i = 0; i < 4; i++ )
    {
        s01 = tmp[i][0] + tmp[i][1];
        d01 = tmp[i][0] - tmp[i][1];
        s23 = tmp[i][2] + tmp[i][3];
        d23 = tmp[i][2] - tmp[i][3];

        d[i][0] = ( s01 + s23 + 1 ) >> 1;
        d[i][1] = ( s01 - s23 + 1 ) >> 1;
        d[i][2] = ( d01 - d23 + 1 ) >> 1;
        d[i][3] = ( d01 + d23 + 1 ) >> 1;
    }
}

/* Inverse 4x4 Hadamard transform of the DC coefficients, in place
 * (same butterflies as the forward transform, without the rounding shift). */
static void idct4x4dc( int16_t d[4][4] )
{
    int16_t tmp[4][4];
    int s01, s23;
    int d01, d23;
    int i;

    for( i = 0; i < 4; i++ )
    {
        s01 = d[i][0] + d[i][1];
        d01 = d[i][0] - d[i][1];
        s23 = d[i][2] + d[i][3];
        d23 = d[i][2] - d[i][3];

        tmp[0][i] = s01 + s23;
        tmp[1][i] = s01 - s23;
        tmp[2][i] = d01 - d23;
        tmp[3][i] = d01 + d23;
    }

    for( i = 0; i < 4; i++ )
    {
        s01 = tmp[i][0] + tmp[i][1];
        d01 = tmp[i][0] - tmp[i][1];
        s23 = tmp[i][2] + tmp[i][3];
        d23 = tmp[i][2] - tmp[i][3];

        d[i][0] = s01 + s23;
        d[i][1] = s01 - s23;
        d[i][2] = d01 - d23;
        d[i][3] = d01 + d23;
    }
}

/* diff[y*i_size + x] = pix1 - pix2 over an i_size x i_size block;
 * pix1/pix2 advance by their respective strides per row. */
static inline void pixel_sub_wxh( int16_t *diff, int i_size,
                                  uint8_t *pix1, int i_pix1, uint8_t *pix2, int i_pix2 )
{
    int y, x;
    for( y = 0; y < i_size; y++ )
    {
        for( x = 0; x < i_size; x++ )
        {
            diff[x + y*i_size] = pix1[x] - pix2[x];
        }
        pix1 += i_pix1;
        pix2 += i_pix2;
    }
}

/* 4x4 forward DCT of the residual between an encode block (pix1, FENC_STRIDE)
 * and a decode/prediction block (pix2, FDEC_STRIDE). Row pass into tmp
 * (transposed), column pass into dct. */
static void sub4x4_dct( int16_t dct[4][4], uint8_t *pix1, uint8_t *pix2 )
{
    int16_t d[4][4];
    int16_t tmp[4][4];
    int i;

    pixel_sub_wxh( (int16_t*)d, 4, pix1, FENC_STRIDE, pix2, FDEC_STRIDE );

    for( i = 0; i < 4; i++ )
    {
        const int s03 = d[i][0] + d[i][3];
        const int s12 = d[i][1] + d[i][2];
        const int d03 = d[i][0] - d[i][3];
        const int d12 = d[i][1] - d[i][2];

        tmp[0][i] =   s03 +   s12;
        tmp[1][i] = 2*d03 +   d12;
        tmp[2][i] =   s03 -   s12;
        tmp[3][i] =   d03 - 2*d12;
    }

    for( i = 0; i < 4; i++ )
    {
        const int s03 = tmp[i][0] + tmp[i][3];
        const int s12 = tmp[i][1] + tmp[i][2];
        const int d03 = tmp[i][0] - tmp[i][3];
        const int d12 = tmp[i][1] - tmp[i][2];

        dct[i][0] =   s03 +   s12;
        dct[i][1] = 2*d03 +   d12;
        dct[i][2] =   s03 -   s12;
        dct[i][3] =   d03 - 2*d12;
    }
}

/* 8x8 residual DCT as four 4x4 sub-blocks (raster order). */
static void sub8x8_dct( int16_t dct[4][4][4], uint8_t *pix1, uint8_t *pix2 )
{
    sub4x4_dct( dct[0], &pix1[0],               &pix2[0] );
    sub4x4_dct( dct[1], &pix1[4],               &pix2[4] );
    sub4x4_dct( dct[2], &pix1[4*FENC_STRIDE+0], &pix2[4*FDEC_STRIDE+0] );
    sub4x4_dct( dct[3], &pix1[4*FENC_STRIDE+4], &pix2[4*FDEC_STRIDE+4] );
}

/* 16x16 residual DCT as four 8x8 quadrants (raster order). */
static void sub16x16_dct( int16_t dct[16][4][4], uint8_t *pix1, uint8_t *pix2 )
{
    sub8x8_dct( &dct[ 0], &pix1[0],               &pix2[0] );
    sub8x8_dct( &dct[ 4], &pix1[8],               &pix2[8] );
    sub8x8_dct( &dct[ 8], &pix1[8*FENC_STRIDE+0], &pix2[8*FDEC_STRIDE+0] );
    sub8x8_dct( &dct[12], &pix1[8*FENC_STRIDE+8], &pix2[8*FDEC_STRIDE+8] );
}

/* 4x4 inverse DCT: reconstructs the residual (with +32 >>6 rounding) and
 * adds it to the prediction in p_dst (FDEC_STRIDE), clipping to 8 bits. */
static void add4x4_idct( uint8_t *p_dst, int16_t dct[4][4] )
{
    int16_t d[4][4];
    int16_t tmp[4][4];
    int x, y;
    int i;

    for( i = 0; i < 4; i++ )
    {
        const int s02 =  dct[0][i]     +  dct[2][i];
        const int d02 =  dct[0][i]     -  dct[2][i];
        const int s13 =  dct[1][i]     + (dct[3][i]>>1);
        const int d13 = (dct[1][i]>>1) -  dct[3][i];

        tmp[i][0] = s02 + s13;
        tmp[i][1] = d02 + d13;
        tmp[i][2] = d02 - d13;
        tmp[i][3] = s02 - s13;
    }

    for( i = 0; i < 4; i++ )
    {
        const int s02 =  tmp[0][i]     +  tmp[2][i];
        const int d02 =  tmp[0][i]     -  tmp[2][i];
        const int s13 =  tmp[1][i]     + (tmp[3][i]>>1);
        const int d13 = (tmp[1][i]>>1) -  tmp[3][i];

        d[0][i] = ( s02 + s13 + 32 ) >> 6;
        d[1][i] = ( d02 + d13 + 32 ) >> 6;
        d[2][i] = ( d02 - d13 + 32 ) >> 6;
        d[3][i] = ( s02 - s13 + 32 ) >> 6;
    }

    for( y = 0; y < 4; y++ )
    {
        for( x = 0; x < 4; x++ )
        {
            p_dst[x] = x264_clip_uint8( p_dst[x] + d[y][x] );
        }
        p_dst += FDEC_STRIDE;
    }
}

/* 8x8 inverse DCT + add as four 4x4 sub-blocks (raster order). */
static void add8x8_idct( uint8_t *p_dst, int16_t dct[4][4][4] )
{
    add4x4_idct( &p_dst[0],               dct[0] );
    add4x4_idct( &p_dst[4],               dct[1] );
    add4x4_idct( &p_dst[4*FDEC_STRIDE+0], dct[2] );
    add4x4_idct( &p_dst[4*FDEC_STRIDE+4], dct[3] );
}

/* 16x16 inverse DCT + add as four 8x8 quadrants (raster order). */
static void add16x16_idct( uint8_t *p_dst, int16_t dct[16][4][4] )
{
    add8x8_idct( &p_dst[0],               &dct[0] );
add8x8_idct( &p_dst[8], &dct[4] ); add8x8_idct( &p_dst[8*FDEC_STRIDE+0], &dct[8] ); add8x8_idct( &p_dst[8*FDEC_STRIDE+8], &dct[12] ); } /**************************************************************************** * 8x8 transform: ****************************************************************************/ #define DCT8_1D {\ const int s07 = SRC(0) + SRC(7);\ const int s16 = SRC(1) + SRC(6);\ const int s25 = SRC(2) + SRC(5);\ const int s34 = SRC(3) + SRC(4);\ const int a0 = s07 + s34;\ const int a1 = s16 + s25;\ const int a2 = s07 - s34;\ const int a3 = s16 - s25;\ const int d07 = SRC(0) - SRC(7);\ const int d16 = SRC(1) - SRC(6);\ const int d25 = SRC(2) - SRC(5);\ const int d34 = SRC(3) - SRC(4);\ const int a4 = d16 + d25 + (d07 + (d07>>1));\ const int a5 = d07 - d34 - (d25 + (d25>>1));\ const int a6 = d07 + d34 - (d16 + (d16>>1));\ const int a7 = d16 - d25 + (d34 + (d34>>1));\ DST(0) = a0 + a1 ;\ DST(1) = a4 + (a7>>2);\ DST(2) = a2 + (a3>>1);\ DST(3) = a5 + (a6>>2);\ DST(4) = a0 - a1 ;\ DST(5) = a6 - (a5>>2);\ DST(6) = (a2>>1) - a3 ;\ DST(7) = (a4>>2) - a7 ;\ } static void sub8x8_dct8( int16_t dct[8][8], uint8_t *pix1, uint8_t *pix2 ) { int i; int16_t tmp[8][8]; pixel_sub_wxh( (int16_t*)tmp, 8, pix1, FENC_STRIDE, pix2, FDEC_STRIDE ); #define SRC(x) tmp[x][i] #define DST(x) tmp[x][i] for( i = 0; i < 8; i++ ) DCT8_1D #undef SRC #undef DST #define SRC(x) tmp[i][x] #define DST(x) dct[x][i] for( i = 0; i < 8; i++ ) DCT8_1D #undef SRC #undef DST } static void sub16x16_dct8( int16_t dct[4][8][8], uint8_t *pix1, uint8_t *pix2 ) { sub8x8_dct8( dct[0], &pix1[0], &pix2[0] ); sub8x8_dct8( dct[1], &pix1[8], &pix2[8] ); sub8x8_dct8( dct[2], &pix1[8*FENC_STRIDE+0], &pix2[8*FDEC_STRIDE+0] ); sub8x8_dct8( dct[3], &pix1[8*FENC_STRIDE+8], &pix2[8*FDEC_STRIDE+8] ); } #define IDCT8_1D {\ const int a0 = SRC(0) + SRC(4);\ const int a2 = SRC(0) - SRC(4);\ const int a4 = (SRC(2)>>1) - SRC(6);\ const int a6 = (SRC(6)>>1) + SRC(2);\ const int b0 = a0 + a6;\ const int b2 = a2 + a4;\ const 
int b4 = a2 - a4;\ const int b6 = a0 - a6;\ const int a1 = -SRC(3) + SRC(5) - SRC(7) - (SRC(7)>>1);\ const int a3 = SRC(1) + SRC(7) - SRC(3) - (SRC(3)>>1);\ const int a5 = -SRC(1) + SRC(7) + SRC(5) + (SRC(5)>>1);\ const int a7 = SRC(3) + SRC(5) + SRC(1) + (SRC(1)>>1);\ const int b1 = (a7>>2) + a1;\ const int b3 = a3 + (a5>>2);\ const int b5 = (a3>>2) - a5;\ const int b7 = a7 - (a1>>2);\ DST(0, b0 + b7);\ DST(1, b2 + b5);\ DST(2, b4 + b3);\ DST(3, b6 + b1);\ DST(4, b6 - b1);\ DST(5, b4 - b3);\ DST(6, b2 - b5);\ DST(7, b0 - b7);\ } static void add8x8_idct8( uint8_t *dst, int16_t dct[8][8] ) { int i; dct[0][0] += 32; // rounding for the >>6 at the end #define SRC(x) dct[x][i] #define DST(x,rhs) dct[x][i] = (rhs) for( i = 0; i < 8; i++ ) IDCT8_1D #undef SRC #undef DST #define SRC(x) dct[i][x] #define DST(x,rhs) dst[i + x*FDEC_STRIDE] = x264_clip_uint8( dst[i + x*FDEC_STRIDE] + ((rhs) >> 6) ); for( i = 0; i < 8; i++ ) IDCT8_1D #undef SRC #undef DST } static void add16x16_idct8( uint8_t *dst, int16_t dct[4][8][8] ) { add8x8_idct8( &dst[0], dct[0] ); add8x8_idct8( &dst[8], dct[1] ); add8x8_idct8( &dst[8*FDEC_STRIDE+0], dct[2] ); add8x8_idct8( &dst[8*FDEC_STRIDE+8], dct[3] ); } /**************************************************************************** * x264_dct_init: ****************************************************************************/ void x264_dct_init( int cpu, x264_dct_function_t *dctf ) { dctf->sub4x4_dct = sub4x4_dct; dctf->add4x4_idct = add4x4_idct; dctf->sub8x8_dct = sub8x8_dct; dctf->add8x8_idct = add8x8_idct; dctf->sub16x16_dct = sub16x16_dct; dctf->add16x16_idct = add16x16_idct; dctf->sub8x8_dct8 = sub8x8_dct8; dctf->add8x8_idct8 = add8x8_idct8; dctf->sub16x16_dct8 = sub16x16_dct8; dctf->add16x16_idct8 = add16x16_idct8; dctf->dct4x4dc = dct4x4dc; dctf->idct4x4dc = idct4x4dc; #ifdef HAVE_MMX if( cpu&X264_CPU_MMX ) { dctf->sub4x4_dct = x264_sub4x4_dct_mmx; dctf->add4x4_idct = x264_add4x4_idct_mmx; dctf->dct4x4dc = x264_dct4x4dc_mmx; dctf->idct4x4dc = 
x264_idct4x4dc_mmx; #ifndef ARCH_X86_64 dctf->sub8x8_dct = x264_sub8x8_dct_mmx; dctf->sub16x16_dct = x264_sub16x16_dct_mmx; dctf->add8x8_idct = x264_add8x8_idct_mmx; dctf->add16x16_idct = x264_add16x16_idct_mmx; dctf->sub8x8_dct8 = x264_sub8x8_dct8_mmx; dctf->sub16x16_dct8 = x264_sub16x16_dct8_mmx; dctf->add8x8_idct8 = x264_add8x8_idct8_mmx; dctf->add16x16_idct8= x264_add16x16_idct8_mmx; #endif } if( cpu&X264_CPU_SSE2 ) { dctf->sub8x8_dct8 = x264_sub8x8_dct8_sse2; dctf->sub16x16_dct8 = x264_sub16x16_dct8_sse2; dctf->add8x8_idct8 = x264_add8x8_idct8_sse2; dctf->add16x16_idct8= x264_add16x16_idct8_sse2; dctf->sub8x8_dct = x264_sub8x8_dct_sse2; dctf->sub16x16_dct = x264_sub16x16_dct_sse2; dctf->add8x8_idct = x264_add8x8_idct_sse2; dctf->add16x16_idct = x264_add16x16_idct_sse2; } #endif //HAVE_MMX #ifdef ARCH_PPC if( cpu&X264_CPU_ALTIVEC ) { dctf->sub4x4_dct = x264_sub4x4_dct_altivec; dctf->sub8x8_dct = x264_sub8x8_dct_altivec; dctf->sub16x16_dct = x264_sub16x16_dct_altivec; dctf->add4x4_idct = x264_add4x4_idct_altivec; dctf->add8x8_idct = x264_add8x8_idct_altivec; dctf->add16x16_idct = x264_add16x16_idct_altivec; dctf->sub8x8_dct8 = x264_sub8x8_dct8_altivec; dctf->sub16x16_dct8 = x264_sub16x16_dct8_altivec; dctf->add8x8_idct8 = x264_add8x8_idct8_altivec; dctf->add16x16_idct8= x264_add16x16_idct8_altivec; } #endif } void x264_dct_init_weights( void ) { int i, j; #pragma omp parallel for for( j=0; j<2; j++ ) { for( i=0; i<16; i++ ) x264_dct4_weight2_zigzag[j][i] = x264_dct4_weight2_tab[ x264_zigzag_scan4[j][i] ]; for( i=0; i<64; i++ ) x264_dct8_weight2_zigzag[j][i] = x264_dct8_weight2_tab[ x264_zigzag_scan8[j][i] ]; } } // gcc pessimizes multi-dimensional arrays here, even with constant indices #define ZIG(i,y,x) level[i] = dct[0][x*8+y]; #define ZIGZAG8_FRAME\ ZIG( 0,0,0) ZIG( 1,0,1) ZIG( 2,1,0) ZIG( 3,2,0)\ ZIG( 4,1,1) ZIG( 5,0,2) ZIG( 6,0,3) ZIG( 7,1,2)\ ZIG( 8,2,1) ZIG( 9,3,0) ZIG(10,4,0) ZIG(11,3,1)\ ZIG(12,2,2) ZIG(13,1,3) ZIG(14,0,4) ZIG(15,0,5)\ ZIG(16,1,4) 
ZIG(17,2,3) ZIG(18,3,2) ZIG(19,4,1)\ ZIG(20,5,0) ZIG(21,6,0) ZIG(22,5,1) ZIG(23,4,2)\ ZIG(24,3,3) ZIG(25,2,4) ZIG(26,1,5) ZIG(27,0,6)\ ZIG(28,0,7) ZIG(29,1,6) ZIG(30,2,5) ZIG(31,3,4)\ ZIG(32,4,3) ZIG(33,5,2) ZIG(34,6,1) ZIG(35,7,0)\ ZIG(36,7,1) ZIG(37,6,2) ZIG(38,5,3) ZIG(39,4,4)\ ZIG(40,3,5) ZIG(41,2,6) ZIG(42,1,7) ZIG(43,2,7)\ ZIG(44,3,6) ZIG(45,4,5) ZIG(46,5,4) ZIG(47,6,3)\ ZIG(48,7,2) ZIG(49,7,3) ZIG(50,6,4) ZIG(51,5,5)\ ZIG(52,4,6) ZIG(53,3,7) ZIG(54,4,7) ZIG(55,5,6)\ ZIG(56,6,5) ZIG(57,7,4) ZIG(58,7,5) ZIG(59,6,6)\ ZIG(60,5,7) ZIG(61,6,7) ZIG(62,7,6) ZIG(63,7,7)\ #define ZIGZAG8_FIELD\ ZIG( 0,0,0) ZIG( 1,1,0) ZIG( 2,2,0) ZIG( 3,0,1)\ ZIG( 4,1,1) ZIG( 5,3,0) ZIG( 6,4,0) ZIG( 7,2,1)\ ZIG( 8,0,2) ZIG( 9,3,1) ZIG(10,5,0) ZIG(11,6,0)\ ZIG(12,7,0) ZIG(13,4,1) ZIG(14,1,2) ZIG(15,0,3)\ ZIG(16,2,2) ZIG(17,5,1) ZIG(18,6,1) ZIG(19,7,1)\ ZIG(20,3,2) ZIG(21,1,3) ZIG(22,0,4) ZIG(23,2,3)\ ZIG(24,4,2) ZIG(25,5,2) ZIG(26,6,2) ZIG(27,7,2)\ ZIG(28,3,3) ZIG(29,1,4) ZIG(30,0,5) ZIG(31,2,4)\ ZIG(32,4,3) ZIG(33,5,3) ZIG(34,6,3) ZIG(35,7,3)\ ZIG(36,3,4) ZIG(37,1,5) ZIG(38,0,6) ZIG(39,2,5)\ ZIG(40,4,4) ZIG(41,5,4) ZIG(42,6,4) ZIG(43,7,4)\ ZIG(44,3,5) ZIG(45,1,6) ZIG(46,2,6) ZIG(47,4,5)\ ZIG(48,5,5) ZIG(49,6,5) ZIG(50,7,5) ZIG(51,3,6)\ ZIG(52,0,7) ZIG(53,1,7) ZIG(54,4,6) ZIG(55,5,6)\ ZIG(56,6,6) ZIG(57,7,6) ZIG(58,2,7) ZIG(59,3,7)\ ZIG(60,4,7) ZIG(61,5,7) ZIG(62,6,7) ZIG(63,7,7) #define ZIGZAG4_FRAME\ ZIG( 0,0,0) ZIG( 1,0,1) ZIG( 2,1,0) ZIG( 3,2,0)\ ZIG( 4,1,1) ZIG( 5,0,2) ZIG( 6,0,3) ZIG( 7,1,2)\ ZIG( 8,2,1) ZIG( 9,3,0) ZIG(10,3,1) ZIG(11,2,2)\ ZIG(12,1,3) ZIG(13,2,3) ZIG(14,3,2) ZIG(15,3,3) #define ZIGZAG4_FIELD\ ZIG( 0,0,0) ZIG( 1,1,0) ZIG( 2,0,1) ZIG( 3,2,0)\ ZIG( 4,3,0) ZIG( 5,1,1) ZIG( 6,2,1) ZIG( 7,3,1)\ ZIG( 8,0,2) ZIG( 9,1,2) ZIG(10,2,2) ZIG(11,3,2)\ ZIG(12,0,3) ZIG(13,1,3) ZIG(14,2,3) ZIG(15,3,3) static void zigzag_scan_8x8_frame( int16_t level[64], int16_t dct[8][8] ) { ZIGZAG8_FRAME } static void zigzag_scan_8x8_field( int16_t level[64], int16_t dct[8][8] ) { ZIGZAG8_FIELD 
} #undef ZIG #define ZIG(i,y,x) level[i] = dct[0][x*4+y]; static void zigzag_scan_4x4_frame( int16_t level[16], int16_t dct[4][4] ) { ZIGZAG4_FRAME } static void zigzag_scan_4x4_field( int16_t level[16], int16_t dct[4][4] ) { *(uint32_t*)level = *(uint32_t*)dct; ZIG(2,0,1) ZIG(3,2,0) ZIG(4,3,0) ZIG(5,1,1) *(uint32_t*)(level+6) = *(uint32_t*)(*dct+6); *(uint64_t*)(level+8) = *(uint64_t*)(*dct+8); *(uint64_t*)(level+12) = *(uint64_t*)(*dct+12); } #undef ZIG #define ZIG(i,y,x) {\ int oe = x+y*FENC_STRIDE;\ int od = x+y*FDEC_STRIDE;\ level[i] = p_src[oe] - p_dst[od];\ } #define COPY4x4\ *(uint32_t*)(p_dst+0*FDEC_STRIDE) = *(uint32_t*)(p_src+0*FENC_STRIDE);\ *(uint32_t*)(p_dst+1*FDEC_STRIDE) = *(uint32_t*)(p_src+1*FENC_STRIDE);\ *(uint32_t*)(p_dst+2*FDEC_STRIDE) = *(uint32_t*)(p_src+2*FENC_STRIDE);\ *(uint32_t*)(p_dst+3*FDEC_STRIDE) = *(uint32_t*)(p_src+3*FENC_STRIDE); #define COPY8x8\ *(uint64_t*)(p_dst+0*FDEC_STRIDE) = *(uint64_t*)(p_src+0*FENC_STRIDE);\ *(uint64_t*)(p_dst+1*FDEC_STRIDE) = *(uint64_t*)(p_src+1*FENC_STRIDE);\ *(uint64_t*)(p_dst+2*FDEC_STRIDE) = *(uint64_t*)(p_src+2*FENC_STRIDE);\ *(uint64_t*)(p_dst+3*FDEC_STRIDE) = *(uint64_t*)(p_src+3*FENC_STRIDE);\ *(uint64_t*)(p_dst+4*FDEC_STRIDE) = *(uint64_t*)(p_src+4*FENC_STRIDE);\ *(uint64_t*)(p_dst+5*FDEC_STRIDE) = *(uint64_t*)(p_src+5*FENC_STRIDE);\ *(uint64_t*)(p_dst+6*FDEC_STRIDE) = *(uint64_t*)(p_src+6*FENC_STRIDE);\ *(uint64_t*)(p_dst+7*FDEC_STRIDE) = *(uint64_t*)(p_src+7*FENC_STRIDE); static void zigzag_sub_4x4_frame( int16_t level[16], const uint8_t *p_src, uint8_t *p_dst ) { ZIGZAG4_FRAME COPY4x4 } static void zigzag_sub_4x4_field( int16_t level[16], const uint8_t *p_src, uint8_t *p_dst ) { ZIGZAG4_FIELD COPY4x4 } static void zigzag_sub_8x8_frame( int16_t level[64], const uint8_t *p_src, uint8_t *p_dst ) { ZIGZAG8_FRAME COPY8x8 } static void zigzag_sub_8x8_field( int16_t level[64], const uint8_t *p_src, uint8_t *p_dst ) { ZIGZAG8_FIELD COPY8x8 } #undef ZIG #undef COPY4x4 static void 
zigzag_interleave_8x8_cavlc( int16_t *dst, int16_t *src ) { int i,j; for( i=0; i<4; i++ ) for( j=0; j<16; j++ ) dst[i*16+j] = src[i+j*4]; } void x264_zigzag_init( int cpu, x264_zigzag_function_t *pf, int b_interlaced ) { if( b_interlaced ) { pf->scan_8x8 = zigzag_scan_8x8_field; pf->scan_4x4 = zigzag_scan_4x4_field; pf->sub_8x8 = zigzag_sub_8x8_field; pf->sub_4x4 = zigzag_sub_4x4_field; #ifdef HAVE_MMX if( cpu&X264_CPU_MMXEXT ) pf->scan_4x4 = x264_zigzag_scan_4x4_field_mmxext; #endif #ifdef ARCH_PPC if( cpu&X264_CPU_ALTIVEC ) pf->scan_4x4 = x264_zigzag_scan_4x4_field_altivec; #endif } else { pf->scan_8x8 = zigzag_scan_8x8_frame; pf->scan_4x4 = zigzag_scan_4x4_frame; pf->sub_8x8 = zigzag_sub_8x8_frame; pf->sub_4x4 = zigzag_sub_4x4_frame; #ifdef HAVE_MMX if( cpu&X264_CPU_MMX ) pf->scan_4x4 = x264_zigzag_scan_4x4_frame_mmx; if( cpu&X264_CPU_MMXEXT ) pf->scan_8x8 = x264_zigzag_scan_8x8_frame_mmxext; if( cpu&X264_CPU_SSE2_IS_FAST ) pf->scan_8x8 = x264_zigzag_scan_8x8_frame_sse2; if( cpu&X264_CPU_SSSE3 ) { pf->sub_4x4 = x264_zigzag_sub_4x4_frame_ssse3; pf->scan_8x8 = x264_zigzag_scan_8x8_frame_ssse3; } if( cpu&X264_CPU_PHADD_IS_FAST ) pf->scan_4x4 = x264_zigzag_scan_4x4_frame_ssse3; #endif #ifdef ARCH_PPC if( cpu&X264_CPU_ALTIVEC ) pf->scan_4x4 = x264_zigzag_scan_4x4_frame_altivec; #endif } pf->interleave_8x8_cavlc = zigzag_interleave_8x8_cavlc; #ifdef HAVE_MMX if( cpu&X264_CPU_MMX ) pf->interleave_8x8_cavlc = x264_zigzag_interleave_8x8_cavlc_mmx; #endif }
/* ===== GB_unaryop__abs_uint16_uint8.c ===== */
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__abs_uint16_uint8
// op(A') function: GB_tran__abs_uint16_uint8

// C type:   uint16_t
// A type:   uint8_t
// cast:     uint16_t cij = (uint16_t) aij
// unaryop:  cij = aij

// (ABS of an unsigned value is the identity, hence the plain assignment below.)

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

// Cx [p]: the p-th entry of the output array
#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CASTING(z, x) \
    uint16_t z = (uint16_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{  \
    /* aij = Ax [pA] */  \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */  \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT16 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx:       output array, anz entries, already allocated by the caller
// Ax:       input array, anz entries
// anz:      number of entries to process
// nthreads: number of OpenMP threads to use (each entry is independent)
GrB_Info GB_unop__abs_uint16_uint8
(
    uint16_t *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in GB_unaryop_transpose.c, which expands the
// GB_* macros defined above for this type combination.
GrB_Info GB_tran__abs_uint16_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ===== sse.h ===== */
/* SPDX-License-Identifier: MIT * * Permission is hereby granted, free of charge, to any person * obtaining a copy of this software and associated documentation * files (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, * modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be * included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * Copyright: * 2017-2020 Evan Nemerson <evan@nemerson.com> * 2015-2017 John W. 
Ratcliff <jratcliffscarab@gmail.com> * 2015 Brandon Rowlett <browlett@nvidia.com> * 2015 Ken Fast <kfast@gdeb.com> */ #if !defined(SIMDE_X86_SSE_H) #define SIMDE_X86_SSE_H #include "mmx.h" #if defined(_WIN32) #include <windows.h> #endif HEDLEY_DIAGNOSTIC_PUSH SIMDE_DISABLE_UNWANTED_DIAGNOSTICS SIMDE_BEGIN_DECLS_ typedef union { #if defined(SIMDE_VECTOR_SUBSCRIPT) SIMDE_ALIGN(16) int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN(16) int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN(16) int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN(16) int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN(16) uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN(16) uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN(16) uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN(16) uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; #if defined(SIMDE_HAVE_INT128_) SIMDE_ALIGN(16) simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN(16) simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; #endif SIMDE_ALIGN(16) simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN(16) int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; SIMDE_ALIGN(16) uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; #else SIMDE_ALIGN(16) int8_t i8[16]; SIMDE_ALIGN(16) int16_t i16[8]; SIMDE_ALIGN(16) int32_t i32[4]; SIMDE_ALIGN(16) int64_t i64[2]; SIMDE_ALIGN(16) uint8_t u8[16]; SIMDE_ALIGN(16) uint16_t u16[8]; SIMDE_ALIGN(16) uint32_t u32[4]; SIMDE_ALIGN(16) uint64_t u64[2]; #if defined(SIMDE_HAVE_INT128_) SIMDE_ALIGN(16) simde_int128 i128[1]; SIMDE_ALIGN(16) simde_uint128 u128[1]; #endif SIMDE_ALIGN(16) simde_float32 f32[4]; SIMDE_ALIGN(16) int_fast32_t i32f[16 / sizeof(int_fast32_t)]; SIMDE_ALIGN(16) uint_fast32_t u32f[16 / sizeof(uint_fast32_t)]; #endif SIMDE_ALIGN(16) simde__m64_private m64_private[2]; SIMDE_ALIGN(16) simde__m64 m64[2]; #if defined(SIMDE_X86_SSE_NATIVE) SIMDE_ALIGN(16) __m128 n; #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) SIMDE_ALIGN(16) 
int8x16_t neon_i8; SIMDE_ALIGN(16) int16x8_t neon_i16; SIMDE_ALIGN(16) int32x4_t neon_i32; SIMDE_ALIGN(16) int64x2_t neon_i64; SIMDE_ALIGN(16) uint8x16_t neon_u8; SIMDE_ALIGN(16) uint16x8_t neon_u16; SIMDE_ALIGN(16) uint32x4_t neon_u32; SIMDE_ALIGN(16) uint64x2_t neon_u64; SIMDE_ALIGN(16) float32x4_t neon_f32; #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) SIMDE_ALIGN(16) float64x2_t neon_f64; #endif #elif defined(SIMDE_WASM_SIMD128_NATIVE) SIMDE_ALIGN(16) v128_t wasm_v128; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8; SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16; SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32; SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8; SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16; SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32; SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32; #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64; SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64; SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64; #endif #endif } simde__m128_private; #if defined(SIMDE_X86_SSE_NATIVE) typedef __m128 simde__m128; #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) typedef float32x4_t simde__m128; #elif defined(SIMDE_WASM_SIMD128_NATIVE) typedef v128_t simde__m128; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128; #elif defined(SIMDE_VECTOR_SUBSCRIPT) typedef simde_float32 simde__m128 SIMDE_ALIGN(16) SIMDE_VECTOR(16) SIMDE_MAY_ALIAS; #else typedef simde__m128_private simde__m128; #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) typedef simde__m128 __m128; #endif HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128), "simde__m128 size incorrect"); HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128_private), 
"simde__m128_private size incorrect"); #if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF) HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128) == 16, "simde__m128 is not 16-byte aligned"); HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128_private) == 16, "simde__m128_private is not 16-byte aligned"); #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde__m128_from_private(simde__m128_private v) { simde__m128 r; simde_memcpy(&r, &v, sizeof(r)); return r; } SIMDE_FUNCTION_ATTRIBUTES simde__m128_private simde__m128_to_private(simde__m128 v) { simde__m128_private r; simde_memcpy(&r, &v, sizeof(r)); return r; } #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int8x16_t, neon, i8) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int16x8_t, neon, i16) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int32x4_t, neon, i32) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int64x2_t, neon, i64) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint8x16_t, neon, u8) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint16x8_t, neon, u16) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint32x4_t, neon, u32) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint64x2_t, neon, u64) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float32x4_t, neon, f32) #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float64x2_t, neon, f64) #endif #endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */ #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed char), altivec, i8) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed short), altivec, i16) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed int), altivec, i32) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16) 
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), altivec, u32) #if defined(SIMDE_BUG_GCC_95782) SIMDE_FUNCTION_ATTRIBUTES SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128_to_altivec_f32(simde__m128 value) { simde__m128_private r_ = simde__m128_to_private(value); return r_.altivec_f32; } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde__m128_from_altivec_f32(SIMDE_POWER_ALTIVEC_VECTOR(float) value) { simde__m128_private r_; r_.altivec_f32 = value; return simde__m128_from_private(r_); } #else SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(float), altivec, f32) #endif #if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64) SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64) #endif #endif /* defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) */ enum { #if defined(SIMDE_X86_SSE_NATIVE) SIMDE_MM_ROUND_NEAREST = _MM_ROUND_NEAREST, SIMDE_MM_ROUND_DOWN = _MM_ROUND_DOWN, SIMDE_MM_ROUND_UP = _MM_ROUND_UP, SIMDE_MM_ROUND_TOWARD_ZERO = _MM_ROUND_TOWARD_ZERO #else SIMDE_MM_ROUND_NEAREST = 0x0000, SIMDE_MM_ROUND_DOWN = 0x2000, SIMDE_MM_ROUND_UP = 0x4000, SIMDE_MM_ROUND_TOWARD_ZERO = 0x6000 #endif }; #if defined(_MM_FROUND_TO_NEAREST_INT) # define SIMDE_MM_FROUND_TO_NEAREST_INT _MM_FROUND_TO_NEAREST_INT # define SIMDE_MM_FROUND_TO_NEG_INF _MM_FROUND_TO_NEG_INF # define SIMDE_MM_FROUND_TO_POS_INF _MM_FROUND_TO_POS_INF # define SIMDE_MM_FROUND_TO_ZERO _MM_FROUND_TO_ZERO # define SIMDE_MM_FROUND_CUR_DIRECTION _MM_FROUND_CUR_DIRECTION # define SIMDE_MM_FROUND_RAISE_EXC _MM_FROUND_RAISE_EXC # define SIMDE_MM_FROUND_NO_EXC _MM_FROUND_NO_EXC #else # define SIMDE_MM_FROUND_TO_NEAREST_INT 0x00 # define SIMDE_MM_FROUND_TO_NEG_INF 0x01 # define SIMDE_MM_FROUND_TO_POS_INF 0x02 # define SIMDE_MM_FROUND_TO_ZERO 0x03 # define SIMDE_MM_FROUND_CUR_DIRECTION 0x04 # define SIMDE_MM_FROUND_RAISE_EXC 0x00 # define 
SIMDE_MM_FROUND_NO_EXC 0x08 #endif #define SIMDE_MM_FROUND_NINT \ (SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_FLOOR \ (SIMDE_MM_FROUND_TO_NEG_INF | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_CEIL \ (SIMDE_MM_FROUND_TO_POS_INF | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_TRUNC \ (SIMDE_MM_FROUND_TO_ZERO | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_RINT \ (SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_RAISE_EXC) #define SIMDE_MM_FROUND_NEARBYINT \ (SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_NO_EXC) #if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) && !defined(_MM_FROUND_TO_NEAREST_INT) # define _MM_FROUND_TO_NEAREST_INT SIMDE_MM_FROUND_TO_NEAREST_INT # define _MM_FROUND_TO_NEG_INF SIMDE_MM_FROUND_TO_NEG_INF # define _MM_FROUND_TO_POS_INF SIMDE_MM_FROUND_TO_POS_INF # define _MM_FROUND_TO_ZERO SIMDE_MM_FROUND_TO_ZERO # define _MM_FROUND_CUR_DIRECTION SIMDE_MM_FROUND_CUR_DIRECTION # define _MM_FROUND_RAISE_EXC SIMDE_MM_FROUND_RAISE_EXC # define _MM_FROUND_NINT SIMDE_MM_FROUND_NINT # define _MM_FROUND_FLOOR SIMDE_MM_FROUND_FLOOR # define _MM_FROUND_CEIL SIMDE_MM_FROUND_CEIL # define _MM_FROUND_TRUNC SIMDE_MM_FROUND_TRUNC # define _MM_FROUND_RINT SIMDE_MM_FROUND_RINT # define _MM_FROUND_NEARBYINT SIMDE_MM_FROUND_NEARBYINT #endif SIMDE_FUNCTION_ATTRIBUTES unsigned int SIMDE_MM_GET_ROUNDING_MODE(void) { #if defined(SIMDE_X86_SSE_NATIVE) return _MM_GET_ROUNDING_MODE(); #elif defined(SIMDE_HAVE_FENV_H) unsigned int vfe_mode; switch (fegetround()) { #if defined(FE_TONEAREST) case FE_TONEAREST: vfe_mode = SIMDE_MM_ROUND_NEAREST; break; #endif #if defined(FE_TOWARDZERO) case FE_TOWARDZERO: vfe_mode = SIMDE_MM_ROUND_DOWN; break; #endif #if defined(FE_UPWARD) case FE_UPWARD: vfe_mode = SIMDE_MM_ROUND_UP; break; #endif #if defined(FE_DOWNWARD) case FE_DOWNWARD: vfe_mode = SIMDE_MM_ROUND_TOWARD_ZERO; break; #endif default: vfe_mode = SIMDE_MM_ROUND_NEAREST; break; } return vfe_mode; #else return 
SIMDE_MM_ROUND_NEAREST; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _MM_GET_ROUNDING_MODE() SIMDE_MM_GET_ROUNDING_MODE() #endif SIMDE_FUNCTION_ATTRIBUTES void SIMDE_MM_SET_ROUNDING_MODE(unsigned int a) { #if defined(SIMDE_X86_SSE_NATIVE) _MM_SET_ROUNDING_MODE(a); #elif defined(SIMDE_HAVE_FENV_H) int fe_mode = FE_TONEAREST; switch (a) { #if defined(FE_TONEAREST) case SIMDE_MM_ROUND_NEAREST: fe_mode = FE_TONEAREST; break; #endif #if defined(FE_TOWARDZERO) case SIMDE_MM_ROUND_TOWARD_ZERO: fe_mode = FE_TOWARDZERO; break; #endif #if defined(FE_DOWNWARD) case SIMDE_MM_ROUND_DOWN: fe_mode = FE_DOWNWARD; break; #endif #if defined(FE_UPWARD) case SIMDE_MM_ROUND_UP: fe_mode = FE_UPWARD; break; #endif default: return; } fesetround(fe_mode); #else (void) a; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _MM_SET_ROUNDING_MODE(a) SIMDE_MM_SET_ROUNDING_MODE(a) #endif SIMDE_FUNCTION_ATTRIBUTES uint32_t simde_mm_getcsr (void) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_getcsr(); #else return SIMDE_MM_GET_ROUNDING_MODE(); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_getcsr() simde_mm_getcsr() #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_setcsr (uint32_t a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_setcsr(a); #else SIMDE_MM_SET_ROUNDING_MODE(HEDLEY_STATIC_CAST(unsigned int, a)); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) #define _mm_setcsr(a) simde_mm_setcsr(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_round_ps (simde__m128 a, int rounding) SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15) { simde__m128_private r_, a_ = simde__m128_to_private(a); /* For architectures which lack a current direction SIMD instruction. * * Note that NEON actually has a current rounding mode instruction, * but in ARMv8+ the rounding mode is ignored and nearest is always * used, so we treat ARMv7 as having a rounding mode but ARMv8 as * not. 
*/ #if \ defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || \ defined(SIMDE_ARM_NEON_A32V8) if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION) rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) << 13; #endif switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) { case SIMDE_MM_FROUND_CUR_DIRECTION: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0 r_.neon_f32 = vrndiq_f32(a_.neon_f32); #elif defined(simde_math_nearbyintf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_nearbyintf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; case SIMDE_MM_FROUND_TO_NEAREST_INT: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0 r_.neon_f32 = vrndaq_f32(a_.neon_f32); #elif defined(simde_math_roundf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_roundf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; case SIMDE_MM_FROUND_TO_NEG_INF: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_floor(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0 r_.neon_f32 = vrndmq_f32(a_.neon_f32); #elif defined(simde_math_floorf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_floorf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; case SIMDE_MM_FROUND_TO_POS_INF: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_ceil(a_.altivec_f32)); #elif 
defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0 r_.neon_f32 = vrndpq_f32(a_.neon_f32); #elif defined(simde_math_ceilf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_ceilf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; case SIMDE_MM_FROUND_TO_ZERO: #if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_trunc(a_.altivec_f32)); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0 r_.neon_f32 = vrndq_f32(a_.neon_f32); #elif defined(simde_math_truncf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_truncf(a_.f32[i]); } #else HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); #endif break; default: HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_pd()); } return simde__m128_from_private(r_); } #if defined(SIMDE_X86_SSE4_1_NATIVE) #define simde_mm_round_ps(a, rounding) _mm_round_ps(a, rounding) #endif #if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) #define _mm_round_ps(a, rounding) simde_mm_round_ps(a, rounding) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_set_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_set_ps(e3, e2, e1, e0); #else simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) SIMDE_ALIGN(16) simde_float32 data[4] = { e0, e1, e2, e3 }; r_.neon_f32 = vld1q_f32(data); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_make(e0, e1, e2, e3); #else r_.f32[0] = e0; r_.f32[1] = e1; r_.f32[2] = e2; r_.f32[3] = e3; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_set_ps(e3, e2, e1, e0) simde_mm_set_ps(e3, e2, e1, e0) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_set_ps1 (simde_float32 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_set_ps1(a); #elif 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdupq_n_f32(a); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) (void) a; return vec_splats(a); #else return simde_mm_set_ps(a, a, a, a); #endif } #define simde_mm_set1_ps(a) simde_mm_set_ps1(a) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_set_ps1(a) simde_mm_set_ps1(a) # define _mm_set1_ps(a) simde_mm_set1_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_move_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_move_ss(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(b_.neon_f32, 0), a_.neon_f32, 0); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) m = { 16, 17, 18, 19, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }; r_.altivec_f32 = vec_perm(a_.altivec_f32, b_.altivec_f32, m); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v8x16_shuffle(b_.wasm_v128, a_.wasm_v128, 0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 4, 1, 2, 3); #else r_.f32[0] = b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_move_ss(a, b) simde_mm_move_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_add_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_add_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vaddq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_add(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = 
vec_add(a_.altivec_f32, b_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.f32 = a_.f32 + b_.f32; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = a_.f32[i] + b_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_add_ps(a, b) simde_mm_add_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_add_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_add_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_add_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t b0 = vgetq_lane_f32(b_.neon_f32, 0); float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0); // the upper values in the result must be the remnants of <a>. r_.neon_f32 = vaddq_f32(a_.neon_f32, value); #else r_.f32[0] = a_.f32[0] + b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_add_ss(a, b) simde_mm_add_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_and_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_and_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vandq_s32(a_.neon_i32, b_.neon_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_and(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = a_.i32 & b_.i32; #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_and(a_.altivec_f32, b_.altivec_f32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = a_.i32[i] & b_.i32[i]; } #endif return 
simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_and_ps(a, b) simde_mm_and_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_andnot_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_andnot_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_andnot(b_.wasm_v128, a_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_andc(b_.altivec_f32, a_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = ~a_.i32 & b_.i32; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = ~(a_.i32[i]) & b_.i32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_andnot_ps(a, b) simde_mm_andnot_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_xor_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_xor_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = veorq_s32(a_.neon_i32, b_.neon_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_xor(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_i32 = vec_xor(a_.altivec_i32, b_.altivec_i32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32f = a_.i32f ^ b_.i32f; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { r_.u32[i] = a_.u32[i] ^ b_.u32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_xor_ps(a, b) simde_mm_xor_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 
simde_mm_or_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_or_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vorrq_s32(a_.neon_i32, b_.neon_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_or(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_i32 = vec_or(a_.altivec_i32, b_.altivec_i32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32f = a_.i32f | b_.i32f; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { r_.u32[i] = a_.u32[i] | b_.u32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_or_ps(a, b) simde_mm_or_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_not_ps(simde__m128 a) { #if defined(SIMDE_X86_SSE2_NATIVE) /* Note: we use ints instead of floats because we don't want cmpeq * to return false for (NaN, NaN) */ __m128i ai = _mm_castps_si128(a); return _mm_castsi128_ps(_mm_andnot_si128(ai, _mm_cmpeq_epi32(ai, ai))); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vmvnq_s32(a_.neon_i32); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_not(a_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = ~a_.i32; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = ~(a_.i32[i]); } #endif return simde__m128_from_private(r_); #endif } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_select_ps(simde__m128 a, simde__m128 b, simde__m128 mask) { /* This function is for when you want to blend two elements together * according to a mask. 
It is similar to _mm_blendv_ps, except that * it is undefined whether the blend is based on the highest bit in * each lane (like blendv) or just bitwise operations. This allows * us to implement the function efficiently everywhere. * * Basically, you promise that all the lanes in mask are either 0 or * ~0. */ #if defined(SIMDE_X86_SSE4_1_NATIVE) return _mm_blendv_ps(a, b, mask); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b), mask_ = simde__m128_to_private(mask); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vbslq_s32(mask_.neon_u32, b_.neon_i32, a_.neon_i32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_bitselect(b_.wasm_v128, a_.wasm_v128, mask_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_i32 = vec_sel(a_.altivec_i32, b_.altivec_i32, mask_.altivec_u32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = a_.i32 ^ ((a_.i32 ^ b_.i32) & mask_.i32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = a_.i32[i] ^ ((a_.i32[i] ^ b_.i32[i]) & mask_.i32[i]); } #endif return simde__m128_from_private(r_); #endif } SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_avg_pu16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_avg_pu16(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u16 = vrhadd_u16(b_.neon_u16, a_.neon_u16); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_) uint32_t wa SIMDE_VECTOR(16); uint32_t wb SIMDE_VECTOR(16); uint32_t wr SIMDE_VECTOR(16); SIMDE_CONVERT_VECTOR_(wa, a_.u16); SIMDE_CONVERT_VECTOR_(wb, b_.u16); wr = (wa + wb + 1) >> 1; SIMDE_CONVERT_VECTOR_(r_.u16, wr); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { r_.u16[i] = (a_.u16[i] + 
b_.u16[i] + 1) >> 1;
    }
  #endif

  return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_avg_pu16(a, b) simde_mm_avg_pu16(a, b)
#  define _m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#endif

/* Rounded average of packed unsigned 8-bit integers, (a + b + 1) >> 1 per
 * lane (MMX/SSE PAVGB). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu8 (simde__m64 a, simde__m64 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_avg_pu8(a, b);
  #else
    simde__m64_private
      r_,
      a_ = simde__m64_to_private(a),
      b_ = simde__m64_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      /* vrhadd is NEON's rounding halving add — exactly PAVGB semantics. */
      r_.neon_u8 = vrhadd_u8(b_.neon_u8, a_.neon_u8);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
      /* Widen to 16 bits so a + b + 1 cannot overflow before the shift. */
      uint16_t wa SIMDE_VECTOR(16);
      uint16_t wb SIMDE_VECTOR(16);
      uint16_t wr SIMDE_VECTOR(16);
      SIMDE_CONVERT_VECTOR_(wa, a_.u8);
      SIMDE_CONVERT_VECTOR_(wb, b_.u8);
      wr = (wa + wb + 1) >> 1;
      SIMDE_CONVERT_VECTOR_(r_.u8, wr);
    #else
      /* Scalar fallback: uint8_t operands promote to int, so the +1 bias
       * cannot overflow here either. */
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
        r_.u8[i] = (a_.u8[i] + b_.u8[i] + 1) >> 1;
      }
    #endif

    return simde__m64_from_private(r_);
  #endif
}
#define simde_m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_avg_pu8(a, b) simde_mm_avg_pu8(a, b)
#  define _m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#endif

/* Absolute value of each single-precision lane.  simde extension (x_ prefix):
 * SSE1 has no direct abs instruction, hence the AVX-512 round-trip on x86. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_abs_ps(simde__m128 a) {
  #if defined(SIMDE_X86_AVX512F_NATIVE)
    return _mm512_castps512_ps128(_mm512_abs_ps(_mm512_castps128_ps512(a)));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vabsq_f32(a_.neon_f32);
    #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
      r_.altivec_f32 = vec_abs(a_.altivec_f32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_abs(a_.wasm_v128);
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.f32[i] = simde_math_fabsf(a_.f32[i]);
      }
#endif return simde__m128_from_private(r_); #endif } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpeq_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpeq_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vceqq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_eq(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), a_.f32 == b_.f32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpeq_ps(a, b) simde_mm_cmpeq_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpeq_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpeq_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmpeq_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? 
~UINT32_C(0) : UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpeq_ss(a, b) simde_mm_cmpeq_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpge_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpge_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vcgeq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_ge(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpge(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpge_ps(a, b) simde_mm_cmpge_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpge_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI) return _mm_cmpge_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmpge_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? 
~UINT32_C(0) : UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpge_ss(a, b) simde_mm_cmpge_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpgt_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpgt_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vcgtq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpgt(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpgt_ps(a, b) simde_mm_cmpgt_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpgt_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI) return _mm_cmpgt_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmpgt_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? 
~UINT32_C(0) : UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpgt_ss(a, b) simde_mm_cmpgt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmple_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmple_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vcleq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_le(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmple(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmple_ps(a, b) simde_mm_cmple_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmple_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmple_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmple_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? 
~UINT32_C(0) : UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmple_ss(a, b) simde_mm_cmple_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmplt_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmplt_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vcltq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmplt(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmplt_ps(a, b) simde_mm_cmplt_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmplt_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmplt_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmplt_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? 
~UINT32_C(0) : UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmplt_ss(a, b) simde_mm_cmplt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpneq_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpneq_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u32 = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_ne(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) && SIMDE_ARCH_POWER_CHECK(900) && !defined(HEDLEY_IBM_VERSION) /* vec_cmpne(SIMDE_POWER_ALTIVEC_VECTOR(float), SIMDE_POWER_ALTIVEC_VECTOR(float)) is missing from XL C/C++ v16.1.1, though the documentation (table 89 on page 432 of the IBM XL C/C++ for Linux Compiler Reference, Version 16.1.1) shows that it should be present. Both GCC and clang support it. */ r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpne(a_.altivec_f32, b_.altivec_f32)); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32)); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? 
~UINT32_C(0) : UINT32_C(0); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpneq_ps(a, b) simde_mm_cmpneq_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpneq_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpneq_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmpneq_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpneq_ss(a, b) simde_mm_cmpneq_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnge_ps (simde__m128 a, simde__m128 b) { return simde_mm_cmplt_ps(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpnge_ps(a, b) simde_mm_cmpnge_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnge_ss (simde__m128 a, simde__m128 b) { return simde_mm_cmplt_ss(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpnge_ss(a, b) simde_mm_cmpnge_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpngt_ps (simde__m128 a, simde__m128 b) { return simde_mm_cmple_ps(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpngt_ps(a, b) simde_mm_cmpngt_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpngt_ss (simde__m128 a, simde__m128 b) { return simde_mm_cmple_ss(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpngt_ss(a, b) simde_mm_cmpngt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnle_ps (simde__m128 a, simde__m128 b) { return simde_mm_cmpgt_ps(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # 
define _mm_cmpnle_ps(a, b) simde_mm_cmpnle_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnle_ss (simde__m128 a, simde__m128 b) { return simde_mm_cmpgt_ss(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpnle_ss(a, b) simde_mm_cmpnle_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnlt_ps (simde__m128 a, simde__m128 b) { return simde_mm_cmpge_ps(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpnlt_ps(a, b) simde_mm_cmpnlt_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpnlt_ss (simde__m128 a, simde__m128 b) { return simde_mm_cmpge_ss(a, b); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpnlt_ss(a, b) simde_mm_cmpnlt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpord_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpord_ps(a, b); #elif defined(SIMDE_WASM_SIMD128_NATIVE) return wasm_v128_and(wasm_f32x4_eq(a, a), wasm_f32x4_eq(b, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) /* Note: NEON does not have ordered compare builtin Need to compare a eq a and b eq b to check for NaN Do AND of results to get final */ uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32); r_.neon_u32 = vandq_u32(ceqaa, ceqbb); #elif defined(simde_math_isnanf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? 
UINT32_C(0) : ~UINT32_C(0);
      }
    #else
      HEDLEY_UNREACHABLE();
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpord_ps(a, b) simde_mm_cmpord_ps((a), (b))
#endif

/* Unordered compare (CMPUNORDPS): each lane is all-ones iff either input
 * lane is NaN, all-zeros otherwise. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_cmpunord_ps(a, b);
  #elif defined(SIMDE_WASM_SIMD128_NATIVE)
    /* x != x is true only for NaN lanes. */
    return wasm_v128_or(wasm_f32x4_ne(a, a), wasm_f32x4_ne(b, b));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      /* x == x fails only for NaN; NOT(AND) of the two self-compares flags
       * lanes where either input is NaN. */
      uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
      uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
      r_.neon_u32 = vmvnq_u32(vandq_u32(ceqaa, ceqbb));
    #elif defined(simde_math_isnanf)
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0);
      }
    #else
      HEDLEY_UNREACHABLE();
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_cmpunord_ps(a, b) simde_mm_cmpunord_ps((a), (b))
#endif

/* Scalar variant: lane 0 is the unordered test, lanes 1-3 pass through
 * from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
    return _mm_cmpunord_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    return simde_mm_move_ss(a, simde_mm_cmpunord_ps(a, b));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(simde_math_isnanf)
      r_.u32[0] = (simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0])) ?
~UINT32_C(0) : UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpunord_ss(a, b) simde_mm_cmpunord_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_comieq_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_comieq_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan)); uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32); return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0); #else return a_.f32[0] == b_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_comieq_ss(a, b) simde_mm_comieq_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_comige_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_comige_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32); return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0); #else return a_.f32[0] >= b_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_comige_ss(a, b) simde_mm_comige_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_comigt_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_comigt_ss(a, b); #else 
simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32); return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0); #else return a_.f32[0] > b_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_comigt_ss(a, b) simde_mm_comigt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_comile_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_comile_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan)); uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32); return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0); #else return a_.f32[0] <= b_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_comile_ss(a, b) simde_mm_comile_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_comilt_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_comilt_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan)); uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32); return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0); #else return a_.f32[0] < b_.f32[0]; #endif #endif } #if 
defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_comilt_ss(a, b) simde_mm_comilt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_comineq_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_comineq_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32)); return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0); #else return a_.f32[0] != b_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_comineq_ss(a, b) simde_mm_comineq_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_copysign_ps(simde__m128 dest, simde__m128 src) { simde__m128_private r_, dest_ = simde__m128_to_private(dest), src_ = simde__m128_to_private(src); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) const uint32x4_t sign_pos = vreinterpretq_u32_f32(vdupq_n_f32(-SIMDE_FLOAT32_C(0.0))); r_.neon_u32 = vbslq_u32(sign_pos, src_.neon_u32, dest_.neon_u32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) const v128_t sign_pos = wasm_f32x4_splat(-0.0f); r_.wasm_v128 = wasm_v128_bitselect(src_.wasm_v128, dest_.wasm_v128, sign_pos); #elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) #if !defined(HEDLEY_IBM_VERSION) r_.altivec_f32 = vec_cpsgn(dest_.altivec_f32, src_.altivec_f32); #else r_.altivec_f32 = vec_cpsgn(src_.altivec_f32, dest_.altivec_f32); #endif #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) sign_pos = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_splats(-0.0f)); r_.altivec_f32 = vec_sel(dest_.altivec_f32, src_.altivec_f32, sign_pos); #elif defined(SIMDE_IEEE754_STORAGE) (void) src_; (void) dest_; simde__m128 
sign_pos = simde_mm_set1_ps(-0.0f); r_ = simde__m128_to_private(simde_mm_xor_ps(dest, simde_mm_and_ps(simde_mm_xor_ps(dest, src), sign_pos))); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = simde_math_copysignf(dest_.f32[i], src_.f32[i]); } #endif return simde__m128_from_private(r_); } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_xorsign_ps(simde__m128 dest, simde__m128 src) { return simde_mm_xor_ps(simde_mm_and_ps(simde_mm_set1_ps(-0.0f), src), dest); } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvt_pi2ps (simde__m128 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvt_pi2ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a); simde__m64_private b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32); r_.m64_private[1] = a_.m64_private[1]; #else r_.f32[0] = (simde_float32) b_.i32[0]; r_.f32[1] = (simde_float32) b_.i32[1]; r_.i32[2] = a_.i32[2]; r_.i32[3] = a_.i32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvt_pi2ps(a, b) simde_mm_cvt_pi2ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvt_ps2pi (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvt_ps2pi(a); #else simde__m64_private r_; simde__m128_private a_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION)); r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32)); #elif defined(SIMDE_CONVERT_VECTOR_) && !defined(__clang__) && 0 SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32); #else a_ = simde__m128_to_private(a); SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; 
i++) { r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, simde_math_nearbyintf(a_.f32[i])); } #endif return simde__m64_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvt_ps2pi(a) simde_mm_cvt_ps2pi((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvt_si2ss (simde__m128 a, int32_t b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvt_si2ss(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float, b), a_.neon_f32, 0); #else r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b); r_.i32[1] = a_.i32[1]; r_.i32[2] = a_.i32[2]; r_.i32[3] = a_.i32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvt_si2ss(a, b) simde_mm_cvt_si2ss((a), b) #endif SIMDE_FUNCTION_ATTRIBUTES int32_t simde_mm_cvt_ss2si (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvt_ss2si(a); #elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399) return vgetq_lane_s32(vcvtnq_s32_f32(simde__m128_to_neon_f32(a)), 0); #else simde__m128_private a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION)); return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvt_ss2si(a) simde_mm_cvt_ss2si((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpi16_ps (simde__m64 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpi16_ps(a); #else simde__m128_private r_; simde__m64_private a_ = simde__m64_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && 0 /* TODO */ r_.neon_f32 = vmovl_s16(vget_low_s16(vuzp1q_s16(a_.neon_i16, vmovq_n_s16(0)))); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.f32, a_.i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { simde_float32 v = a_.i16[i]; r_.f32[i] = 
v; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpi16_ps(a) simde_mm_cvtpi16_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpi32_ps (simde__m128 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpi32_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a); simde__m64_private b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32); r_.m64_private[1] = a_.m64_private[1]; #else r_.f32[0] = (simde_float32) b_.i32[0]; r_.f32[1] = (simde_float32) b_.i32[1]; r_.i32[2] = a_.i32[2]; r_.i32[3] = a_.i32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpi32_ps(a, b) simde_mm_cvtpi32_ps((a), b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpi32x2_ps (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpi32x2_ps(a, b); #else simde__m128_private r_; simde__m64_private a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcvtq_f32_s32(vcombine_s32(a_.neon_i32, b_.neon_i32)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, a_.i32); SIMDE_CONVERT_VECTOR_(r_.m64_private[1].f32, b_.i32); #else r_.f32[0] = (simde_float32) a_.i32[0]; r_.f32[1] = (simde_float32) a_.i32[1]; r_.f32[2] = (simde_float32) b_.i32[0]; r_.f32[3] = (simde_float32) b_.i32[1]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpi32x2_ps(a, b) simde_mm_cvtpi32x2_ps(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpi8_ps (simde__m64 a) { #if 
defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpi8_ps(a); #else simde__m128_private r_; simde__m64_private a_ = simde__m64_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(a_.neon_i8)))); #else r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[0]); r_.f32[1] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[1]); r_.f32[2] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[2]); r_.f32[3] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[3]); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpi8_ps(a) simde_mm_cvtpi8_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvtps_pi16 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtps_pi16(a); #else simde__m64_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399) r_.neon_i16 = vmovn_s32(vcvtq_s32_f32(vrndiq_f32(a_.neon_f32))); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { r_.i16[i] = SIMDE_CONVERT_FTOI(int16_t, simde_math_roundf(a_.f32[i])); } #endif return simde__m64_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtps_pi16(a) simde_mm_cvtps_pi16((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvtps_pi32 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtps_pi32(a); #else simde__m64_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399) r_.neon_i32 = vcvt_s32_f32(vget_low_f32(vrndiq_f32(a_.neon_f32))); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, simde_math_roundf(a_.f32[i])); } #endif return simde__m64_from_private(r_); #endif } #if 
defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtps_pi32(a) simde_mm_cvtps_pi32((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvtps_pi8 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtps_pi8(a); #else simde__m64_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95471) /* Clamp the input to [INT8_MIN, INT8_MAX], round, convert to i32, narrow to * i16, combine with an all-zero vector of i16 (which will become the upper * half), narrow to i8. */ float32x4_t max = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MAX)); float32x4_t min = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MIN)); float32x4_t values = vrndnq_f32(vmaxq_f32(vminq_f32(max, a_.neon_f32), min)); r_.neon_i8 = vmovn_s16(vcombine_s16(vmovn_s32(vcvtq_s32_f32(values)), vdup_n_s16(0))); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) { if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, INT8_MAX)) r_.i8[i] = INT8_MAX; else if (a_.f32[i] < HEDLEY_STATIC_CAST(simde_float32, INT8_MIN)) r_.i8[i] = INT8_MIN; else r_.i8[i] = SIMDE_CONVERT_FTOI(int8_t, simde_math_roundf(a_.f32[i])); } /* Note: the upper half is undefined */ #endif return simde__m64_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtps_pi8(a) simde_mm_cvtps_pi8((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpu16_ps (simde__m64 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpu16_ps(a); #else simde__m128_private r_; simde__m64_private a_ = simde__m64_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(a_.neon_u16)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.f32, a_.u16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = (simde_float32) 
a_.u16[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpu16_ps(a) simde_mm_cvtpu16_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtpu8_ps (simde__m64 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtpu8_ps(a); #else simde__m128_private r_; simde__m64_private a_ = simde__m64_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(a_.neon_u8)))); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.u8[i]); } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtpu8_ps(a) simde_mm_cvtpu8_ps(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtsi32_ss (simde__m128 a, int32_t b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvtsi32_ss(a, b); #else simde__m128_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0); #else r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) { r_.i32[i] = a_.i32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtsi32_ss(a, b) simde_mm_cvtsi32_ss((a), b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cvtsi64_ss (simde__m128 a, int64_t b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) #if !defined(__PGI) return _mm_cvtsi64_ss(a, b); #else return _mm_cvtsi64x_ss(a, b); #endif #else simde__m128_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0); #else r_ = a_; r_.f32[0] 
= HEDLEY_STATIC_CAST(simde_float32, b); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtsi64_ss(a, b) simde_mm_cvtsi64_ss((a), b) #endif SIMDE_FUNCTION_ATTRIBUTES simde_float32 simde_mm_cvtss_f32 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvtss_f32(a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vgetq_lane_f32(a_.neon_f32, 0); #else return a_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtss_f32(a) simde_mm_cvtss_f32((a)) #endif SIMDE_FUNCTION_ATTRIBUTES int32_t simde_mm_cvtss_si32 (simde__m128 a) { return simde_mm_cvt_ss2si(a); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtss_si32(a) simde_mm_cvtss_si32((a)) #endif SIMDE_FUNCTION_ATTRIBUTES int64_t simde_mm_cvtss_si64 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) #if !defined(__PGI) return _mm_cvtss_si64(a); #else return _mm_cvtss_si64x(a); #endif #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(vgetq_lane_f32(a_.neon_f32, 0))); #else return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(a_.f32[0])); #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtss_si64(a) simde_mm_cvtss_si64((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_cvtt_ps2pi (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_cvtt_ps2pi(a); #else simde__m64_private r_; simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32)); #elif defined(SIMDE_CONVERT_VECTOR_) SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.i32[i] = 
SIMDE_CONVERT_FTOI(int32_t, a_.f32[i]); } #endif return simde__m64_from_private(r_); #endif } #define simde_mm_cvttps_pi32(a) simde_mm_cvtt_ps2pi(a) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtt_ps2pi(a) simde_mm_cvtt_ps2pi((a)) # define _mm_cvttps_pi32(a) simde_mm_cvttps_pi32((a)) #endif SIMDE_FUNCTION_ATTRIBUTES int32_t simde_mm_cvtt_ss2si (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cvtt_ss2si(a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return SIMDE_CONVERT_FTOI(int32_t, vgetq_lane_f32(a_.neon_f32, 0)); #else return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]); #endif #endif } #define simde_mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a)) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvtt_ss2si(a) simde_mm_cvtt_ss2si((a)) # define _mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a)) #endif SIMDE_FUNCTION_ATTRIBUTES int64_t simde_mm_cvttss_si64 (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) && !defined(_MSC_VER) #if defined(__PGI) return _mm_cvttss_si64x(a); #else return _mm_cvttss_si64(a); #endif #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) return SIMDE_CONVERT_FTOI(int64_t, vgetq_lane_f32(a_.neon_f32, 0)); #else return SIMDE_CONVERT_FTOI(int64_t, a_.f32[0]); #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cvttss_si64(a) simde_mm_cvttss_si64((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_cmpord_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_cmpord_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_cmpord_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(simde_math_isnanf) r_.u32[0] = (simde_math_isnanf(simde_mm_cvtss_f32(a)) || simde_math_isnanf(simde_mm_cvtss_f32(b))) ? 
UINT32_C(0) : ~UINT32_C(0); SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.u32[i] = a_.u32[i]; } #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_cmpord_ss(a, b) simde_mm_cmpord_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_div_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_div_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f32 = vdivq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x4_t recip0 = vrecpeq_f32(b_.neon_f32); float32x4_t recip1 = vmulq_f32(recip0, vrecpsq_f32(recip0, b_.neon_f32)); r_.neon_f32 = vmulq_f32(a_.neon_f32, recip1); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_div(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.f32 = a_.f32 / b_.f32; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = a_.f32[i] / b_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_div_ps(a, b) simde_mm_div_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_div_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_div_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_div_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t value = vgetq_lane_f32(simde__m128_to_private(simde_mm_div_ps(a, b)).neon_f32, 0); r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0); #else r_.f32[0] = a_.f32[0] / b_.f32[0]; SIMDE_VECTORIZE for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = a_.f32[i]; 
} #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_div_ss(a, b) simde_mm_div_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int16_t simde_mm_extract_pi16 (simde__m64 a, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { simde__m64_private a_ = simde__m64_to_private(a); return a_.i16[imm8]; } #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION) # if HEDLEY_HAS_WARNING("-Wvector-conversion") /* https://bugs.llvm.org/show_bug.cgi?id=44589 */ # define simde_mm_extract_pi16(a, imm8) ( \ HEDLEY_DIAGNOSTIC_PUSH \ _Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \ HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16((a), (imm8))) \ HEDLEY_DIAGNOSTIC_POP \ ) # else # define simde_mm_extract_pi16(a, imm8) HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16(a, imm8)) # endif #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) # define simde_mm_extract_pi16(a, imm8) vget_lane_s16(simde__m64_to_private(a).neon_i16, imm8) #endif #define simde_m_pextrw(a, imm8) simde_mm_extract_pi16(a, imm8) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_extract_pi16(a, imm8) simde_mm_extract_pi16((a), (imm8)) # define _m_pextrw(a, imm8) simde_mm_extract_pi16((a), (imm8)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_insert_pi16 (simde__m64 a, int16_t i, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) { simde__m64_private r_, a_ = simde__m64_to_private(a); r_.i64[0] = a_.i64[0]; r_.i16[imm8] = i; return simde__m64_from_private(r_); } #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI) # if HEDLEY_HAS_WARNING("-Wvector-conversion") /* https://bugs.llvm.org/show_bug.cgi?id=44589 */ # define ssimde_mm_insert_pi16(a, i, imm8) ( \ HEDLEY_DIAGNOSTIC_PUSH \ _Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \ (_mm_insert_pi16((a), (i), (imm8))) \ HEDLEY_DIAGNOSTIC_POP \ ) # else # define simde_mm_insert_pi16(a, i, imm8) 
_mm_insert_pi16(a, i, imm8) # endif #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) # define simde_mm_insert_pi16(a, i, imm8) simde__m64_from_neon_i16(vset_lane_s16((i), simde__m64_to_neon_i16(a), (imm8))) #endif #define simde_m_pinsrw(a, i, imm8) (simde_mm_insert_pi16(a, i, imm8)) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_insert_pi16(a, i, imm8) simde_mm_insert_pi16(a, i, imm8) # define _m_pinsrw(a, i, imm8) simde_mm_insert_pi16(a, i, imm8) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_load_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) { simde_assert_aligned(16, mem_addr); #if defined(SIMDE_X86_SSE_NATIVE) return _mm_load_ps(mem_addr); #else simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vld1q_f32(mem_addr); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) r_.altivec_f32 = vec_vsx_ld(0, mem_addr); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_ld(0, mem_addr); #else r_ = *SIMDE_ALIGN_CAST(simde__m128_private const*, mem_addr); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_load_ps(mem_addr) simde_mm_load_ps(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_load_ps1 (simde_float32 const* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_load_ps1(mem_addr); #else simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vld1q_dup_f32(mem_addr); #else r_ = simde__m128_to_private(simde_mm_set1_ps(*mem_addr)); #endif return simde__m128_from_private(r_); #endif } #define simde_mm_load1_ps(mem_addr) simde_mm_load_ps1(mem_addr) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_load_ps1(mem_addr) simde_mm_load_ps1(mem_addr) # define _mm_load1_ps(mem_addr) simde_mm_load_ps1(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_load_ss (simde_float32 const* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_load_ss(mem_addr); #else simde__m128_private r_; #if 
defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(*mem_addr, vdupq_n_f32(0), 0); #else r_.f32[0] = *mem_addr; r_.i32[1] = 0; r_.i32[2] = 0; r_.i32[3] = 0; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_load_ss(mem_addr) simde_mm_load_ss(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_loadh_pi (simde__m128 a, simde__m64 const* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_loadh_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcombine_f32(vget_low_f32(a_.neon_f32), vld1_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr))); #else simde__m64_private b_ = *HEDLEY_REINTERPRET_CAST(simde__m64_private const*, mem_addr); r_.f32[0] = a_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = b_.f32[0]; r_.f32[3] = b_.f32[1]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), (simde__m64 const*) (mem_addr)) #endif /* The SSE documentation says that there are no alignment requirements for mem_addr. Unfortunately they used the __m64 type for the argument which is supposed to be 8-byte aligned, so some compilers (like clang with -Wcast-align) will generate a warning if you try to cast, say, a simde_float32* to a simde__m64* for this function. I think the choice of argument type is unfortunate, but I do think we need to stick to it here. 
If there is demand I can always add something like simde_x_mm_loadl_f32(simde__m128, simde_float32 mem_addr[2]) */ SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_loadl_pi (simde__m128 a, simde__m64 const* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_loadl_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vcombine_f32(vld1_f32( HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)), vget_high_f32(a_.neon_f32)); #else simde__m64_private b_; simde_memcpy(&b_, mem_addr, sizeof(b_)); r_.i32[0] = b_.i32[0]; r_.i32[1] = b_.i32[1]; r_.i32[2] = a_.i32[2]; r_.i32[3] = a_.i32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), (simde__m64 const*) (mem_addr)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_loadr_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) { simde_assert_aligned(16, mem_addr); #if defined(SIMDE_X86_SSE_NATIVE) return _mm_loadr_ps(mem_addr); #else simde__m128_private r_, v_ = simde__m128_to_private(simde_mm_load_ps(mem_addr)); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vrev64q_f32(v_.neon_f32); r_.neon_f32 = vextq_f32(r_.neon_f32, r_.neon_f32, 2); #elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && 0 /* TODO: XLC documentation has it, but it doesn't seem to work. * More investigation is necessary. 
*/ r_.altivec_f32 = vec_reve(v_.altivec_f32); /* fix: was vec_reve(a_.altivec_f32); no a_ is declared in simde_mm_loadr_ps */ #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, v_.f32, v_.f32, 3, 2, 1, 0); #else r_.f32[0] = v_.f32[3]; r_.f32[1] = v_.f32[2]; r_.f32[2] = v_.f32[1]; r_.f32[3] = v_.f32[0]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_loadr_ps(mem_addr) simde_mm_loadr_ps(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_loadu_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_loadu_ps(mem_addr); #else simde__m128_private r_; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vld1q_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_v128_load(mem_addr); #else r_.f32[0] = mem_addr[0]; r_.f32[1] = mem_addr[1]; r_.f32[2] = mem_addr[2]; r_.f32[3] = mem_addr[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_loadu_ps(mem_addr) simde_mm_loadu_ps(mem_addr) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_maskmove_si64 (simde__m64 a, simde__m64 mask, int8_t* mem_addr) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) _mm_maskmove_si64(a, mask, HEDLEY_REINTERPRET_CAST(char*, mem_addr)); #else simde__m64_private a_ = simde__m64_to_private(a), mask_ = simde__m64_to_private(mask); SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) if (mask_.i8[i] < 0) mem_addr[i] = a_.i8[i]; #endif } #define simde_m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64(a, mask, mem_addr) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_maskmove_si64(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr))) # define _m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr))) #endif
SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_max_pi16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_max_pi16(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vmax_s16(a_.neon_i16, b_.neon_i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? a_.i16[i] : b_.i16[i]; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pmaxsw(a, b) simde_mm_max_pi16(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_max_pi16(a, b) simde_mm_max_pi16(a, b) # define _m_pmaxsw(a, b) simde_mm_max_pi16(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_max_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_max_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vmaxq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_max(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_max(a_.altivec_f32, b_.altivec_f32); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = (a_.f32[i] > b_.f32[i]) ? 
a_.f32[i] : b_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_max_ps(a, b) simde_mm_max_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_max_pu8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_max_pu8(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u8 = vmax_u8(a_.neon_u8, b_.neon_u8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? a_.u8[i] : b_.u8[i]; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pmaxub(a, b) simde_mm_max_pu8(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_max_pu8(a, b) simde_mm_max_pu8(a, b) # define _m_pmaxub(a, b) simde_mm_max_pu8(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_max_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_max_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_max_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t value = vgetq_lane_f32(vmaxq_f32(a_.neon_f32, b_.neon_f32), 0); /* fix: was maxq_f32, not a NEON intrinsic (cf. vminq_f32 in simde_mm_min_ss) */ r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0); #else r_.f32[0] = (a_.f32[0] > b_.f32[0]) ?
a_.f32[0] : b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_max_ss(a, b) simde_mm_max_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_min_pi16 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_min_pi16(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_i16 = vmin_s16(a_.neon_i16, b_.neon_i16); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) { r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? a_.i16[i] : b_.i16[i]; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pminsw(a, b) simde_mm_min_pi16(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_min_pi16(a, b) simde_mm_min_pi16(a, b) # define _m_pminsw(a, b) simde_mm_min_pi16(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_min_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_min_ps(a, b); #elif defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A32V7_NATIVE) return simde__m128_from_neon_f32(vminq_f32(simde__m128_to_neon_f32(a), simde__m128_to_neon_f32(b))); #elif defined(SIMDE_WASM_SIMD128_NATIVE) simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_FAST_NANS) r_.wasm_v128 = wasm_f32x4_min(a_.wasm_v128, b_.wasm_v128); #else r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128)); #endif return simde__m128_from_private(r_); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_FAST_NANS) r_.altivec_f32 = vec_min(a_.altivec_f32, b_.altivec_f32); #else r_.altivec_f32 = vec_sel(b_.altivec_f32, 
a_.altivec_f32, vec_cmpgt(b_.altivec_f32, a_.altivec_f32)); #endif return simde__m128_from_private(r_); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) simde__m128 mask = simde_mm_cmplt_ps(a, b); return simde_mm_or_ps(simde_mm_and_ps(mask, a), simde_mm_andnot_ps(mask, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = (a_.f32[i] < b_.f32[i]) ? a_.f32[i] : b_.f32[i]; } return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_min_ps(a, b) simde_mm_min_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_min_pu8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_min_pu8(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_u8 = vmin_u8(a_.neon_u8, b_.neon_u8); #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { r_.u8[i] = (a_.u8[i] < b_.u8[i]) ? a_.u8[i] : b_.u8[i]; } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pminub(a, b) simde_mm_min_pu8(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_min_pu8(a, b) simde_mm_min_pu8(a, b) # define _m_pminub(a, b) simde_mm_min_pu8(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_min_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_min_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_min_ps(a, b)); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t value = vgetq_lane_f32(vminq_f32(a_.neon_f32, b_.neon_f32), 0); r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0); #else r_.f32[0] = (a_.f32[0] < b_.f32[0]) ? 
a_.f32[0] : b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_min_ss(a, b) simde_mm_min_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_movehl_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_movehl_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x2_t a32 = vget_high_f32(a_.neon_f32); float32x2_t b32 = vget_high_f32(b_.neon_f32); r_.neon_f32 = vcombine_f32(b32, a32); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 6, 7, 2, 3); #else r_.f32[0] = b_.f32[2]; r_.f32[1] = b_.f32[3]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_movehl_ps(a, b) simde_mm_movehl_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_movelh_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_movelh_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x2_t a10 = vget_low_f32(a_.neon_f32); float32x2_t b10 = vget_low_f32(b_.neon_f32); r_.neon_f32 = vcombine_f32(a10, b10); #elif defined(SIMDE_SHUFFLE_VECTOR_) r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 1, 4, 5); #else r_.f32[0] = a_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = b_.f32[0]; r_.f32[3] = b_.f32[1]; #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_movelh_ps(a, b) simde_mm_movelh_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_movemask_pi8 (simde__m64 a) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_movemask_pi8(a); #else 
  /* (continuation of simde_mm_movemask_pi8: portable fallback body) */
  simde__m64_private a_ = simde__m64_to_private(a);
  int r = 0;

  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    /* Isolate each byte's sign bit, shift byte k's bit into position k
       (negative shift counts in vshl_u8 shift right), then horizontally
       add the lanes to pack the 8 sign bits into one integer. */
    uint8x8_t input = a_.neon_u8;
    const int8_t xr[8] = {-7, -6, -5, -4, -3, -2, -1, 0};
    const uint8x8_t mask_and = vdup_n_u8(0x80);
    const int8x8_t mask_shift = vld1_s8(xr);
    const uint8x8_t mask_result = vshl_u8(vand_u8(input, mask_and), mask_shift);
    uint8x8_t lo = mask_result;
    r = vaddv_u8(lo);
  #else
    /* Portable scalar path: OR together bit i = sign bit of byte i. */
    const size_t nmemb = sizeof(a_.i8) / sizeof(a_.i8[0]);
    SIMDE_VECTORIZE_REDUCTION(|:r)
    for (size_t i = 0 ; i < nmemb ; i++) {
      r |= (a_.u8[nmemb - 1 - i] >> 7) << (nmemb - 1 - i);
    }
  #endif

  return r;
#endif
}
#define simde_m_pmovmskb(a) simde_mm_movemask_pi8(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_movemask_pi8(a) simde_mm_movemask_pi8(a)
#  define _m_pmovmskb(a) simde_mm_movemask_pi8(a)
#endif

/* Portable _mm_movemask_ps: returns a 4-bit mask built from the sign bit of
   each 32-bit lane of `a` (bit i = sign of lane i). */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_ps (simde__m128 a) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_movemask_ps(a);
  #else
    int r = 0;
    simde__m128_private a_ = simde__m128_to_private(a);

    #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
      /* Shift each lane's sign bit down to bit 0, shift lane i's bit up to
         position i, then horizontally add to combine the four bits. */
      static const int32_t shift_amount[] = { 0, 1, 2, 3 };
      const int32x4_t shift = vld1q_s32(shift_amount);
      uint32x4_t tmp = vshrq_n_u32(a_.neon_u32, 31);
      return HEDLEY_STATIC_CAST(int, vaddvq_u32(vshlq_u32(tmp, shift)));
    #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      // Shift out everything but the sign bits with a 32-bit unsigned shift right.
      uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(a_.neon_u32, 31));
      // Merge the two pairs together with a 64-bit unsigned shift right + add.
      uint8x16_t paired = vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
      // Extract the result.
      return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
    #else
      /* Portable path: bit i = most-significant bit of 32-bit lane i. */
      SIMDE_VECTORIZE_REDUCTION(|:r)
      for (size_t i = 0 ; i < sizeof(a_.u32) / sizeof(a_.u32[0]) ; i++) {
        r |= (a_.u32[i] >> ((sizeof(a_.u32[i]) * CHAR_BIT) - 1)) << i;
      }
    #endif

    return r;
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_movemask_ps(a) simde_mm_movemask_ps((a))
#endif

/* Portable _mm_mul_ps: lane-wise single-precision multiply. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ps (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_mul_ps(a, b);
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
      r_.neon_f32 = vmulq_f32(a_.neon_f32, b_.neon_f32);
    #elif defined(SIMDE_WASM_SIMD128_NATIVE)
      r_.wasm_v128 = wasm_f32x4_mul(a_.wasm_v128, b_.wasm_v128);
    #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
      /* GCC/Clang vector-extension element-wise multiply. */
      r_.f32 = a_.f32 * b_.f32;
    #else
      SIMDE_VECTORIZE
      for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
        r_.f32[i] = a_.f32[i] * b_.f32[i];
      }
    #endif

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_mul_ps(a, b) simde_mm_mul_ps((a), (b))
#endif

/* Portable _mm_mul_ss: multiply only lane 0; lanes 1-3 pass through from a. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ss (simde__m128 a, simde__m128 b) {
  #if defined(SIMDE_X86_SSE_NATIVE)
    return _mm_mul_ss(a, b);
  #elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
    return simde_mm_move_ss(a, simde_mm_mul_ps(a, b));
  #else
    simde__m128_private
      r_,
      a_ = simde__m128_to_private(a),
      b_ = simde__m128_to_private(b);

    r_.f32[0] = a_.f32[0] * b_.f32[0];
    r_.f32[1] = a_.f32[1];
    r_.f32[2] = a_.f32[2];
    r_.f32[3] = a_.f32[3];

    return simde__m128_from_private(r_);
  #endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#  define _mm_mul_ss(a, b) simde_mm_mul_ss((a), (b))
#endif

/* Portable _mm_mulhi_pu16: high 16 bits of the unsigned 16x16->32 product
   per lane (body continues on the next source span). */
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_mulhi_pu16 (simde__m64 a, simde__m64 b) {
  #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
    return _mm_mulhi_pu16(a, b);
  #else
    simde__m64_private
      r_,
      a_ = simde__m64_to_private(a),
      b_ =
simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) const uint32x4_t t1 = vmull_u16(a_.neon_u16, b_.neon_u16); const uint32x4_t t2 = vshrq_n_u32(t1, 16); const uint16x4_t t3 = vmovn_u32(t2); r_.neon_u16 = t3; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) { r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, ((HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) * HEDLEY_STATIC_CAST(uint32_t, b_.u16[i])) >> UINT32_C(16))); } #endif return simde__m64_from_private(r_); #endif } #define simde_m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_mulhi_pu16(a, b) simde_mm_mulhi_pu16(a, b) # define _m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_prefetch (char const* p, int i) { #if defined(HEDLEY_GCC_VERSION) __builtin_prefetch(p); #else (void) p; #endif (void) i; } #if defined(SIMDE_X86_SSE_NATIVE) # define simde_mm_prefetch(p, i) _mm_prefetch(p, i) #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_prefetch(p, i) simde_mm_prefetch(p, i) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_negate_ps(simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return simde_mm_xor_ps(a, _mm_set1_ps(SIMDE_FLOAT32_C(-0.0))); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \ (!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0)) r_.altivec_f32 = vec_neg(a_.altivec_f32); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vnegq_f32(a_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_neg(a_.wasm_v128); #elif defined(SIMDE_VECTOR_NEGATE) r_.f32 = -a_.f32; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = -a_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_rcp_ps (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return 
_mm_rcp_ps(a); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x4_t recip = vrecpeq_f32(a_.neon_f32); #if SIMDE_ACCURACY_PREFERENCE > 0 for (int i = 0; i < SIMDE_ACCURACY_PREFERENCE ; ++i) { recip = vmulq_f32(recip, vrecpsq_f32(recip, a_.neon_f32)); } #endif r_.neon_f32 = recip; #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_div(simde_mm_set1_ps(1.0f), a_.wasm_v128); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) r_.altivec_f32 = vec_re(a_.altivec_f32); #elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) r_.f32 = 1.0f / a_.f32; #elif defined(SIMDE_IEEE754_STORAGE) /* https://stackoverflow.com/questions/12227126/division-as-multiply-and-lut-fast-float-division-reciprocal/12228234#12228234 */ SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { int32_t ix; simde_float32 fx = a_.f32[i]; simde_memcpy(&ix, &fx, sizeof(ix)); int32_t x = INT32_C(0x7EF311C3) - ix; simde_float32 temp; simde_memcpy(&temp, &x, sizeof(temp)); r_.f32[i] = temp * (SIMDE_FLOAT32_C(2.0) - temp * fx); } #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = 1.0f / a_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_rcp_ps(a) simde_mm_rcp_ps((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_rcp_ss (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_rcp_ss(a); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_rcp_ps(a)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); r_.f32[0] = 1.0f / a_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_rcp_ss(a) simde_mm_rcp_ss((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_rsqrt_ps (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) 
return _mm_rsqrt_ps(a); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vrsqrteq_f32(a_.neon_f32); #elif defined(SIMDE_IEEE754_STORAGE) /* https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf Pages 100 - 103 */ SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { #if SIMDE_ACCURACY_PREFERENCE <= 0 r_.i32[i] = INT32_C(0x5F37624F) - (a_.i32[i] >> 1); #else simde_float32 x = a_.f32[i]; simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x; int32_t ix; simde_memcpy(&ix, &x, sizeof(ix)); #if SIMDE_ACCURACY_PREFERENCE == 1 ix = INT32_C(0x5F375A82) - (ix >> 1); #else ix = INT32_C(0x5F37599E) - (ix >> 1); #endif simde_memcpy(&x, &ix, sizeof(x)); #if SIMDE_ACCURACY_PREFERENCE >= 2 x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x); #endif x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x); r_.f32[i] = x; #endif } #elif defined(simde_math_sqrtf) SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = 1.0f / simde_math_sqrtf(a_.f32[i]); } #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_rsqrt_ps(a) simde_mm_rsqrt_ps((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_rsqrt_ss (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_rsqrt_ss(a); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_rsqrt_ps(a)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(simde_mm_rsqrt_ps(a).neon_f32, 0), a_.neon_f32, 0); #elif defined(SIMDE_IEEE754_STORAGE) { #if SIMDE_ACCURACY_PREFERENCE <= 0 r_.i32[0] = INT32_C(0x5F37624F) - (a_.i32[0] >> 1); #else simde_float32 x = a_.f32[0]; simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x; int32_t ix; simde_memcpy(&ix, &x, sizeof(ix)); #if 
SIMDE_ACCURACY_PREFERENCE == 1 ix = INT32_C(0x5F375A82) - (ix >> 1); #else ix = INT32_C(0x5F37599E) - (ix >> 1); #endif simde_memcpy(&x, &ix, sizeof(x)); #if SIMDE_ACCURACY_PREFERENCE >= 2 x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x); #endif x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x); r_.f32[0] = x; #endif } r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #elif defined(simde_math_sqrtf) r_.f32[0] = 1.0f / simde_math_sqrtf(a_.f32[0]); r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_rsqrt_ss(a) simde_mm_rsqrt_ss((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_sad_pu8 (simde__m64 a, simde__m64 b) { #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) return _mm_sad_pu8(a, b); #else simde__m64_private r_, a_ = simde__m64_to_private(a), b_ = simde__m64_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint16x4_t t = vpaddl_u8(vabd_u8(a_.neon_u8, b_.neon_u8)); uint16_t r0 = t[0] + t[1] + t[2] + t[3]; r_.neon_u16 = vset_lane_u16(r0, vdup_n_u16(0), 0); #else uint16_t sum = 0; #if defined(SIMDE_HAVE_STDLIB_H) SIMDE_VECTORIZE_REDUCTION(+:sum) for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) { sum += HEDLEY_STATIC_CAST(uint8_t, abs(a_.u8[i] - b_.u8[i])); } r_.i16[0] = HEDLEY_STATIC_CAST(int16_t, sum); r_.i16[1] = 0; r_.i16[2] = 0; r_.i16[3] = 0; #else HEDLEY_UNREACHABLE(); #endif #endif return simde__m64_from_private(r_); #endif } #define simde_m_psadbw(a, b) simde_mm_sad_pu8(a, b) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_sad_pu8(a, b) simde_mm_sad_pu8(a, b) # define _m_psadbw(a, b) simde_mm_sad_pu8(a, b) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_set_ss (simde_float32 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_set_ss(a); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vsetq_lane_f32(a, 
vdupq_n_f32(SIMDE_FLOAT32_C(0.0)), 0); #else return simde_mm_set_ps(SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), a); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_set_ss(a) simde_mm_set_ss(a) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_setr_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_setr_ps(e3, e2, e1, e0); #else return simde_mm_set_ps(e0, e1, e2, e3); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_setr_ps(e3, e2, e1, e0) simde_mm_setr_ps(e3, e2, e1, e0) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_setzero_ps (void) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_setzero_ps(); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) return vdupq_n_f32(SIMDE_FLOAT32_C(0.0)); #elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) return vec_splats(SIMDE_FLOAT32_C(0.0)); #else simde__m128 r; simde_memset(&r, 0, sizeof(r)); return r; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_setzero_ps() simde_mm_setzero_ps() #endif #if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_) HEDLEY_DIAGNOSTIC_PUSH SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_ #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_undefined_ps (void) { simde__m128_private r_; #if defined(SIMDE_HAVE_UNDEFINED128) r_.n = _mm_undefined_ps(); #elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_) r_ = simde__m128_to_private(simde_mm_setzero_ps()); #endif return simde__m128_from_private(r_); } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_undefined_ps() simde_mm_undefined_ps() #endif #if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_) HEDLEY_DIAGNOSTIC_POP #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_x_mm_setone_ps (void) { simde__m128 t = simde_mm_setzero_ps(); return simde_mm_cmpeq_ps(t, t); } SIMDE_FUNCTION_ATTRIBUTES void simde_mm_sfence (void) { /* TODO: Use Hedley. 
*/ #if defined(SIMDE_X86_SSE_NATIVE) _mm_sfence(); #elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7)) __atomic_thread_fence(__ATOMIC_SEQ_CST); #elif !defined(__INTEL_COMPILER) && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__) #if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 9) __atomic_thread_fence(__ATOMIC_SEQ_CST); #else atomic_thread_fence(memory_order_seq_cst); #endif #elif defined(_MSC_VER) MemoryBarrier(); #elif HEDLEY_HAS_EXTENSION(c_atomic) __c11_atomic_thread_fence(__ATOMIC_SEQ_CST); #elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) __sync_synchronize(); #elif defined(_OPENMP) #pragma omp critical(simde_mm_sfence_) { } #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_sfence() simde_mm_sfence() #endif #define SIMDE_MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w)) #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _MM_SHUFFLE(z, y, x, w) SIMDE_MM_SHUFFLE(z, y, x, w) #endif #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI) # define simde_mm_shuffle_pi16(a, imm8) _mm_shuffle_pi16(a, imm8) #elif defined(SIMDE_SHUFFLE_VECTOR_) # define simde_mm_shuffle_pi16(a, imm8) (__extension__ ({ \ const simde__m64_private simde__tmp_a_ = simde__m64_to_private(a); \ simde__m64_from_private((simde__m64_private) { .i16 = \ SIMDE_SHUFFLE_VECTOR_(16, 8, \ (simde__tmp_a_).i16, \ (simde__tmp_a_).i16, \ (((imm8) ) & 3), \ (((imm8) >> 2) & 3), \ (((imm8) >> 4) & 3), \ (((imm8) >> 6) & 3)) }); })) #else SIMDE_FUNCTION_ATTRIBUTES simde__m64 simde_mm_shuffle_pi16 (simde__m64 a, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m64_private r_; simde__m64_private a_ = simde__m64_to_private(a); for (size_t i = 0 ; i < sizeof(r_.i16) / sizeof(r_.i16[0]) ; i++) { r_.i16[i] = a_.i16[(imm8 >> (i * 2)) & 3]; } HEDLEY_DIAGNOSTIC_PUSH #if 
HEDLEY_HAS_WARNING("-Wconditional-uninitialized") # pragma clang diagnostic ignored "-Wconditional-uninitialized" #endif return simde__m64_from_private(r_); HEDLEY_DIAGNOSTIC_POP } #endif #if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI) # define simde_m_pshufw(a, imm8) _m_pshufw(a, imm8) #else # define simde_m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8) #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_shuffle_pi16(a, imm8) simde_mm_shuffle_pi16(a, imm8) # define _m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8) #endif #if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI) # define simde_mm_shuffle_ps(a, b, imm8) _mm_shuffle_ps(a, b, imm8) #elif defined(SIMDE_SHUFFLE_VECTOR_) # define simde_mm_shuffle_ps(a, b, imm8) (__extension__ ({ \ simde__m128_from_private((simde__m128_private) { .f32 = \ SIMDE_SHUFFLE_VECTOR_(32, 16, \ simde__m128_to_private(a).f32, \ simde__m128_to_private(b).f32, \ (((imm8) ) & 3), \ (((imm8) >> 2) & 3), \ (((imm8) >> 4) & 3) + 4, \ (((imm8) >> 6) & 3) + 4) }); })) #else SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_shuffle_ps (simde__m128 a, simde__m128 b, const int imm8) SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) { simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.f32[0] = a_.f32[(imm8 >> 0) & 3]; r_.f32[1] = a_.f32[(imm8 >> 2) & 3]; r_.f32[2] = b_.f32[(imm8 >> 4) & 3]; r_.f32[3] = b_.f32[(imm8 >> 6) & 3]; return simde__m128_from_private(r_); } #endif #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_shuffle_ps(a, b, imm8) simde_mm_shuffle_ps((a), (b), imm8) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_sqrt_ps (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_sqrt_ps(a); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A64V8_NATIVE) r_.neon_f32 = vsqrtq_f32(a_.neon_f32); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32x4_t est = vrsqrteq_f32(a_.neon_f32); for (int i = 
0 ; i <= SIMDE_ACCURACY_PREFERENCE ; i++) { est = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a_.neon_f32, est), est), est); } r_.neon_f32 = vmulq_f32(a_.neon_f32, est); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_sqrt(a_.wasm_v128); #elif defined(simde_math_sqrt) SIMDE_VECTORIZE for (size_t i = 0 ; i < sizeof(r_.f32) / sizeof(r_.f32[0]) ; i++) { r_.f32[i] = simde_math_sqrtf(a_.f32[i]); } #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_sqrt_ps(a) simde_mm_sqrt_ps((a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_sqrt_ss (simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_sqrt_ss(a); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_sqrt_ps(a)); #else simde__m128_private r_, a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) float32_t value = vgetq_lane_f32(simde__m128_to_private(simde_mm_sqrt_ps(a)).neon_f32, 0); r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0); #elif defined(simde_math_sqrtf) r_.f32[0] = simde_math_sqrtf(a_.f32[0]); r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; #else HEDLEY_UNREACHABLE(); #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_sqrt_ss(a) simde_mm_sqrt_ss((a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_store_ps (simde_float32 mem_addr[4], simde__m128 a) { simde_assert_aligned(16, mem_addr); #if defined(SIMDE_X86_SSE_NATIVE) _mm_store_ps(mem_addr, a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_f32(mem_addr, a_.neon_f32); #elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE) vec_vsx_st(a_.altivec_f32, 0, mem_addr); #elif defined(SIMDE_POWER_ALTIVEC_P5_NATIVE) vec_st(a_.altivec_f32, 0, mem_addr); #elif defined(SIMDE_WASM_SIMD128_NATIVE) wasm_v128_store(mem_addr, a_.wasm_v128); #else SIMDE_VECTORIZE_ALIGNED(mem_addr:16) for 
(size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) { mem_addr[i] = a_.f32[i]; } #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_store_ps(mem_addr, a) simde_mm_store_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_store_ps1 (simde_float32 mem_addr[4], simde__m128 a) { simde_assert_aligned(16, mem_addr); #if defined(SIMDE_X86_SSE_NATIVE) _mm_store_ps1(mem_addr, a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) mem_addr[0] = vgetq_lane_f32(a_.neon_f32, 0); mem_addr[1] = vgetq_lane_f32(a_.neon_f32, 0); mem_addr[2] = vgetq_lane_f32(a_.neon_f32, 0); mem_addr[3] = vgetq_lane_f32(a_.neon_f32, 0); #else SIMDE_VECTORIZE_ALIGNED(mem_addr:16) for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) { mem_addr[i] = a_.f32[0]; } #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_store_ps1(mem_addr, a) simde_mm_store_ps1(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_store_ss (simde_float32* mem_addr, simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_store_ss(mem_addr, a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_lane_f32(mem_addr, a_.neon_f32, 0); #else *mem_addr = a_.f32[0]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_store_ss(mem_addr, a) simde_mm_store_ss(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_store1_ps (simde_float32 mem_addr[4], simde__m128 a) { simde_assert_aligned(16, mem_addr); #if defined(SIMDE_X86_SSE_NATIVE) _mm_store1_ps(mem_addr, a); #else simde_mm_store_ps1(mem_addr, a); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_store1_ps(mem_addr, a) 
simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_storeh_pi (simde__m64* mem_addr, simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_storeh_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a); #else simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr); simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) dest_->f32[0] = vgetq_lane_f32(a_.neon_f32, 2); dest_->f32[1] = vgetq_lane_f32(a_.neon_f32, 3); #else dest_->f32[0] = a_.f32[2]; dest_->f32[1] = a_.f32[3]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_storeh_pi(mem_addr, a) simde_mm_storeh_pi(mem_addr, (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_storel_pi (simde__m64* mem_addr, simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_storel_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a); #else simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr); simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) dest_->neon_f32 = vget_low_f32(a_.neon_f32); #else dest_->f32[0] = a_.f32[0]; dest_->f32[1] = a_.f32[1]; #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_storel_pi(mem_addr, a) simde_mm_storel_pi(mem_addr, (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_storer_ps (simde_float32 mem_addr[4], simde__m128 a) { simde_assert_aligned(16, mem_addr); #if defined(SIMDE_X86_SSE_NATIVE) _mm_storer_ps(mem_addr, a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_SHUFFLE_VECTOR_) a_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 3, 2, 1, 0); simde_mm_store_ps(mem_addr, simde__m128_from_private(a_)); #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE) mem_addr[0] = vgetq_lane_f32(a_.neon_f32, 3); mem_addr[1] = vgetq_lane_f32(a_.neon_f32, 2); mem_addr[2] = vgetq_lane_f32(a_.neon_f32, 1); mem_addr[3] = 
vgetq_lane_f32(a_.neon_f32, 0); #else SIMDE_VECTORIZE_ALIGNED(mem_addr:16) for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) { mem_addr[i] = a_.f32[((sizeof(a_.f32) / sizeof(a_.f32[0])) - 1) - i]; } #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_storer_ps(mem_addr, a) simde_mm_storer_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) #endif SIMDE_FUNCTION_ATTRIBUTES void simde_mm_storeu_ps (simde_float32 mem_addr[4], simde__m128 a) { #if defined(SIMDE_X86_SSE_NATIVE) _mm_storeu_ps(mem_addr, a); #else simde__m128_private a_ = simde__m128_to_private(a); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) vst1q_f32(mem_addr, a_.neon_f32); #else simde_memcpy(mem_addr, &a_, sizeof(a_)); #endif #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_storeu_ps(mem_addr, a) simde_mm_storeu_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_sub_ps (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_sub_ps(a, b); #else simde__m128_private r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) r_.neon_f32 = vsubq_f32(a_.neon_f32, b_.neon_f32); #elif defined(SIMDE_WASM_SIMD128_NATIVE) r_.wasm_v128 = wasm_f32x4_sub(a_.wasm_v128, b_.wasm_v128); #elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) r_.f32 = a_.f32 - b_.f32; #else SIMDE_VECTORIZE for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) { r_.f32[i] = a_.f32[i] - b_.f32[i]; } #endif return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_sub_ps(a, b) simde_mm_sub_ps((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES simde__m128 simde_mm_sub_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_sub_ss(a, b); #elif (SIMDE_NATURAL_VECTOR_SIZE > 0) return simde_mm_move_ss(a, simde_mm_sub_ps(a, b)); #else simde__m128_private 
r_, a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); r_.f32[0] = a_.f32[0] - b_.f32[0]; r_.f32[1] = a_.f32[1]; r_.f32[2] = a_.f32[2]; r_.f32[3] = a_.f32[3]; return simde__m128_from_private(r_); #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_sub_ss(a, b) simde_mm_sub_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_ucomieq_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_ucomieq_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); int r; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan)); uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32); r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0); #elif defined(SIMDE_HAVE_FENV_H) fenv_t envp; int x = feholdexcept(&envp); r = a_.f32[0] == b_.f32[0]; if (HEDLEY_LIKELY(x == 0)) fesetenv(&envp); #else r = a_.f32[0] == b_.f32[0]; #endif return r; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_ucomieq_ss(a, b) simde_mm_ucomieq_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_ucomige_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_ucomige_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); int r; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32); r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0); #elif defined(SIMDE_HAVE_FENV_H) fenv_t envp; int x = feholdexcept(&envp); r = a_.f32[0] >= b_.f32[0]; if (HEDLEY_LIKELY(x == 0)) fesetenv(&envp); #else r = a_.f32[0] 
>= b_.f32[0]; #endif return r; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_ucomige_ss(a, b) simde_mm_ucomige_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_ucomigt_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_ucomigt_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); int r; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan); uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32); r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0); #elif defined(SIMDE_HAVE_FENV_H) fenv_t envp; int x = feholdexcept(&envp); r = a_.f32[0] > b_.f32[0]; if (HEDLEY_LIKELY(x == 0)) fesetenv(&envp); #else r = a_.f32[0] > b_.f32[0]; #endif return r; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_ucomigt_ss(a, b) simde_mm_ucomigt_ss((a), (b)) #endif SIMDE_FUNCTION_ATTRIBUTES int simde_mm_ucomile_ss (simde__m128 a, simde__m128 b) { #if defined(SIMDE_X86_SSE_NATIVE) return _mm_ucomile_ss(a, b); #else simde__m128_private a_ = simde__m128_to_private(a), b_ = simde__m128_to_private(b); int r; #if defined(SIMDE_ARM_NEON_A32V7_NATIVE) uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32); uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32); uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan)); uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32); r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0); #elif defined(SIMDE_HAVE_FENV_H) fenv_t envp; int x = feholdexcept(&envp); r = a_.f32[0] <= b_.f32[0]; if (HEDLEY_LIKELY(x == 0)) fesetenv(&envp); #else r = a_.f32[0] <= b_.f32[0]; #endif return r; #endif } #if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES) # define _mm_ucomile_ss(a, b) simde_mm_ucomile_ss((a), (b)) #endif 
/* Unordered scalar compare: returns 1 when a0 < b0.  As with ucomile, the
 * NEON path ORs in "either input is NaN", so an unordered pair reports 1. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_ucomilt_ss(a, b);
#else
  simde__m128_private
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);
  int r;

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    /* x == x is false only for NaN; invert "both ordered" to get the
     * unordered mask. */
    uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
    uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
    uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
    uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
    r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
  #elif defined(SIMDE_HAVE_FENV_H)
    fenv_t envp;
    int x = feholdexcept(&envp);  /* suppress FP exceptions around compare */
    r = a_.f32[0] < b_.f32[0];
    if (HEDLEY_LIKELY(x == 0))
      fesetenv(&envp);
  #else
    r = a_.f32[0] < b_.f32[0];
  #endif

  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomilt_ss(a, b) simde_mm_ucomilt_ss((a), (b))
#endif

/* Unordered scalar compare: returns 1 when a0 != b0 AND both are ordered;
 * a NaN in either input therefore yields 0. */
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_ucomineq_ss(a, b);
#else
  simde__m128_private
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);
  int r;

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
    uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
    uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
    uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
    r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
  #elif defined(SIMDE_HAVE_FENV_H)
    fenv_t envp;
    int x = feholdexcept(&envp);  /* suppress FP exceptions around compare */
    r = a_.f32[0] != b_.f32[0];
    if (HEDLEY_LIKELY(x == 0))
      fesetenv(&envp);
  #else
    r = a_.f32[0] != b_.f32[0];
  #endif

  return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomineq_ss(a, b) simde_mm_ucomineq_ss((a), (b))
#endif

/* Detect whether the compiler provides __builtin_ia32_undef128.  When
 * __has_builtin is unavailable, fall back to a compiler blocklist. */
#if defined(SIMDE_X86_SSE_NATIVE)
# if defined(__has_builtin)
#  if __has_builtin(__builtin_ia32_undef128)
#   define SIMDE_HAVE_UNDEFINED128
#  endif
# elif !defined(__PGI) && !defined(SIMDE_BUG_GCC_REV_208793) && !defined(_MSC_VER)
#  define SIMDE_HAVE_UNDEFINED128
# endif
#endif

/* NOTE(review): this push is popped by HEDLEY_DIAGNOSTIC_POP at the end of
 * the header -- keep them paired if editing. */
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif

/* Interleave the high halves of a and b: result = {a2, b2, a3, b3}. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpackhi_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_unpackhi_ps(a, b);
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    r_.neon_f32 = vzip2q_f32(a_.neon_f32, b_.neon_f32);
  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    /* ARMv7 has no vzip2q: zip the two high 64-bit halves instead. */
    float32x2_t a1 = vget_high_f32(a_.neon_f32);
    float32x2_t b1 = vget_high_f32(b_.neon_f32);
    float32x2x2_t result = vzip_f32(a1, b1);
    r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
  #elif defined(SIMDE_SHUFFLE_VECTOR_)
    r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 2, 6, 3, 7);
  #else
    r_.f32[0] = a_.f32[2];
    r_.f32[1] = b_.f32[2];
    r_.f32[2] = a_.f32[3];
    r_.f32[3] = b_.f32[3];
  #endif

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpackhi_ps(a, b) simde_mm_unpackhi_ps((a), (b))
#endif

/* Interleave the low halves of a and b: result = {a0, b0, a1, b1}. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpacklo_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
  return _mm_unpacklo_ps(a, b);
#else
  simde__m128_private
    r_,
    a_ = simde__m128_to_private(a),
    b_ = simde__m128_to_private(b);

  #if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
    r_.neon_f32 = vzip1q_f32(a_.neon_f32, b_.neon_f32);
  #elif defined(SIMDE_SHUFFLE_VECTOR_)
    r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 4, 1, 5);
  #elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    /* ARMv7 has no vzip1q: zip the two low 64-bit halves instead. */
    float32x2_t a1 = vget_low_f32(a_.neon_f32);
    float32x2_t b1 = vget_low_f32(b_.neon_f32);
    float32x2x2_t result = vzip_f32(a1, b1);
    r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
  #else
    r_.f32[0] = a_.f32[0];
    r_.f32[1] = b_.f32[0];
    r_.f32[2] = a_.f32[1];
    r_.f32[3] = b_.f32[1];
  #endif

  return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpacklo_ps(a, b) simde_mm_unpacklo_ps((a), (b))
#endif

/* Non-temporal (cache-bypassing) store of a 64-bit value; the portable
 * fallback is simply an ordinary 64-bit store. */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_pi (simde__m64* mem_addr, simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
  _mm_stream_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
  /* `*` binds to the first declarator only: dest is a pointer, a_ a value. */
  simde__m64_private*
    dest = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr),
    a_ = simde__m64_to_private(a);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    dest->i64[0] = vget_lane_s64(a_.neon_i64, 0);
  #else
    dest->i64[0] = a_.i64[0];
  #endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_pi(mem_addr, a) simde_mm_stream_pi(mem_addr, (a))
#endif

/* Non-temporal store of four floats to a 16-byte-aligned address; the
 * fallback degrades to an aligned copy.  mem_addr MUST be 16-byte aligned
 * (asserted below). */
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_ps (simde_float32 mem_addr[4], simde__m128 a) {
  simde_assert_aligned(16, mem_addr);

#if defined(SIMDE_X86_SSE_NATIVE)
  _mm_stream_ps(mem_addr, a);
#else
  simde__m128_private a_ = simde__m128_to_private(a);

  #if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
    vst1q_f32(SIMDE_ASSUME_ALIGNED(16, mem_addr), a_.neon_f32);
  #else
    simde_memcpy(SIMDE_ASSUME_ALIGNED(16, mem_addr), &a_, sizeof(a_));
  #endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_ps(mem_addr, a) simde_mm_stream_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif

/* 4x4 in-register transpose of four __m128 rows (modifies its arguments). */
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* NEON: two vtrnq passes give 2x2-transposed pairs; recombine the halves. */
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
  do { \
    float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \
    float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \
    row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), \
                        vget_low_f32(ROW23.val[0])); \
    row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), \
                        vget_low_f32(ROW23.val[1])); \
    row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), \
                        vget_high_f32(ROW23.val[0])); \
    row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), \
                        vget_high_f32(ROW23.val[1])); \
  } while (0)
#else
/* Portable: classic unpacklo/unpackhi + movelh/movehl formulation. */
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
  do { \
    simde__m128 tmp3, tmp2, tmp1, tmp0; \
    tmp0 = simde_mm_unpacklo_ps((row0), (row1)); \
    tmp2 = simde_mm_unpacklo_ps((row2), (row3)); \
    tmp1 = simde_mm_unpackhi_ps((row0), (row1)); \
    tmp3 = simde_mm_unpackhi_ps((row2), (row3)); \
    row0 = simde_mm_movelh_ps(tmp0, tmp2); \
    row1 = simde_mm_movehl_ps(tmp2, tmp0); \
    row2 = simde_mm_movelh_ps(tmp1, tmp3); \
    row3 = simde_mm_movehl_ps(tmp3, tmp1); \
  } while (0)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3)
#endif

/* MXCSR exception-flag, exception-mask and flush-to-zero bit constants.
 * Each defers to the native <xmmintrin.h> definition when present and
 * otherwise uses the standard x86 bit values. */
#if defined(_MM_EXCEPT_INVALID)
# define SIMDE_MM_EXCEPT_INVALID _MM_EXCEPT_INVALID
#else
# define SIMDE_MM_EXCEPT_INVALID (0x0001)
#endif
#if defined(_MM_EXCEPT_DENORM)
# define SIMDE_MM_EXCEPT_DENORM _MM_EXCEPT_DENORM
#else
# define SIMDE_MM_EXCEPT_DENORM (0x0002)
#endif
#if defined(_MM_EXCEPT_DIV_ZERO)
# define SIMDE_MM_EXCEPT_DIV_ZERO _MM_EXCEPT_DIV_ZERO
#else
# define SIMDE_MM_EXCEPT_DIV_ZERO (0x0004)
#endif
#if defined(_MM_EXCEPT_OVERFLOW)
# define SIMDE_MM_EXCEPT_OVERFLOW _MM_EXCEPT_OVERFLOW
#else
# define SIMDE_MM_EXCEPT_OVERFLOW (0x0008)
#endif
#if defined(_MM_EXCEPT_UNDERFLOW)
# define SIMDE_MM_EXCEPT_UNDERFLOW _MM_EXCEPT_UNDERFLOW
#else
# define SIMDE_MM_EXCEPT_UNDERFLOW (0x0010)
#endif
#if defined(_MM_EXCEPT_INEXACT)
# define SIMDE_MM_EXCEPT_INEXACT _MM_EXCEPT_INEXACT
#else
# define SIMDE_MM_EXCEPT_INEXACT (0x0020)
#endif
#if defined(_MM_EXCEPT_MASK)
# define SIMDE_MM_EXCEPT_MASK _MM_EXCEPT_MASK
#else
# define SIMDE_MM_EXCEPT_MASK \
    (SIMDE_MM_EXCEPT_INVALID   | SIMDE_MM_EXCEPT_DENORM   | \
     SIMDE_MM_EXCEPT_DIV_ZERO  | SIMDE_MM_EXCEPT_OVERFLOW | \
     SIMDE_MM_EXCEPT_UNDERFLOW | SIMDE_MM_EXCEPT_INEXACT)
#endif

#if defined(_MM_MASK_INVALID)
# define SIMDE_MM_MASK_INVALID _MM_MASK_INVALID
#else
# define SIMDE_MM_MASK_INVALID (0x0080)
#endif
#if defined(_MM_MASK_DENORM)
# define SIMDE_MM_MASK_DENORM _MM_MASK_DENORM
#else
# define SIMDE_MM_MASK_DENORM (0x0100)
#endif
#if defined(_MM_MASK_DIV_ZERO)
# define SIMDE_MM_MASK_DIV_ZERO _MM_MASK_DIV_ZERO
#else
# define SIMDE_MM_MASK_DIV_ZERO (0x0200)
#endif
#if defined(_MM_MASK_OVERFLOW)
# define SIMDE_MM_MASK_OVERFLOW _MM_MASK_OVERFLOW
#else
# define SIMDE_MM_MASK_OVERFLOW (0x0400)
#endif
#if defined(_MM_MASK_UNDERFLOW)
# define SIMDE_MM_MASK_UNDERFLOW _MM_MASK_UNDERFLOW
#else
# define SIMDE_MM_MASK_UNDERFLOW (0x0800)
#endif
#if defined(_MM_MASK_INEXACT)
# define SIMDE_MM_MASK_INEXACT _MM_MASK_INEXACT
#else
# define SIMDE_MM_MASK_INEXACT (0x1000)
#endif
#if defined(_MM_MASK_MASK)
# define SIMDE_MM_MASK_MASK _MM_MASK_MASK
#else
# define SIMDE_MM_MASK_MASK \
    (SIMDE_MM_MASK_INVALID   | SIMDE_MM_MASK_DENORM   | \
     SIMDE_MM_MASK_DIV_ZERO  | SIMDE_MM_MASK_OVERFLOW | \
     SIMDE_MM_MASK_UNDERFLOW | SIMDE_MM_MASK_INEXACT)
#endif

#if defined(_MM_FLUSH_ZERO_MASK)
# define SIMDE_MM_FLUSH_ZERO_MASK _MM_FLUSH_ZERO_MASK
#else
# define SIMDE_MM_FLUSH_ZERO_MASK (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_ON)
# define SIMDE_MM_FLUSH_ZERO_ON _MM_FLUSH_ZERO_ON
#else
# define SIMDE_MM_FLUSH_ZERO_ON (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_OFF)
# define SIMDE_MM_FLUSH_ZERO_OFF _MM_FLUSH_ZERO_OFF
#else
# define SIMDE_MM_FLUSH_ZERO_OFF (0x0000)
#endif

SIMDE_END_DECLS_

HEDLEY_DIAGNOSTIC_POP

#endif /* !defined(SIMDE_X86_SSE_H) */
flow_rate_slip_utility.h
//    |  /           |
//    ' /  __| _` | __|  _ \   __|
//    . \  |   (   | |   (   |\__ `
//   _|\_\_|  \__,_|\__|\___/ ____/
//                   Multi-Physics
//
//  License:         BSD License
//                   Kratos default license: kratos/license.txt
//
//  Main authors:    Miguel Maso Sotomayor
//

#ifndef KRATOS_FLOW_RATE_SLIP_UTILITY_H_INCLUDED
#define KRATOS_FLOW_RATE_SLIP_UTILITY_H_INCLUDED

// System includes

// External includes

// Project includes
#include "utilities/coordinate_transformation_utilities.h"

namespace Kratos
{
///@addtogroup ShallowWaterApplication
///@{

///@name Kratos Globals
///@{

///@}
///@name Type Definitions
///@{

///@}
///@name Enum's
///@{

///@}
///@name Functions
///@{

///@}
///@name Kratos Classes
///@{

/**
 * @brief Tools to apply slip conditions
 * @details A utility to rotate the local contributions of certain nodes to the
 * system matrix, which is required to apply slip conditions in arbitrary
 * directions.  Unlike the base CoordinateTransformationUtils, the rotated
 * variable here is MOMENTUM (flow rate), not VELOCITY.
 */
template<class TLocalMatrixType, class TLocalVectorType, class TValueType>
class FlowRateSlipUtility : public CoordinateTransformationUtils<TLocalMatrixType,TLocalVectorType,TValueType>
{
public:
    ///@name Type Definitions
    ///@{

    /// Pointer definition of FlowRateSlipUtility
    KRATOS_CLASS_POINTER_DEFINITION(FlowRateSlipUtility);

    typedef CoordinateTransformationUtils<TLocalMatrixType,TLocalVectorType,TValueType> BaseType;

    typedef std::size_t SizeType;

    typedef Node<3> NodeType;

    typedef Geometry<NodeType> GeometryType;

    ///@}
    ///@name Life Cycle
    ///@{

    /// Default constructor.
    // Base-class arguments (2,3,SLIP): presumably domain size 2, block size 3
    // (h, qx, qy) and the SLIP flag used by IsSlip() -- TODO confirm against
    // CoordinateTransformationUtils' constructor signature.
    FlowRateSlipUtility() : BaseType(2,3,SLIP) {}

    /// Destructor.
    virtual ~FlowRateSlipUtility() {}

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /**
     * @brief Apply slip boundary conditions to the rotated local contributions.
     * @details This function takes the local system contributions rotated so each
     * node's velocities are expressed using a base oriented with its normal
     * and imposes that the normal velocity is equal to the mesh velocity in
     * the normal direction.
     * @param rLocalMatrix A reference to the LHS local matrix
     * @param rLocalVector A reference to the RHS local vector
     * @param rGeometry A reference to the geometry of the element or condition
     */
    virtual void ApplySlipCondition(
        TLocalMatrixType& rLocalMatrix,
        TLocalVectorType& rLocalVector,
        GeometryType& rGeometry) const override
    {
        const SizeType LocalSize = rLocalVector.size(); // We expect this to work both with elements and conditions
        if (LocalSize > 0)
        {
            for (SizeType it_node = 0; it_node < rGeometry.PointsNumber(); ++it_node)
            {
                if (this->IsSlip(rGeometry[it_node]))
                {
                    // We fix the first dof (normal velocity) for each rotated block
                    SizeType j = it_node * BaseType::GetBlockSize();
                    array_1d<double,3> vel = rGeometry[it_node].FastGetSolutionStepValue(MOMENTUM);
                    array_1d<double,3> n = rGeometry[it_node].FastGetSolutionStepValue(NORMAL);
                    this->Normalize(n);
                    // Zero the whole row/column j, leaving only the diagonal,
                    // so the equation reduces to 1.0 * dq_n = -n.q
                    for (SizeType i = 0; i < j; ++i) // Skip term (i,i)
                    {
                        rLocalMatrix(i,j) = 0.0;
                        rLocalMatrix(j,i) = 0.0;
                    }
                    for (SizeType i = j+1; i < LocalSize; ++i)
                    {
                        rLocalMatrix(i,j) = 0.0;
                        rLocalMatrix(j,i) = 0.0;
                    }
                    rLocalVector(j) = - inner_prod(n, vel);
                    rLocalMatrix(j,j) = 1.0;
                }
            }
        }
    }

    /**
     * @brief RHS only version of ApplySlipCondition
     * @param rLocalVector A reference to the RHS local vector
     * @param rGeometry A reference to the geometry of the element or condition
     */
    // NOTE(review): the sign differs from the LHS+RHS overload above
    // (+inner_prod here vs -inner_prod there).  Verify this is intentional
    // (e.g. residual vs increment convention) and not a sign bug.
    virtual void ApplySlipCondition(
        TLocalVectorType& rLocalVector,
        GeometryType& rGeometry) const override
    {
        if (rLocalVector.size() > 0)
        {
            for (SizeType it_node = 0; it_node < rGeometry.PointsNumber(); ++it_node)
            {
                if (this->IsSlip(rGeometry[it_node]))
                {
                    // We fix the first dof (normal velocity) for each rotated block
                    SizeType j = it_node * BaseType::GetBlockSize();
                    array_1d<double,3> vel = rGeometry[it_node].FastGetSolutionStepValue(MOMENTUM);
                    array_1d<double,3> n = rGeometry[it_node].FastGetSolutionStepValue(NORMAL);
                    this->Normalize(n);
                    rLocalVector[j] = inner_prod(n, vel);
                }
            }
        }
    }

    /**
     * @brief Transform nodal velocities to the rotated coordinates (aligned
     * with each node's normal)
     * @param rModelPart A reference to the model part
     * @see RecoverVelocities
     */
    virtual void RotateVelocities(ModelPart& rModelPart) const override
    {
        TLocalVectorType vel(BaseType::GetDomainSize());
        TLocalVectorType tmp(BaseType::GetDomainSize());

        ModelPart::NodeIterator it_begin = rModelPart.NodesBegin();
        // vel/tmp are firstprivate: each thread gets its own scratch vectors
        #pragma omp parallel for firstprivate(vel, tmp)
        for(int i = 0; i < static_cast<int>(rModelPart.Nodes().size()); ++i)
        {
            ModelPart::NodeIterator it_node = it_begin + i;
            if (this->IsSlip(*it_node))
            {
                // For shallow water problems, domain size is always 2
                BoundedMatrix<double,2,2> rot;
                BaseType::LocalRotationOperatorPure(rot, *it_node);
                array_1d<double,3>& r_velocity = it_node->FastGetSolutionStepValue(MOMENTUM);
                // NOTE: the inner SizeType i shadows the omp loop index i
                for(SizeType i = 0; i < 2; i++)
                {
                    vel[i] = r_velocity[i];
                }
                noalias(tmp) = prod(rot, vel);
                for(SizeType i = 0; i < 2; i++)
                {
                    r_velocity[i] = tmp[i];
                }
            }
        }
    }

    /**
     * Transform nodal velocities from the rotated system to the original one
     * @param rModelPart A reference to the model part
     * @see RotateVelocities
     */
    virtual void RecoverVelocities(ModelPart& rModelPart) const override
    {
        TLocalVectorType vel(BaseType::GetDomainSize());
        TLocalVectorType tmp(BaseType::GetDomainSize());

        ModelPart::NodeIterator it_begin = rModelPart.NodesBegin();
        #pragma omp parallel for firstprivate(vel, tmp)
        for(int i = 0; i<static_cast<int>(rModelPart.Nodes().size()); ++i)
        {
            ModelPart::NodeIterator it_node = it_begin + i;
            if( this->IsSlip(*it_node) )
            {
                // For shallow water problems, domain size is always 2
                BoundedMatrix<double,2,2> rot;
                BaseType::LocalRotationOperatorPure(rot,*it_node);
                array_1d<double,3>& r_velocity = it_node->FastGetSolutionStepValue(MOMENTUM);
                for(SizeType i = 0; i < 2; i++)
                {
                    vel[i] = r_velocity[i];
                }
                // Apply the transpose -- assuming rot is orthogonal, this is
                // the inverse rotation
                noalias(tmp) = prod(trans(rot),vel);
                for(SizeType i = 0; i < 2; i++)
                {
                    r_velocity[i] = tmp[i];
                }
            }
        }
    }

    ///@}
    ///@name Access
    ///@{

    ///@}
    ///@name Inquiry
    ///@{

    ///@}
    ///@name Input and output
    ///@{

    /**
     * Turn back information as a string.
     */
    virtual std::string Info() const override
    {
        std::stringstream buffer;
        buffer << "FlowRateSlipUtility";
        return buffer.str();
    }

    /**
     * Print information about this object.
     */
    virtual void PrintInfo(std::ostream& rOStream) const override
    {
        rOStream << "FlowRateSlipUtility";
    }

    ///@}
    ///@name Friends
    ///@{

    ///@}

private:

    ///@name Un accessible methods
    ///@{

    /// Assignment operator.
    // FlowRateSlipUtility& operator=(FlowRateSlipUtility const& rOther) {}

    /// Copy constructor.
    // FlowRateSlipUtility(FlowRateSlipUtility const& rOther) {}

    ///@}

}; // Class FlowRateSlipUtility

///@}
///@name Type Definitions
///@{

///@}
///@name Input and output
///@{

/// input stream function
template<class TLocalMatrixType, class TLocalVectorType, class TValueType>
inline std::istream& operator >> (
    std::istream& rIStream,
    FlowRateSlipUtility<TLocalMatrixType, TLocalVectorType,TValueType>& rThis)
{
    return rIStream;
}

/// output stream function
template<class TLocalMatrixType, class TLocalVectorType, class TValueType>
inline std::ostream& operator << (
    std::ostream& rOStream,
    const FlowRateSlipUtility<TLocalMatrixType, TLocalVectorType,TValueType>& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);
    return rOStream;
}

///@}

///@} addtogroup block

}  // namespace Kratos.

#endif // KRATOS_FLOW_RATE_SLIP_UTILITY_H_INCLUDED  defined
tally.h
#ifndef OPENMC_TALLIES_TALLY_H
#define OPENMC_TALLIES_TALLY_H

#include "openmc/constants.h"
#include "openmc/tallies/trigger.h"

#include "pugixml.hpp"
#include "xtensor/xfixed.hpp"
#include "xtensor/xtensor.hpp"

#include <memory> // for unique_ptr
#include <unordered_map>
#include <string>
#include <vector>

namespace openmc {

//==============================================================================
//! A user-specified flux-weighted (or current) measurement.
//==============================================================================

class Tally {
public:
  Tally();

  //! Populate id, filters, nuclides, scores, etc. from a <tally> XML node
  void init_from_xml(pugi::xml_node node);

  void set_scores(pugi::xml_node node);
  void set_scores(std::vector<std::string> scores);
  void set_nuclides(pugi::xml_node node);

  //----------------------------------------------------------------------------
  // Methods for getting and setting filter/stride data.

  //! All filter indices for this tally
  const std::vector<int32_t>& filters() const {return filters_;}
  //! Index (into the global filters array) of the i-th filter
  int32_t filters(int i) const {return filters_[i];}
  //! Replace the filter list; also recomputes strides and n_filter_bins_
  void set_filters(const int32_t filter_indices[], int n);
  //! Index stride of the i-th filter within the flattened bin space
  int32_t strides(int i) const {return strides_[i];}
  //! Total number of (combined) filter bins
  int32_t n_filter_bins() const {return n_filter_bins_;}

  //----------------------------------------------------------------------------
  // Other methods.

  void init_triggers(pugi::xml_node node);

  void init_results();

  void reset();

  void accumulate();

  //----------------------------------------------------------------------------
  // Major public data members.

  int id_;  //!< User-defined identifier

  std::string name_;  //!< User-defined name

  int type_ {TALLY_VOLUME};  //!< e.g. volume, surface current

  //! Event type that contributes to this tally
  int estimator_ {ESTIMATOR_TRACKLENGTH};

  //! Whether this tally is currently being updated
  bool active_ {false};

  //! Number of realizations
  int n_realizations_ {0};

  std::vector<int> scores_;  //!< Filter integrands (e.g. flux, fission)

  //! Index of each nuclide to be tallied.  -1 indicates total material.
  std::vector<int> nuclides_ {-1};

  //! True if this tally has a bin for every nuclide in the problem
  bool all_nuclides_ {false};

  //! Results for each bin -- the first dimension of the array is for scores
  //! (e.g. flux, total reaction rate, fission reaction rate, etc.) and the
  //! second dimension of the array is for the combination of filters
  //! (e.g. specific cell, specific energy group, etc.)
  xt::xtensor<double, 3> results_;

  //----------------------------------------------------------------------------
  // Miscellaneous public members.

  // We need to have quick access to some filters.  The following gives indices
  // for various filters that could be in the tally or C_NONE if they are not
  // present.
  int energyout_filter_ {C_NONE};
  int delayedgroup_filter_ {C_NONE};

  bool depletion_rx_ {false};  //!< Has depletion reactions (e.g. (n,2n))

  std::vector<Trigger> triggers_;

  int deriv_ {C_NONE};  //!< Index of a TallyDerivative object for diff tallies.

private:
  //----------------------------------------------------------------------------
  // Private data.

  std::vector<int32_t> filters_;  //!< Filter indices in global filters array

  //! Index strides assigned to each filter to support 1D indexing.
  std::vector<int32_t> strides_;

  int32_t n_filter_bins_ {0};  //!< Product of all filter bin counts
};

//==============================================================================
// Global variable declarations
//==============================================================================

namespace model {
  extern std::vector<std::unique_ptr<Tally>> tallies;

  // Indices into `tallies` of the tallies active for the current batch,
  // split by estimator kind.
  extern std::vector<int> active_tallies;
  extern std::vector<int> active_analog_tallies;
  extern std::vector<int> active_tracklength_tallies;
  extern std::vector<int> active_collision_tallies;
  extern std::vector<int> active_meshsurf_tallies;
  extern std::vector<int> active_surface_tallies;

  //! Maps user-defined tally IDs to indices in `tallies`
  extern std::unordered_map<int, int> tally_map;
}

namespace simulation {
  //! Global tallies (such as k-effective estimators)
  extern xt::xtensor_fixed<double, xt::xshape<N_GLOBAL_TALLIES, 3>>
    global_tallies;

  //! Number of realizations for global tallies
  extern "C" int32_t n_realizations;
}

// It is possible to protect accumulate operations on global tallies by using an
// atomic update. However, when multiple threads accumulate to the same global
// tally, it can cause a higher cache miss rate due to invalidation. Thus, we
// use threadprivate variables to accumulate global tallies and then reduce at
// the end of a generation.
extern double global_tally_absorption;
extern double global_tally_collision;
extern double global_tally_tracklength;
extern double global_tally_leakage;
#pragma omp threadprivate(global_tally_absorption, global_tally_collision, \
  global_tally_tracklength, global_tally_leakage)

//==============================================================================
// Non-member functions
//==============================================================================

//! Read tally specification from tallies.xml
void read_tallies_xml();

//! \brief Accumulate the sum of the contributions from each history within the
//! batch to a new random variable
void accumulate_tallies();

//! Determine which tallies should be active
void setup_active_tallies();

// Alias for the type returned by xt::adapt(...). N is the dimension of the
// multidimensional array
template <std::size_t N>
using adaptor_type = xt::xtensor_adaptor<xt::xbuffer_adaptor<double*&, xt::no_ownership>, N>;

#ifdef OPENMC_MPI
//! Collect all tally results onto master process
void reduce_tally_results();
#endif

//! Release memory held by the global tally containers
void free_memory_tally();

} // namespace openmc

#endif // OPENMC_TALLIES_TALLY_H
config.h
/* config.h. Generated from config.in by configure. */ /* config.in. Generated from configure.ac by autoheader. */ /* Check that config.h is #included before system headers (this works only for glibc, but that should be enough). */ #if defined(__GLIBC__) && !defined(__FreeBSD_kernel__) && !defined(__CONFIG_H__) # error config.h must be #included before system headers #endif #define __CONFIG_H__ 1 /* Define if building universal (internal helper macro) */ /* #undef AC_APPLE_UNIVERSAL_BUILD */ /* Define to 1 if translation of program messages to the user's native language is requested. */ /* #undef ENABLE_NLS */ /* Define to enable linker plugins */ #define ENABLE_PLUGINS 1 /* Define to do multi-threaded linking */ /* #undef ENABLE_THREADS */ /* Default big endian (true or false) */ #define GOLD_DEFAULT_BIG_ENDIAN false /* Default machine code */ #define GOLD_DEFAULT_MACHINE EM_X86_64 /* Default OSABI code */ #define GOLD_DEFAULT_OSABI ELFOSABI_NONE /* Default size (32 or 64) */ #define GOLD_DEFAULT_SIZE 64 /* Define to 1 if you have the <byteswap.h> header file. */ /* #undef HAVE_BYTESWAP_H */ /* Define to 1 if you have the `chsize' function. */ /* #undef HAVE_CHSIZE */ /* Define to 1 if you have the declaration of `asprintf', and to 0 if you don't. */ #define HAVE_DECL_ASPRINTF 1 /* Define to 1 if you have the declaration of `basename', and to 0 if you don't. */ #define HAVE_DECL_BASENAME 0 /* Define to 1 if you have the declaration of `ffs', and to 0 if you don't. */ #define HAVE_DECL_FFS 1 /* Define to 1 if you have the declaration of `memmem', and to 0 if you don't. */ #define HAVE_DECL_MEMMEM 1 /* Define to 1 if you have the declaration of `snprintf', and to 0 if you don't. */ #define HAVE_DECL_SNPRINTF 1 /* Define to 1 if you have the declaration of `strndup', and to 0 if you don't. */ #define HAVE_DECL_STRNDUP 1 /* Define to 1 if you have the declaration of `strverscmp', and to 0 if you don't. 
*/ #define HAVE_DECL_STRVERSCMP 0 /* Define to 1 if you have the declaration of `vasprintf', and to 0 if you don't. */ #define HAVE_DECL_VASPRINTF 1 /* Define to 1 if you have the declaration of `vsnprintf', and to 0 if you don't. */ #define HAVE_DECL_VSNPRINTF 1 /* Define to 1 if you have the <dlfcn.h> header file. */ #define HAVE_DLFCN_H 1 /* Define to 1 if you have the <ext/hash_map> header file. */ #define HAVE_EXT_HASH_MAP 1 /* Define to 1 if you have the <ext/hash_set> header file. */ #define HAVE_EXT_HASH_SET 1 /* Define to 1 if you have the `fallocate' function. */ /* #undef HAVE_FALLOCATE */ /* Define to 1 if you have the `ffsll' function. */ #define HAVE_FFSLL 1 /* Define to 1 if you have the `ftruncate' function. */ #define HAVE_FTRUNCATE 1 /* Define to 1 if you have the <inttypes.h> header file. */ #define HAVE_INTTYPES_H 1 /* Define if your <locale.h> file defines LC_MESSAGES. */ #define HAVE_LC_MESSAGES 1 /* Define to 1 if you have the <locale.h> header file. */ #define HAVE_LOCALE_H 1 /* Define to 1 if you have the `mallinfo' function. */ /* #undef HAVE_MALLINFO */ /* Define to 1 if you have the <memory.h> header file. */ #define HAVE_MEMORY_H 1 /* Define to 1 if you have the `mmap' function. */ #define HAVE_MMAP 1 /* Define to 1 if you have the mremap function with MREMAP_MAYMOVE support */ /* #undef HAVE_MREMAP */ /* Define if compiler supports #pragma omp threadprivate */ #define HAVE_OMP_SUPPORT 1 /* Define to 1 if you have the `posix_fallocate' function. */ /* #undef HAVE_POSIX_FALLOCATE */ /* Define to 1 if you have the `pread' function. */ #define HAVE_PREAD 1 /* Define to 1 if you have the `readv' function. */ #define HAVE_READV 1 /* Define to 1 if you have the `setlocale' function. */ #define HAVE_SETLOCALE 1 /* Define if struct stat has a field st_mtim with timespec for mtime */ #define HAVE_STAT_ST_MTIM 1 /* Define to 1 if you have the <stdint.h> header file. */ #define HAVE_STDINT_H 1 /* Define to 1 if you have the <stdlib.h> header file. 
*/ #define HAVE_STDLIB_H 1 /* Define to 1 if you have the <strings.h> header file. */ #define HAVE_STRINGS_H 1 /* Define to 1 if you have the <string.h> header file. */ #define HAVE_STRING_H 1 /* Define to 1 if you have the `sysconf' function. */ #define HAVE_SYSCONF 1 /* Define to 1 if you have the <sys/mman.h> header file. */ #define HAVE_SYS_MMAN_H 1 /* Define to 1 if you have the <sys/stat.h> header file. */ #define HAVE_SYS_STAT_H 1 /* Define to 1 if you have the <sys/types.h> header file. */ #define HAVE_SYS_TYPES_H 1 /* Define to support 32-bit big-endian targets */ #define HAVE_TARGET_32_BIG 1 /* Define to support 32-bit little-endian targets */ #define HAVE_TARGET_32_LITTLE 1 /* Define to support 64-bit big-endian targets */ #define HAVE_TARGET_64_BIG 1 /* Define to support 64-bit little-endian targets */ #define HAVE_TARGET_64_LITTLE 1 /* Define if attributes work on C++ templates */ #define HAVE_TEMPLATE_ATTRIBUTES 1 /* Define to 1 if you have the `times' function. */ #define HAVE_TIMES 1 /* Define if std::tr1::hash<off_t> is usable */ #define HAVE_TR1_HASH_OFF_T 1 /* Define to 1 if you have the <tr1/unordered_map> header file. */ #define HAVE_TR1_UNORDERED_MAP 1 /* Define if ::std::tr1::unordered_map::rehash is usable */ #define HAVE_TR1_UNORDERED_MAP_REHASH 1 /* Define to 1 if you have the <tr1/unordered_set> header file. */ #define HAVE_TR1_UNORDERED_SET 1 /* Define to 1 if you have the <unistd.h> header file. */ #define HAVE_UNISTD_H 1 /* Define to 1 if you have the <unordered_map> header file. */ /* #undef HAVE_UNORDERED_MAP */ /* Define to 1 if you have the <unordered_set> header file. */ /* #undef HAVE_UNORDERED_SET */ /* Define to 1 if you have the <windows.h> header file. */ /* #undef HAVE_WINDOWS_H */ /* Define to 1 if you have the <zlib.h> header file. 
*/ #define HAVE_ZLIB_H 1 /* Default library search path */ #define LIB_PATH "/lib:/usr/lib" /* Whether configured as a native linker */ #define NATIVE_LINKER 1 /* Name of package */ #define PACKAGE "gold" /* Define to the address where bug reports for this package should be sent. */ #define PACKAGE_BUGREPORT "" /* Define to the full name of this package. */ #define PACKAGE_NAME "gold" /* Define to the full name and version of this package. */ #define PACKAGE_STRING "gold 0.1" /* Define to the one symbol short name of this package. */ #define PACKAGE_TARNAME "gold" /* Define to the home page for this package. */ #define PACKAGE_URL "" /* Define to the version of this package. */ #define PACKAGE_VERSION "0.1" /* Define to 1 if you have the ANSI C header files. */ #define STDC_HEADERS 1 /* System root for target files */ #define TARGET_SYSTEM_ROOT "/" /* Whether the system root can be relocated */ #define TARGET_SYSTEM_ROOT_RELOCATABLE 0 /* Enable extensions on AIX 3, Interix. */ #ifndef _ALL_SOURCE # define _ALL_SOURCE 1 #endif /* Enable GNU extensions on systems that have them. */ #ifndef _GNU_SOURCE # define _GNU_SOURCE 1 #endif /* Enable threading extensions on Solaris. */ #ifndef _POSIX_PTHREAD_SEMANTICS # define _POSIX_PTHREAD_SEMANTICS 1 #endif /* Enable extensions on HP NonStop. */ #ifndef _TANDEM_SOURCE # define _TANDEM_SOURCE 1 #endif /* Enable general extensions on Solaris. */ #ifndef __EXTENSIONS__ # define __EXTENSIONS__ 1 #endif /* Version number of package */ #define VERSION "0.1" /* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most significant byte first (like Motorola and SPARC, unlike Intel). */ #if defined AC_APPLE_UNIVERSAL_BUILD # if defined __BIG_ENDIAN__ # define WORDS_BIGENDIAN 1 # endif #else # ifndef WORDS_BIGENDIAN /* # undef WORDS_BIGENDIAN */ # endif #endif /* Define to 1 if on MINIX. */ /* #undef _MINIX */ /* Define to 2 if the system does not provide POSIX.1 features except with this defined. 
*/ /* #undef _POSIX_1_SOURCE */ /* Define to 1 if you need to in order for `stat' and other things to work. */ /* #undef _POSIX_SOURCE */
imageutils.h
/**
 * Image resampling / arithmetic helpers (OpenMP thread-banded).
 *
 * Created by yanyuanchi on 2017/4/8.
 */
#ifndef SIFTYSIFTY_IMAGEUTILS_H
#define SIFTYSIFTY_IMAGEUTILS_H

#include "structs.h"
#include "utils.h"

#ifdef _OPENMP
#include <omp.h>
#endif

namespace SiftySifty {

/**
 * Half-sample the src Mat: dst(x, y) = src(2x, 2y).
 * This is plain decimation (top-left pixel of each 2x2 block) -- no
 * averaging / low-pass filtering is applied.
 * Silently returns on null input or when dst is not exactly
 * (srcWidth >> 1, srcHeight >> 1).
 * Rows are split into contiguous bands, one per hardware thread.
 * @tparam T pixel type
 * @param src src mat
 * @param dst dst mat
 */
template<class T>
void halfSampleMat(Mat<T> *src, Mat<T> *dst) {
    if (nullptr == src || nullptr == dst
        || nullptr == src->data || nullptr == dst->data) {
        return;
    }

    int srcWidth  = src->width;
    int srcHeight = src->height;
    int dstWidth  = dst->width;
    int dstHeight = dst->height;

    if ((srcWidth >> 1) != dstWidth || (srcHeight >> 1) != dstHeight) {
        return;
    }

    int maxThreadNum = getHardwareCPUNum();
    int threadIndex = 0;
    // rows per thread band (at least 1)
    int stride = max_value((int) (roundf(1.0f * dstHeight / maxThreadNum)), 1);

#pragma omp parallel for private(threadIndex)
    for (threadIndex = 0; threadIndex < maxThreadNum; ++threadIndex) {
        int start = threadIndex * stride;
        // the last thread mops up any remainder rows
        int end = (threadIndex == (maxThreadNum - 1)) ?
                  dstHeight : min_value(start + stride, dstHeight);

        T *srcData = src->data + (srcWidth * (start << 1));
        T *dstData = dst->data + (dstWidth * start);

        for (int y = start; y < end; ++y) {
            for (int x = 0; x < dstWidth; ++x) {
                dstData[x] = srcData[(x << 1)];
            }

            srcData += (srcWidth + srcWidth);  // advance two source rows
            dstData += dstWidth;
        }
    }
}

/**
 * Resize src to dst with bilinear interpolation in fixed point:
 * 11-bit weights per axis, combined products shifted down by 22 with
 * rounding (delta = half of 2^22).
 * NOTE(review): the source coordinate here is (x + 0.5) * (srcW - 1) / dstW
 * (no -0.5 recentering and no edge clamping), which differs from the
 * center-aligned mapping used by resizeMat2 below -- confirm the two
 * conventions are both intended.
 * @tparam T pixel type
 * @param src source mat
 * @param dst destination mat
 */
template<class T>
void resizeMat(Mat<T> *src, Mat<T> *dst) {
    if (nullptr == src || nullptr == dst
        || nullptr == src->data || nullptr == dst->data) {
        return;
    }

    int srcWidth  = src->width;
    int srcHeight = src->height;
    int dstWidth  = dst->width;
    int dstHeight = dst->height;

    T *srcData = src->data;
    T *dstData = dst->data;

    // same size: straight copy
    if (srcWidth == dstWidth && srcHeight == dstHeight) {
        memcpy(dstData, srcData, sizeof(T) * srcWidth * srcHeight);
        return;
    }

    int32_t shift = 22;
    int64_t scale = (1 << (shift >> 1));  // 2^11 per-axis weight scale
    int64_t delta = (1 << (shift - 1));   // rounding term for the >> shift

    float xRatio = 1.f * (srcWidth - 1.f) / dstWidth;
    float yRatio = 1.f * (srcHeight - 1.f) / dstHeight;

    int maxThreadNum = getHardwareCPUNum();
    int threadIndex = 0;
    int stride = max_value((int) (roundf(1.0f * dstHeight / maxThreadNum)), 1);

#pragma omp parallel for private(threadIndex)
    for (threadIndex = 0; threadIndex < maxThreadNum; ++threadIndex) {
        int start = threadIndex * stride;
        int end = (threadIndex == (maxThreadNum - 1)) ?
                  dstHeight : min_value(start + stride, dstHeight);

        T *dstOffsetData = dstData + start * dstWidth;

        for (int y = start; y < end; ++y) {
            float yOffset = (y + 0.5f) * yRatio;
            int yUp = (int) floorf(yOffset);
            yOffset -= yUp;

            int64_t multUp   = (int64_t) (yOffset * scale);
            int64_t multDown = scale - multUp;

            for (int x = 0; x < dstWidth; ++x) {
                float xOffset = (x + 0.5f) * xRatio;
                int xLeft = (int) floorf(xOffset);
                xOffset -= xLeft;

                int64_t multLeft  = (int64_t) (xOffset * scale);
                int64_t multRight = scale - multLeft;

                T *srcOffsetData = srcData + yUp * srcWidth + xLeft;

                // weighted sum of the 2x2 neighbourhood, rounded
                dstOffsetData[x] = (T) ((srcOffsetData[0] * multRight * multDown
                                         + srcOffsetData[1] * multLeft * multDown
                                         + srcOffsetData[srcWidth] * multRight * multUp
                                         + srcOffsetData[srcWidth + 1] * multLeft * multUp
                                         + delta) >> shift);
            }

            dstOffsetData += dstWidth;
        }
    }
}

/**
 * Resize src to dst (bilinear, fixed point) with a center-aligned mapping
 * and edge clamping; the horizontal neighbour indices and weights are
 * precomputed per destination column into xTable/xMult.
 * @tparam T pixel type
 * @param srcMat source mat
 * @param dstMat destination mat
 */
template<class T>
void resizeMat2(Mat<T> *srcMat, Mat<T> *dstMat) {
    if (nullptr == srcMat || nullptr == dstMat) {
        return;
    }

    T *src = srcMat->data;
    T *dst = dstMat->data;

    int srcWidth  = srcMat->width;
    int srcHeight = srcMat->height;
    int dstWidth  = dstMat->width;
    int dstHeight = dstMat->height;

    if (srcWidth == dstWidth && srcHeight == dstHeight) {
        memcpy(dst, src, sizeof(T) * srcWidth * srcHeight);
        return;
    }

    int32_t shift = 22;
    int64_t scale = (1 << (shift >> 1));  // 2^11 per-axis weight scale
    int64_t delta = (1 << (shift - 1));   // rounding term for the >> shift

    /**
     * src = (dst + 0.5) * srcWidth / dstWidth - 0.5
     */
    float xRatio = 1.f * srcWidth / dstWidth;
    float yRatio = 1.f * srcHeight / dstHeight;

    // per-column tables: xTable holds the left/right sample indices,
    // xMult the (left, right) weights
    int *xTable = (int*) malloc(sizeof(int) * 2 * dstWidth);
    int64_t *xMult = (int64_t*) malloc(sizeof(int64_t) * 2 * dstWidth);

    for (int x = 0; x < dstWidth; ++x) {
        float xOffset = (x + 0.5f) * xRatio - 0.5f;

        int xLeft;
        int64_t multLeft, multRight;

        if (0 >= xOffset) {
            // clamp to the left edge
            xLeft = 0;
            multLeft  = 0;
            multRight = scale;
        } else if (xOffset >= (srcWidth - 1)) {
            // clamp to the right edge
            xLeft = srcWidth - 2;
            multLeft  = scale;
            multRight = 0;
        } else {
            xLeft = (int) floorf(xOffset);
            xOffset -= xLeft;
            multLeft  = (int64_t) (xOffset * scale);
            multRight = scale - multLeft;
        }

        xTable[(x << 1)]     = xLeft;
        xTable[(x << 1) + 1] = xLeft + 1;
        xMult[(x << 1)]      = multLeft;
        xMult[(x << 1) + 1]  = multRight;
    }

    int maxThreadNum = getHardwareCPUNum();
    int threadIndex = 0;
    int stride = max_value((int) (roundf(1.0f * dstHeight / maxThreadNum)), 1);

#pragma omp parallel for private(threadIndex)
    for (threadIndex = 0; threadIndex < maxThreadNum; ++threadIndex) {
        int start = threadIndex * stride;
        int end = (threadIndex == (maxThreadNum - 1)) ?
                  dstHeight : min_value(start + stride, dstHeight);

        T *dstData = dst + start * dstWidth;

        for (int y = start; y < end; ++y) {
            float yOffset = (y + 0.5f) * yRatio - 0.5f;

            int yUp;
            int64_t multUp, multDown;

            if (0 >= yOffset) {
                yUp = 0;
                multUp   = 0;
                multDown = scale;
            } else if (yOffset >= (srcHeight - 1)) {
                yUp = srcHeight - 2;
                multUp   = scale;
                multDown = 0;
            } else {
                yUp = (int) floorf(yOffset);
                yOffset -= yUp;
                multUp   = (int64_t) (yOffset * scale);
                multDown = scale - multUp;
            }

            T *upSrc   = src + yUp * srcWidth;
            T *downSrc = upSrc + srcWidth;

            for (int x = 0; x < dstWidth; ++x) {
                int x2 = (x << 1);

                // row-wise lerp with (right weight = xMult[x2+1],
                // left weight = xMult[x2]), then lerp between rows
                dstData[x] = (T)((((upSrc[xTable[x2]] * xMult[x2+1] + upSrc[xTable[x2+1]]*xMult[x2]) * multDown
                                  + (downSrc[xTable[x2]] * xMult[x2+1] + downSrc[xTable[x2+1]]*xMult[x2]) * multUp)
                                  + delta) >> shift);
            }

            dstData += dstWidth;
        }
    }

    free(xTable);
    free(xMult);
}

/**
 * dst[i] = src[i] * scale for a width x height buffer (elementwise,
 * 4x unrolled, thread-banded by rows).
 * @tparam T1 source element type
 * @tparam T2 destination element type
 */
template<class T1, class T2>
void scaleMatByScale(T1 *src, T2 *dst, int width, int height, int scale) {
    int maxThreadNum = getHardwareCPUNum();
    int threadIndex = 0;
    int stride = max_value((int) (roundf(1.0f * height / maxThreadNum)), 1);

#pragma omp parallel for private(threadIndex)
    for (threadIndex = 0; threadIndex < maxThreadNum; ++threadIndex) {
        int start = threadIndex * stride;
        int end = (threadIndex == (maxThreadNum - 1)) ?
                  height : min_value(start + stride, height);

        T1 *srcData = src + start * width;
        T2 *dstData = dst + start * width;

        int64_t length = (end - start) * width;
        int64_t limit  = length - 3;
        int64_t i = 0;

        for (; i < limit; i += 4) {
            dstData[i]     = (srcData[i] * scale);
            dstData[i + 1] = (srcData[i + 1] * scale);
            dstData[i + 2] = (srcData[i + 2] * scale);
            dstData[i + 3] = (srcData[i + 3] * scale);
        }

        for (; i < length; ++i) {
            dstData[i] = (srcData[i] * scale);
        }
    }
}

/**
 * dst[i] = src[i] << shift (elementwise, 4x unrolled, thread-banded).
 * NOTE(review): left-shifting a negative value is undefined behaviour
 * before C++20 -- confirm T1 is only instantiated with unsigned /
 * non-negative pixel data.
 * @tparam T1 source element type
 * @tparam T2 destination element type
 */
template<class T1, class T2>
void scaleMatByShift(T1 *src, T2 *dst, int width, int height, int shift) {
    int maxThreadNum = getHardwareCPUNum();
    int threadIndex = 0;
    int stride = max_value((int) (roundf(1.0f * height / maxThreadNum)), 1);

#pragma omp parallel for private(threadIndex)
    for (threadIndex = 0; threadIndex < maxThreadNum; ++threadIndex) {
        int start = threadIndex * stride;
        int end = (threadIndex == (maxThreadNum - 1)) ?
                  height : min_value(start + stride, height);

        T1 *srcData = src + start * width;
        T2 *dstData = dst + start * width;

        int64_t length = (end - start) * width;
        int64_t limit  = length - 3;
        int64_t i = 0;

        for (; i < limit; i += 4) {
            dstData[i]     = (srcData[i] << shift);
            dstData[i + 1] = (srcData[i + 1] << shift);
            dstData[i + 2] = (srcData[i + 2] << shift);
            dstData[i + 3] = (srcData[i + 3] << shift);
        }

        for (; i < length; ++i) {
            dstData[i] = (srcData[i] << shift);
        }
    }
}

/* NOTE(review): the view ends mid-function here -- subMat continues past
 * this chunk; the fragment below is left untouched. */
template<class T>
void subMat(T *src1, T *src2, T *dst, int width, int height) {
    int maxThreadNum = getHardwareCPUNum();
    int threadIndex = 0;
    int stride = max_value((int) (roundf(1.0f * height / maxThreadNum)), 1);

#pragma omp parallel for private(threadIndex)
    for (threadIndex = 0; threadIndex < maxThreadNum; ++threadIndex) {
        int start = threadIndex * stride;
        int end = (threadIndex == (maxThreadNum - 1)) ?
height : min_value(start + stride, height); T *src1Data = src1 + start * width; T *src2Data = src2 + start * width; T *dstData = dst + start * width; int64_t length = (end - start) * width; int64_t limit = length - 3; int64_t i = 0; for (; i < limit; i += 4) { dstData[i] = src1Data[i] - src2Data[i]; dstData[i + 1] = src1Data[i + 1] - src2Data[i + 1]; dstData[i + 2] = src1Data[i + 2] - src2Data[i + 2]; dstData[i + 3] = src1Data[i + 3] - src2Data[i + 3]; } for (; i < length; ++i) { dstData[i] = src1Data[i] - src2Data[i]; } } } } #endif //SIFTYSIFTY_IMAGEUTILS_H
Pragma.h
//===- Pragma.h - Pragma registration and handling --------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the PragmaHandler and PragmaTable interfaces.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LEX_PRAGMA_H
#define LLVM_CLANG_LEX_PRAGMA_H

#include "clang/Basic/LLVM.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include <string>

namespace clang {

class PragmaNamespace;
class Preprocessor;
class Token;

/**
 * Describes how the pragma was introduced, e.g., with \#pragma,
 * _Pragma, or __pragma.
 */
enum PragmaIntroducerKind {
  /**
   * The pragma was introduced via \#pragma.
   */
  PIK_HashPragma,

  /**
   * The pragma was introduced via the C99 _Pragma(string-literal).
   */
  PIK__Pragma,

  /**
   * The pragma was introduced via the Microsoft
   * __pragma(token-string).
   */
  PIK___pragma
};

/// PragmaHandler - Instances of this interface defined to handle the various
/// pragmas that the language front-end uses.  Each handler optionally has a
/// name (e.g. "pack") and the HandlePragma method is invoked when a pragma with
/// that identifier is found.  If a handler does not match any of the declared
/// pragmas the handler with a null identifier is invoked, if it exists.
///
/// Note that the PragmaNamespace class can be used to subdivide pragmas, e.g.
/// we treat "\#pragma STDC" and "\#pragma GCC" as namespaces that contain other
/// pragmas.
class PragmaHandler {
  // Identifier this handler responds to; empty for the catch-all null handler.
  std::string Name;

public:
  PragmaHandler() = default;
  explicit PragmaHandler(StringRef name) : Name(name) {}
  virtual ~PragmaHandler();

  StringRef getName() const { return Name; }

  /// HandlePragma - Callback invoked by the preprocessor when a pragma whose
  /// identifier matches this handler's name is encountered.  Pure virtual;
  /// concrete handlers must implement it.
  virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
                            Token &FirstToken) = 0;

  /// getIfNamespace - If this is a namespace, return it.  This is equivalent to
  /// using a dynamic_cast, but doesn't require RTTI.
  virtual PragmaNamespace *getIfNamespace() { return nullptr; }
};

/// EmptyPragmaHandler - A pragma handler which takes no action, which can be
/// used to ignore particular pragmas.
class EmptyPragmaHandler : public PragmaHandler {
public:
  explicit EmptyPragmaHandler(StringRef Name = StringRef());

  // Intentionally does nothing: the pragma is consumed and ignored.
  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
                    Token &FirstToken) override;
};

/// PragmaNamespace - This PragmaHandler subdivides the namespace of pragmas,
/// allowing hierarchical pragmas to be defined.  Common examples of namespaces
/// are "\#pragma GCC", "\#pragma STDC", and "\#pragma omp", but any namespaces
/// may be (potentially recursively) defined.
class PragmaNamespace : public PragmaHandler {
  /// Handlers - This is a map of the handlers in this namespace with their name
  /// as key.
  llvm::StringMap<PragmaHandler *> Handlers;

public:
  explicit PragmaNamespace(StringRef Name) : PragmaHandler(Name) {}
  ~PragmaNamespace() override;

  /// FindHandler - Check to see if there is already a handler for the
  /// specified name.  If not, return the handler for the null name if it
  /// exists, otherwise return null.  If IgnoreNull is true (the default) then
  /// the null handler isn't returned on failure to match.
  PragmaHandler *FindHandler(StringRef Name, bool IgnoreNull = true) const;

  /// AddPragma - Add a pragma to this namespace.
  void AddPragma(PragmaHandler *Handler);

  /// RemovePragmaHandler - Remove the given handler from the
  /// namespace.
  void RemovePragmaHandler(PragmaHandler *Handler);

  /// IsEmpty - True when no handlers remain registered in this namespace.
  bool IsEmpty() const { return Handlers.empty(); }

  // Dispatches to the sub-handler matching the token following the namespace.
  void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
                    Token &Tok) override;

  PragmaNamespace *getIfNamespace() override { return this; }
};

} // namespace clang

#endif // LLVM_CLANG_LEX_PRAGMA_H
main.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/*
 * Fills a vector of n ones in parallel, then sums it with an OpenMP
 * reduction, timing both phases with omp_get_wtime(). The parallel
 * regions are only activated for n > 100 (see the if() clauses).
 *
 * Usage: prog <number of items>   (exits with usage text when no
 * argument is given)
 */
int main(int argc, const char * argv[]) {
    int n = 100;
    if (argc == 1) {
        printf("Use: %s <número de items>\n", argv[0]);
        exit(0);
    } else {
        /* FIX: atoi() reports no errors, and a non-positive or huge n
         * previously produced an invalid/oversized VLA (undefined
         * behavior / stack overflow). Validate with strtol() instead. */
        char *end = NULL;
        long v = strtol(argv[1], &end, 10);
        if (end == argv[1] || *end != '\0' || v <= 0 || v > 100000000L) {
            fprintf(stderr, "Invalid item count: %s\n", argv[1]);
            exit(1);
        }
        n = (int) v;
    }

    int suma = 0;
    int i;
    /* FIX: heap allocation instead of a VLA — a large n must not blow
     * the stack, and the allocation is now checked. */
    int *numeros = malloc(sizeof *numeros * (size_t) n);
    if (numeros == NULL) {
        fprintf(stderr, "Out of memory\n");
        exit(1);
    }
    double t_inicial = 0, t_final = 0;

    /* phase 1: parallel initialization */
    t_inicial = omp_get_wtime();
#pragma omp parallel for private(i) if (n > 100)
    for (i = 0; i < n; ++i) {
        numeros[i] = 1;
    }
    t_final = omp_get_wtime();
    printf("La inicialización del vector demoró %.5f\n", t_final - t_inicial);

    /* phase 2: parallel sum with a reduction on suma */
    t_inicial = omp_get_wtime();
#pragma omp parallel for private(i) reduction(+:suma) if (n > 100)
    for (i = 0; i < n; ++i) {
        suma += numeros[i];
        int id = omp_get_thread_num();
        printf("Hilo %d = [%d]\n", id, i);
    }
    t_final = omp_get_wtime();
    printf("La suma del vector demoró %.5f\n", t_final - t_inicial);
    printf("La suma = %d\n", suma);

    free(numeros);
    return 0;
}
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric ariable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 8; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free 
Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=Nt-1;t1++) { lbp=ceild(t1+1,2); ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1,2),ceild(8*t2-Nz+5,8));t3<=min(floord(4*Nt+Ny-9,8),floord(4*t1+Ny-1,8));t3++) { for (t4=max(max(ceild(t1-62,64),ceild(8*t2-Nz-243,256)),ceild(8*t3-Ny-243,256));t4<=min(min(floord(4*Nt+Nx-9,256),floord(4*t1+Nx-1,256)),floord(8*t3+Nx-5,256));t4++) { for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(256*t4-Nx+5,4)),t1);t5<=min(min(min(2*t3,Nt-1),t1+1),64*t4+62);t5++) { for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) { lbv=max(256*t4,4*t5+4); ubv=min(256*t4+255,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + 
A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
kncmbpush3.c
/* KNC C Library for Skeleton 3D Electromagnetic Vector PIC Code */ /* written by Viktor K. Decyk, UCLA and Ricardo Fonseca, ISCTE */ #include <stdlib.h> #include <stdio.h> #include <complex.h> #include <math.h> #include <string.h> #include <immintrin.h> #include "kncmbpush3.h" /*--------------------------------------------------------------------*/ void ckncgbppush3lt(float ppart[], float fxyz[], float bxyz[], int kpic[], float qbm, float dt, float dtc, float *ek, int idimp, int nppmx, int nx, int ny, int nz, int mx, int my, int mz, int nxv, int nyv, int nzv, int mx1, int my1, int mxyz1,int ipbc) { /* for 3d code, this subroutine updates particle co-ordinates and velocities using leap-frog scheme in time and first-order linear interpolation in space, with magnetic field. Using the Boris Mover. OpenMP/vector version using guard cells data read in tiles particles stored segmented array 190 flops/particle, 1 divide, 54 loads, 6 stores input: all, output: ppart, ek velocity equations used are: vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) where q/m is charge/mass, and the rotation matrix is given by: rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2) rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot[4] = (1 - (om*dt/2)**2 + 
2*(omy*dt/2)**2)/(1 + (om*dt/2)**2) rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2) and om**2 = omx**2 + omy**2 + omz**2 the rotation matrix is determined by: omx = (q/m)*bx(x(t),y(t),z(t)), omy = (q/m)*by(x(t),y(t),z(t)), and omz = (q/m)*bz(x(t),y(t),z(t)). position equations used are: x(t+dt)=x(t) + vx(t+dt/2)*dt y(t+dt)=y(t) + vy(t+dt/2)*dt z(t+dt)=z(t) + vz(t+dt/2)*dt fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t)), bx(x(t),y(t),z(t)), by(x(t),y(t),z(t)), and bz(x(t),y(t),z(t)) are approximated by interpolation from the nearest grid points: fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l)) + dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l))) + dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1)) + dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1))) where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l similarly for fy(x,y,z), fz(x,y,z), bx(x,y,z), by(x,y,z), bz(x,y,z) ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = position z of particle n in tile m ppart[m][3][n] = velocity vx of particle n in tile m ppart[m][4][n] = velocity vy of particle n in tile m ppart[m][5][n] = velocity vz of particle n in tile m fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l) fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l) fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l) that is, convolution of electric field over particle shape bxyz[l][k][j][0] = x component of magnetic field at grid (j,k,l) bxyz[l][k][j][1] = y component of magnetic field at grid (j,k,l) bxyz[l][k][j][2] = z component of magnetic field at grid (j,k,l) that is, the convolution of magnetic field over particle shape kpic = number of particles per tile qbm = particle 
charge/mass ratio dt = time interval between successive force calculations dtc = time interval between successive co-ordinate calculations kinetic energy/mass at time t is also calculated, using ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 + (vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 + .25*(vz(t+dt/2) + vz(t-dt/2))**2) idimp = size of phase space = 6 nppmx = maximum number of particles in tile nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z nxv = second dimension of field arrays, must be >= nx+1 nyv = third dimension of field arrays, must be >= ny+1 nzv = fourth dimension of field array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ipbc = particle boundary condition = (0,1,2,3) = (none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic) requires KNC, ppart needs to be 64 byte aligned nppmx needs to be a multiple of 16 fxyz needs to have 4 components, although one is not used local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp, nps; int i, j, k, l, m, nn, mm, ll, nm, mxv, myv, mxyv, nxyv; float qtmh, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dxp, dyp, dzp, amx, amy, amz, dx, dy, dz, ox, oy, oz, dx1; float acx, acy, acz, omxt, omyt, omzt, omt, anorm; float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9; float x, y, z, vx, vy, vz; double sum1, sum2; __m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4; __m512i v_nn, v_mm, v_ll, v_nm, v_it, v_perm; __m512 v_qtmh, v_dt, v_dtc, v_one, v_zero; __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz; __m512 v_dx1, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz; __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz; __m512 a, b, c, d, e, f, g, h, p, q, r, s; __m512 v_two, v_half, v_ox, v_oy, v_oz; __m512d v_sum1, v_d; __mmask16 msk; 
__attribute__((aligned(64))) unsigned int kk[16]; __attribute__((aligned(64))) double dd[8]; __attribute__((aligned(64))) float sfxyz[4*MXV*MYV*MZV]; __attribute__((aligned(64))) float sbxyz[4*MXV*MYV*MZV]; /* __attribute__((aligned(64))) float sfxyz[4*(mx+1)*(my+1)*(mz+1)]; */ /* __attribute__((aligned(64))) float sbxyz[4*(mx+1)*(my+1)*(mz+1)]; */ mxy1 = mx1*my1; /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; qtmh = 0.5f*qbm*dt; sum2 = 0.0; /* set boundary values */ edgelx = 0.0f; edgely = 0.0f; edgelz = 0.0f; edgerx = (float) nx; edgery = (float) ny; edgerz = (float) nz; if (ipbc==2) { edgelx = 1.0f; edgely = 1.0f; edgelz = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); edgerz = (float) (nz-1); } else if (ipbc==3) { edgelx = 1.0f; edgely = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); } v_mxv4 = _mm512_set1_epi32(4*mxv); v_mxyv4 = _mm512_set1_epi32(4*mxyv); v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0); v_qtmh = _mm512_set1_ps(qtmh); v_dt = _mm512_set1_ps(dt); v_dtc = _mm512_set1_ps(dtc); v_one = _mm512_set1_ps(1.0f); v_zero = _mm512_setzero_ps(); v_two = _mm512_set1_ps(2.0f); v_half = _mm512_set1_ps(0.5f); v_edgelx = _mm512_set1_ps(edgelx); v_edgely = _mm512_set1_ps(edgely); v_edgelz = _mm512_set1_ps(edgelz); v_edgerx = _mm512_set1_ps(edgerx); v_edgery = _mm512_set1_ps(edgery); v_edgerz = _mm512_set1_ps(edgerz); v_sum1 = _mm512_set1_pd(0.0); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,x,y,z,vx, \ vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt, \ omyt,omzt,omt,anorm,rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,sum1, \ v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_nm,v_it,v_x,v_y,v_z,v_dxp,v_dyp, \ v_dzp,v_amx,v_amy,v_amz,v_dx1,v_dx,v_dy,v_dz,v_vx,v_vy,v_vz,v_ox,v_oy, \ 
v_oz,v_at,v_d,v_sum1,a,b,c,d,e,f,g,h,p,q,r,s,msk,kk,dd,sfxyz,sbxyz) \ reduction(+:sum2) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm512_set1_epi32(noff); v_moff = _mm512_set1_epi32(moff); v_loff = _mm512_set1_epi32(loff); npp = kpic[l]; npoff = idimp*nppmx*l; /* load local fields from global array */ nn = (mx < nx-noff ? mx : nx-noff) + 1; mm = (my < ny-moff ? my : ny-moff) + 1; ll = (mz < nz-loff ? mz : nz-loff) + 1; nps = 4*(nn/4); /* load electric field */ for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 0; i < nn; i++) { */ /* sfxyz[4*(i+mxv*j+mxyv*k)] */ /* = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sfxyz[1+4*(i+mxv*j+mxyv*k)] */ /* = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sfxyz[2+4*(i+mxv*j+mxyv*k)] */ /* = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* } */ for (i = 0; i < nps; i+=4) { m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff)); v_at = _mm512_loadunpacklo_ps(v_at,&fxyz[m]); v_at = _mm512_loadunpackhi_ps(v_at,&fxyz[m+16]); m = 4*(i + mxv*j + mxyv*k); _mm512_packstorelo_ps(&sfxyz[m],v_at); _mm512_packstorehi_ps(&sfxyz[m+16],v_at); } /* loop over remaining elements */ for (i = nps; i < nn; i++) { sfxyz[4*(i+mxv*j+mxyv*k)] = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[1+4*(i+mxv*j+mxyv*k)] = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[2+4*(i+mxv*j+mxyv*k)] = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[3+4*(i+mxv*j+mxyv*k)] = fxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } /* load magnetic field */ for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 0; i < nn; i++) { */ /* sbxyz[4*(i+mxv*j+mxyv*k)] */ /* = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sbxyz[1+4*(i+mxv*j+mxyv*k)] */ /* = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sbxyz[2+4*(i+mxv*j+mxyv*k)] 
*/ /* = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* } */ for (i = 0; i < nps; i+=4) { m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff)); v_at = _mm512_loadunpacklo_ps(v_at,&bxyz[m]); v_at = _mm512_loadunpackhi_ps(v_at,&bxyz[m+16]); m = 4*(i + mxv*j + mxyv*k); _mm512_packstorelo_ps(&sbxyz[m],v_at); _mm512_packstorehi_ps(&sbxyz[m+16],v_at); } /* loop over remaining elements */ for (i = nps; i < nn; i++) { sbxyz[4*(i+mxv*j+mxyv*k)] = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[1+4*(i+mxv*j+mxyv*k)] = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[2+4*(i+mxv*j+mxyv*k)] = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[3+4*(i+mxv*j+mxyv*k)] = bxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } nps = 16*(npp/16); sum1 = 0.0; v_sum1 = _mm512_set1_pd(0.0); /* loop over particles in tile in blocks of 16 */ for (j = 0; j < nps; j+=16) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ /* z = ppart[j+2*nppmx+npoff]; */ v_x = _mm512_load_ps(&ppart[j+npoff]); v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]); v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]); /* nn = x; */ /* mm = y; */ /* ll = z; */ v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); /* dxp = x - (float) nn; */ /* dyp = y - (float) mm; */ /* dzp = z - (float) ll; */ v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dxp = _mm512_sub_ps(v_x,v_dxp); v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dyp = _mm512_sub_ps(v_y,v_dyp); v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dzp = _mm512_sub_ps(v_z,v_dzp); /* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */ v_nn = _mm512_sub_epi32(v_nn,v_noff); v_mm = 
_mm512_sub_epi32(v_mm,v_moff); v_ll = _mm512_sub_epi32(v_ll,v_loff); v_it = _mm512_mullo_epi32(v_mxyv4,v_ll); v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm)); v_nm = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it); /* amx = 1.0f - dxp; */ /* amy = 1.0f - dyp; */ /* amz = 1.0f - dzp; */ v_amx = _mm512_sub_ps(v_one,v_dxp); v_amy = _mm512_sub_ps(v_one,v_dyp); v_amz = _mm512_sub_ps(v_one,v_dzp); /* dx1 = dxp*dyp; */ /* dyp = amx*dyp; */ /* amx = amx*amy; */ /* amy = dxp*amy; */ v_dx1 = _mm512_mul_ps(v_dxp,v_dyp); v_dyp = _mm512_mul_ps(v_amx,v_dyp); v_amx = _mm512_mul_ps(v_amx,v_amy); v_amy = _mm512_mul_ps(v_dxp,v_amy); /* find electric field */ /* nn = nm; */ _mm512_store_epi32(kk,v_nm); /* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = 
_mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[nn:nn+3] field components */ a = 
(__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find first part of electric field */ /* dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */ v_dx = _mm512_mul_ps(v_amx,a); v_dx = _mm512_fmadd_ps(v_amy,p,v_dx); /* dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */ v_dy = _mm512_mul_ps(v_amx,b); v_dy = _mm512_fmadd_ps(v_amy,q,v_dy); /* dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */ v_dz = _mm512_mul_ps(v_amx,c); v_dz = _mm512_fmadd_ps(v_amy,r,v_dz); /* mm = nn + 4*mxv; */ /* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), 
&sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = 
_mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[mm+4:mm+7] 
field components */ /* where mm = nn + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find second part of electric field */ /* dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */ v_dx = _mm512_fmadd_ps(v_dyp,a,v_dx); v_dx = _mm512_fmadd_ps(v_dx1,p,v_dx); v_dx = _mm512_mul_ps(v_amz,v_dx); /* dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */ v_dy = _mm512_fmadd_ps(v_dyp,b,v_dy); v_dy = _mm512_fmadd_ps(v_dx1,q,v_dy); v_dy = _mm512_mul_ps(v_amz,v_dy); /* dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */ v_dz = _mm512_fmadd_ps(v_dyp,c,v_dz); v_dz = _mm512_fmadd_ps(v_dx1,r,v_dz); v_dz = _mm512_mul_ps(v_amz,v_dz); /* nn += 4*mxyv; */ v_nn = _mm512_add_epi32(v_nm,v_mxyv4); _mm512_store_epi32(kk,v_nn); /* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13]; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[nn:nn+3] field components */ /* where nn = nn + 4*mxyv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */ /* where nn = nn + 4*mxyv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = 
_mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find third part of electric field */ /* vx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */ v_vx = _mm512_mul_ps(v_amx,a); v_vx = _mm512_fmadd_ps(v_amy,p,v_vx); /* vy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */ v_vy = _mm512_mul_ps(v_amx,b); v_vy = _mm512_fmadd_ps(v_amy,q,v_vy); /* vz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */ v_vz = _mm512_mul_ps(v_amx,c); v_vz = _mm512_fmadd_ps(v_amy,r,v_vz); /* mm = nn + 4*mxv; */ /* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7] + 4*mxv; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxyv + 
4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find fourth part of electric field */ /* dx = dx + dzp*(vx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */ v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx); v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx); v_dx = _mm512_fmadd_ps(v_dzp,v_vx,v_dx); /* dy = dy + dzp*(vy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */ v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy); v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy); v_dy = _mm512_fmadd_ps(v_dzp,v_vy,v_dy); /* dz = dz + dzp*(vz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */ v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz); v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz); 
v_dz = _mm512_fmadd_ps(v_dzp,v_vz,v_dz); /* find magnetic field */ /* nn = nm; */ _mm512_store_epi32(kk,v_nm); /* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), 
&sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[nn:nn+3] field components */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = 
_mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find first part of magnetic field */ /* ox = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */ v_ox = _mm512_mul_ps(v_amx,a); v_ox = _mm512_fmadd_ps(v_amy,p,v_ox); /* oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */ v_oy = _mm512_mul_ps(v_amx,b); v_oy = _mm512_fmadd_ps(v_amy,q,v_oy); /* oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */ v_oz = _mm512_mul_ps(v_amx,c); v_oz = _mm512_fmadd_ps(v_amy,r,v_oz); /* mm = nn + 4*mxv; */ /* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = 
_mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13] + 4*mxv; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = 
_mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find second part of magnetic field */ /* ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */ v_ox = _mm512_fmadd_ps(v_dyp,a,v_ox); v_ox = _mm512_fmadd_ps(v_dx1,p,v_ox); v_ox = _mm512_mul_ps(v_amz,v_ox); /* oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */ v_oy = _mm512_fmadd_ps(v_dyp,b,v_oy); v_oy = _mm512_fmadd_ps(v_dx1,q,v_oy); v_oy = _mm512_mul_ps(v_amz,v_oy); /* oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */ v_oz = _mm512_fmadd_ps(v_dyp,c,v_oz); v_oz = _mm512_fmadd_ps(v_dx1,r,v_oz); v_oz = _mm512_mul_ps(v_amz,v_oz); /* nn += 4*mxyv; */ v_nn = _mm512_add_epi32(v_nm,v_mxyv4); _mm512_store_epi32(kk,v_nn); /* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), 
&sbxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = 
_mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[nn:nn+3] field components */ /* where nn = nn + 4*mxyv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */ /* where nn = nn + 4*mxyv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find third part of magnetic field */ /* vx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */ v_vx = _mm512_mul_ps(v_amx,a); v_vx = _mm512_fmadd_ps(v_amy,p,v_vx); /* vy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */ v_vy = _mm512_mul_ps(v_amx,b); v_vy = _mm512_fmadd_ps(v_amy,q,v_vy); /* vz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */ v_vz = _mm512_mul_ps(v_amx,c); v_vz = _mm512_fmadd_ps(v_amy,r,v_vz); /* mm = nn + 
4*mxv; */ /* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c 
= _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find fourth part of magnetic field */ /* ox = ox + dzp*(vx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */ v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx); v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx); v_ox = _mm512_fmadd_ps(v_dzp,v_vx,v_ox); /* oy = oy + dzp*(vy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */ v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy); v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy); v_oy = _mm512_fmadd_ps(v_dzp,v_vy,v_oy); /* oz = oz + dzp*(vz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */ v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz); v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz); v_oz = _mm512_fmadd_ps(v_dzp,v_vz,v_oz); /* calculate half impulse */ /* dx *= qtmh; */ /* dy *= qtmh; */ /* dz *= qtmh; */ v_dx = _mm512_mul_ps(v_dx,v_qtmh); v_dy = _mm512_mul_ps(v_dy,v_qtmh); v_dz = _mm512_mul_ps(v_dz,v_qtmh); /* half acceleration */ /* acx = ppart[j+3*nppmx+npoff] + dx; */ /* acy = ppart[j+4*nppmx+npoff] + dy; */ /* acz = ppart[j+5*nppmx+npoff] + dz; */ a = _mm512_add_ps(v_dx,_mm512_load_ps(&ppart[j+3*nppmx+npoff])); b = _mm512_add_ps(v_dy,_mm512_load_ps(&ppart[j+4*nppmx+npoff])); c = 
_mm512_add_ps(v_dz,_mm512_load_ps(&ppart[j+5*nppmx+npoff])); /* time-centered kinetic energy */ /* sum1 += (acx*acx + acy*acy + acz*acz); */ v_at = _mm512_fmadd_ps(b,b,_mm512_mul_ps(a,a)); v_at = _mm512_fmadd_ps(c,c,v_at); /* convert to double precision before accumulating */ v_sum1 = _mm512_add_pd(v_sum1,_mm512_cvtpslo_pd(v_at)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_at,78)); v_sum1 = _mm512_add_pd(v_sum1,v_d); /* calculate cyclotron frequency */ /* omxt = qtmh*ox; */ /* omyt = qtmh*oy; */ /* omzt = qtmh*oz; */ e = _mm512_mul_ps(v_qtmh,v_ox); f = _mm512_mul_ps(v_qtmh,v_oy); g = _mm512_mul_ps(v_qtmh,v_oz); /* calculate rotation matrix */ /* vx = omxt*omxt; */ v_vx = _mm512_mul_ps(e,e); /* vy = omyt*omyt; */ v_vy = _mm512_mul_ps(f,f); /* vz = omzt*omzt; */ v_vz = _mm512_mul_ps(g,g); /* omt = omxt*omxt + omyt*omyt + omzt*omzt; */ v_at = _mm512_add_ps(_mm512_add_ps(v_vx,v_vy),v_vz); /* anorm = 2.0f/(1.0f + omt); */ d = _mm512_div_ps(v_two,_mm512_add_ps(v_one,v_at)); /* omt = 0.5f*(1.0f - omt); */ h = _mm512_mul_ps(v_half,_mm512_sub_ps(v_one,v_at)); /* vx = (omt + vx)*acx; */ v_vx = _mm512_mul_ps(_mm512_add_ps(h,v_vx),a); /* vy = (omt + vy)*acy; */ v_vy = _mm512_mul_ps(_mm512_add_ps(h,v_vy),b); /* vz = (omt + vz)*acz; */ v_vz = _mm512_mul_ps(_mm512_add_ps(h,v_vz),c); /* omt = omxt*omyt; */ h = _mm512_mul_ps(e,f); /* vx = vx + (omzt + omt)*acy; */ v_vx = _mm512_fmadd_ps(_mm512_add_ps(h,g),b,v_vx); /* vy = vy + (omt - omzt)*acx; */ v_vy = _mm512_fmadd_ps(_mm512_sub_ps(h,g),a,v_vy); /* omt = omxt*omzt; */ h = _mm512_mul_ps(e,g); /* vx = vx + (omt - omyt)*acz; */ v_vx = _mm512_fmadd_ps(_mm512_sub_ps(h,f),c,v_vx); /* vz = vz + (omt + omyt)*acx; */ v_vz = _mm512_fmadd_ps(_mm512_add_ps(h,f),a,v_vz); /* omt = omyt*omzt; */ h = _mm512_mul_ps(f,g); /* vy = vy + (omt + omxt)*acz; */ v_vy = _mm512_fmadd_ps(_mm512_add_ps(h,e),c,v_vy); /* vz = vz + (omt - omxt)*acy; */ v_vz = _mm512_fmadd_ps(_mm512_sub_ps(h,e),b,v_vz); /* new velocity */ /* vx = dx + (rot1*acx + rot2*acy 
+ rot3*acz)*anorm; */ /* vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; */ /* vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; */ v_vx = _mm512_fmadd_ps(v_vx,d,v_dx); v_vy = _mm512_fmadd_ps(v_vy,d,v_dy); v_vz = _mm512_fmadd_ps(v_vz,d,v_dz); /* new position */ /* dx = x + vx*dtc; */ /* dy = y + vy*dtc; */ /* dz = z + vz*dtc; */ v_dx = _mm512_fmadd_ps(v_vx,v_dtc,v_x); v_dy = _mm512_fmadd_ps(v_vy,v_dtc,v_y); v_dz = _mm512_fmadd_ps(v_vz,v_dtc,v_z); /* reflecting boundary conditions */ if (ipbc==2) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* vx = -vx; */ /* } */ msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx, _MM_CMPINT_GE)); v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x); v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx); /* if ((dy < edgely) || (dy >= edgery)) { */ /* dy = y; */ /* vy = -vy; */ /* } */ msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery, _MM_CMPINT_GE)); v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y); v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy); /* if ((dz < edgelz) || (dz >= edgerz)) { */ /* dz = z; */ /* vz = -vz; */ /* } */ msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dz,v_edgerz, _MM_CMPINT_GE)); v_dz = _mm512_mask_blend_ps(msk,v_dz,v_z); v_vz = _mm512_mask_sub_ps(v_vz,msk,v_zero,v_vz); } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* vx = -vx; */ /* } */ msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx, _MM_CMPINT_GE)); v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x); v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx); /* if ((dy < edgely) || (dy >= edgery)) { */ /* dy = y; */ /* vy = -vy; */ /* } */ msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery, _MM_CMPINT_GE)); 
v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y); v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy); } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ /* ppart[j+2*nppmx+npoff] = dz; */ _mm512_store_ps(&ppart[j+npoff],v_dx); _mm512_store_ps(&ppart[j+nppmx+npoff],v_dy); _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz); /* set new velocity */ /* ppart[j+3*nppmx+npoff] = vx; */ /* ppart[j+4*nppmx+npoff] = vy; */ /* ppart[j+5*nppmx+npoff] = vz; */ _mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx); _mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy); _mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz); } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; z = ppart[j+2*nppmx+npoff]; nn = x; mm = y; ll = z; dxp = x - (float) nn; dyp = y - (float) mm; dzp = z - (float) ll; nm = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = 1.0f - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* find electric field */ nn = nm; dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; mm = nn + 4*mxv; dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); nn += 4*mxyv; acx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; acy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; acz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; mm = nn + 4*mxv; dx = dx + dzp*(acx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); dy = dy + dzp*(acy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); dz = dz + dzp*(acz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); /* find magnetic field */ nn = nm; ox = amx*sbxyz[nn] + amy*sbxyz[nn+4]; oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; mm = nn + 4*mxv; ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); oz = amz*(oz + 
dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); nn += 4*mxyv; acx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; acy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; acz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; mm = nn + 4*mxv; ox = ox + dzp*(acx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); oy = oy + dzp*(acy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); oz = oz + dzp*(acz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); /* calculate half impulse */ dx *= qtmh; dy *= qtmh; dz *= qtmh; /* half acceleration */ acx = ppart[j+3*nppmx+npoff] + dx; acy = ppart[j+4*nppmx+npoff] + dy; acz = ppart[j+5*nppmx+npoff] + dz; /* time-centered kinetic energy */ sum1 += (acx*acx + acy*acy + acz*acz); /* calculate cyclotron frequency */ omxt = qtmh*ox; omyt = qtmh*oy; omzt = qtmh*oz; /* calculate rotation matrix */ omt = omxt*omxt + omyt*omyt + omzt*omzt; anorm = 2.0f/(1.0f + omt); omt = 0.5f*(1.0f - omt); rot4 = omxt*omyt; rot7 = omxt*omzt; rot8 = omyt*omzt; rot1 = omt + omxt*omxt; rot5 = omt + omyt*omyt; rot9 = omt + omzt*omzt; rot2 = omzt + rot4; rot4 -= omzt; rot3 = -omyt + rot7; rot7 += omyt; rot6 = omxt + rot8; rot8 -= omxt; /* new velocity */ vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; /* new position */ dx = x + vx*dtc; dy = y + vy*dtc; dz = z + vz*dtc; /* reflecting boundary conditions */ if (ipbc==2) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; vx = -vx; } if ((dy < edgely) || (dy >= edgery)) { dy = y; vy = -vy; } if ((dz < edgelz) || (dz >= edgerz)) { dz = z; vz = -vz; } } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; vx = -vx; } if ((dy < edgely) || (dy >= edgery)) { dy = y; vy = -vy; } } /* set new position */ ppart[j+npoff] = dx; ppart[j+nppmx+npoff] = dy; ppart[j+2*nppmx+npoff] = dz; /* set new velocity */ ppart[j+3*nppmx+npoff] = vx; ppart[j+4*nppmx+npoff] = vy; ppart[j+5*nppmx+npoff] = vz; } /* sum2 += sum1; */ _mm512_store_pd(&dd[0],v_sum1); 
for (j = 1; j < 8; j++) { dd[0] += dd[j]; } sum2 += (sum1 + dd[0]); } /* normalize kinetic energy */ *ek += 0.5f*sum2; return; #undef MXV #undef MYV #undef MZV } /*--------------------------------------------------------------------*/ void ckncgbppushf3lt(float ppart[], float fxyz[], float bxyz[], int kpic[], int ncl[], int ihole[], float qbm, float dt, float dtc, float *ek, int idimp, int nppmx, int nx, int ny, int nz, int mx, int my, int mz, int nxv, int nyv, int nzv, int mx1, int my1, int mxyz1, int ntmax, int *irc) { /* for 3d code, this subroutine updates particle co-ordinates and velocities using leap-frog scheme in time and first-order linear interpolation in space, with magnetic field. Using the Boris Mover. also determines list of particles which are leaving this tile OpenMP/vector version using guard cells data read in tiles particles stored segmented array 190 flops/particle, 1 divide, 54 loads, 6 stores input: all except ncl, ihole, irc, output: ppart, ncl, ihole, ek, irc velocity equations used are: vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) where q/m is charge/mass, and the rotation matrix is given by: rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2) rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot[4] = (1 - 
(om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2) rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2) and om**2 = omx**2 + omy**2 + omz**2 the rotation matrix is determined by: omx = (q/m)*bx(x(t),y(t),z(t)), omy = (q/m)*by(x(t),y(t),z(t)), and omz = (q/m)*bz(x(t),y(t),z(t)). position equations used are: x(t+dt)=x(t) + vx(t+dt/2)*dt y(t+dt)=y(t) + vy(t+dt/2)*dt z(t+dt)=z(t) + vz(t+dt/2)*dt fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t)), bx(x(t),y(t),z(t)), by(x(t),y(t),z(t)), and bz(x(t),y(t),z(t)) are approximated by interpolation from the nearest grid points: fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l)) + dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l))) + dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1)) + dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1))) where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l similarly for fy(x,y,z), fz(x,y,z), bx(x,y,z), by(x,y,z), bz(x,y,z) ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = position z of particle n in tile m ppart[m][3][n] = velocity vx of particle n in tile m ppart[m][4][n] = velocity vy of particle n in tile m ppart[m][5][n] = velocity vz of particle n in tile m fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l) fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l) fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l) that is, convolution of electric field over particle shape bxyz[l][k][j][0] = x component of magnetic field at grid (j,k,l) bxyz[l][k][j][1] = y component of magnetic field at grid (j,k,l) bxyz[l][k][j][2] = z component of magnetic field at grid (j,k,l) that is, the convolution of magnetic field over particle shape kpic[l] = number of particles in tile l 
ncl[l][i] = number of particles going to destination i, tile l ihole[l][:][0] = location of hole in array left by departing particle ihole[l][:][1] = direction destination of particle leaving hole all for tile l ihole[l][0][0] = ih, number of holes left (error, if negative) qbm = particle charge/mass ratio dt = time interval between successive force calculations dtc = time interval between successive co-ordinate calculations kinetic energy/mass at time t is also calculated, using ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 + (vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 + .25*(vz(t+dt/2) + vz(t-dt/2))**2) idimp = size of phase space = 6 nppmx = maximum number of particles in tile nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z nxv = second dimension of field arrays, must be >= nx+1 nyv = third dimension of field arrays, must be >= ny+1 nzv = fourth dimension of field array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 requires KNC, ppart needs to be 64 byte aligned nppmx needs to be a multiple of 16 fxyz needs to have 4 components, although one is not used local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp, nps; int i, j, k, l, m, ii, ih, nh, nn, mm, ll, nm, mxv, myv, mxyv, nxyv; float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float qtmh, dxp, dyp, dzp, amx, amy, amz, dx, dy, dz, ox, oy, oz, dx1; float acx, acy, acz, omxt, omyt, omzt, omt, anorm; float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9; float x, y, z, vx, vy, vz; double sum1, sum2; __m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4; __m512i v_nn, v_mm, v_ll, v_nm, v_it, v_0, v_1, v_3, v_9, v_perm; __m512 v_qtmh, 
v_dt, v_dtc, v_one, v_zero, v_anx, v_any, v_anz; __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz; __m512 v_dx1, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz; __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz; __m512 a, b, c, d, e, f, g, h, p, q, r, s; __m512 v_two, v_half, v_ox, v_oy, v_oz; __m512d v_sum1, v_d; __mmask16 msk1, msk2; __attribute__((aligned(64))) unsigned int kk[16]; __attribute__((aligned(64))) double dd[8]; __attribute__((aligned(64))) float sfxyz[4*MXV*MYV*MZV]; __attribute__((aligned(64))) float sbxyz[4*MXV*MYV*MZV]; /* __attribute__((aligned(64))) float sfxyz[4*(mx+1)*(my+1)*(mz+1)]; */ /* __attribute__((aligned(64))) float sbxyz[4*(mx+1)*(my+1)*(mz+1)]; */ mxy1 = mx1*my1; /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; qtmh = 0.5f*qbm*dt; anx = (float) nx; any = (float) ny; anz = (float) nz; sum2 = 0.0; /* set boundary values */ v_mxv4 = _mm512_set1_epi32(4*mxv); v_mxyv4 = _mm512_set1_epi32(4*mxyv); v_0 = _mm512_set1_epi32(0); v_1 = _mm512_set1_epi32(1); v_3 = _mm512_set1_epi32(3); v_9 = _mm512_set1_epi32(9); v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0); v_qtmh = _mm512_set1_ps(qtmh); v_dt = _mm512_set1_ps(dt); v_dtc = _mm512_set1_ps(dtc); v_one = _mm512_set1_ps(1.0f); v_zero = _mm512_setzero_ps(); v_two = _mm512_set1_ps(2.0f); v_half = _mm512_set1_ps(0.5f); v_anx = _mm512_set1_ps(anx); v_any = _mm512_set1_ps(any); v_anz = _mm512_set1_ps(anz); v_sum1 = _mm512_set1_pd(0.0); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,m,ii,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,ih,nh,x, \ y,z,vx,vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,ox,oy,oz,acx,acy,acz, \ omxt,omyt,omzt,omt,anorm,rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9, \ edgelx,edgely,edgelz,edgerx,edgery,edgerz,sum1,v_noff,v_moff,v_loff, \ 
v_nn,v_mm,v_ll,v_nm,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy, \ v_amz,v_dx1,v_dx,v_dy,v_dz,v_vx,v_vy,v_vz,v_ox,v_oy,v_oz,v_at,v_edgelx, \ v_edgely,v_edgelz,v_edgerx,v_edgery,v_edgerz,v_d,v_sum1,a,b,c,d,e,f,g, \ h,p,q,r,s,msk1,msk2,kk,dd,sfxyz,sbxyz) \ reduction(+:sum2) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm512_set1_epi32(noff); v_moff = _mm512_set1_epi32(moff); v_loff = _mm512_set1_epi32(loff); npp = kpic[l]; npoff = idimp*nppmx*l; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? my : mm; ll = nz - loff; ll = mz < ll ? mz : ll; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; edgelz = loff; edgerz = loff + ll; v_edgelx = _mm512_set1_ps(edgelx); v_edgely = _mm512_set1_ps(edgely); v_edgelz = _mm512_set1_ps(edgelz); v_edgerx = _mm512_set1_ps(edgerx); v_edgery = _mm512_set1_ps(edgery); v_edgerz = _mm512_set1_ps(edgerz); ih = 0; nh = 0; nn += 1; mm += 1; ll += 1; /* load local fields from global array */ nps = 4*(nn/4); /* load electric field */ for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 0; i < nn; i++) { */ /* sfxyz[4*(i+mxv*j+mxyv*k)] */ /* = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sfxyz[1+4*(i+mxv*j+mxyv*k)] */ /* = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sfxyz[2+4*(i+mxv*j+mxyv*k)] */ /* = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* } */ for (i = 0; i < nps; i+=4) { m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff)); v_at = _mm512_loadunpacklo_ps(v_at,&fxyz[m]); v_at = _mm512_loadunpackhi_ps(v_at,&fxyz[m+16]); m = 4*(i + mxv*j + mxyv*k); _mm512_packstorelo_ps(&sfxyz[m],v_at); _mm512_packstorehi_ps(&sfxyz[m+16],v_at); } /* loop over remaining elements */ for (i = nps; i < nn; i++) { sfxyz[4*(i+mxv*j+mxyv*k)] = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[1+4*(i+mxv*j+mxyv*k)] = 
fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[2+4*(i+mxv*j+mxyv*k)] = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[3+4*(i+mxv*j+mxyv*k)] = fxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } /* load magnetic field */ for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 0; i < nn; i++) { */ /* sbxyz[4*(i+mxv*j+mxyv*k)] */ /* = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sbxyz[1+4*(i+mxv*j+mxyv*k)] */ /* = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sbxyz[2+4*(i+mxv*j+mxyv*k)] */ /* = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* } */ for (i = 0; i < nps; i+=4) { m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff)); v_at = _mm512_loadunpacklo_ps(v_at,&bxyz[m]); v_at = _mm512_loadunpackhi_ps(v_at,&bxyz[m+16]); m = 4*(i + mxv*j + mxyv*k); _mm512_packstorelo_ps(&sbxyz[m],v_at); _mm512_packstorehi_ps(&sbxyz[m+16],v_at); } /* loop over remaining elements */ for (i = nps; i < nn; i++) { sbxyz[4*(i+mxv*j+mxyv*k)] = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[1+4*(i+mxv*j+mxyv*k)] = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[2+4*(i+mxv*j+mxyv*k)] = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[3+4*(i+mxv*j+mxyv*k)] = bxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } /* clear counters */ /* for (j = 0; j < 26; j++) { */ /* ncl[j+26*l] = 0; */ /* } */ memset((void*)&ncl[26*l],0,26*sizeof(int)); nps = 16*(npp/16); sum1 = 0.0; v_sum1 = _mm512_set1_pd(0.0); /* loop over particles in tile in blocks of 16 */ for (j = 0; j < nps; j+=16) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ /* z = ppart[j+2*nppmx+npoff]; */ v_x = _mm512_load_ps(&ppart[j+npoff]); v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]); v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]); /* nn = x; */ /* mm = y; */ /* ll = z; */ v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_mm = 
_mm512_cvtfxpnt_round_adjustps_epi32(v_y, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); /* dxp = x - (float) nn; */ /* dyp = y - (float) mm; */ /* dzp = z - (float) ll; */ v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dxp = _mm512_sub_ps(v_x,v_dxp); v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dyp = _mm512_sub_ps(v_y,v_dyp); v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dzp = _mm512_sub_ps(v_z,v_dzp); /* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */ v_nn = _mm512_sub_epi32(v_nn,v_noff); v_mm = _mm512_sub_epi32(v_mm,v_moff); v_ll = _mm512_sub_epi32(v_ll,v_loff); v_it = _mm512_mullo_epi32(v_mxyv4,v_ll); v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm)); v_nm = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it); /* amx = 1.0f - dxp; */ /* amy = 1.0f - dyp; */ /* amz = 1.0f - dzp; */ v_amx = _mm512_sub_ps(v_one,v_dxp); v_amy = _mm512_sub_ps(v_one,v_dyp); v_amz = _mm512_sub_ps(v_one,v_dzp); /* dx1 = dxp*dyp; */ /* dyp = amx*dyp; */ /* amx = amx*amy; */ /* amy = dxp*amy; */ v_dx1 = _mm512_mul_ps(v_dxp,v_dyp); v_dyp = _mm512_mul_ps(v_amx,v_dyp); v_amx = _mm512_mul_ps(v_amx,v_amy); v_amy = _mm512_mul_ps(v_dxp,v_amy); /* find electric field */ /* nn = nm; */ _mm512_store_epi32(kk,v_nm); /* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = 
kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = 
_mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[nn:nn+3] field components */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = 
_mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find first part of electric field */ /* dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */ v_dx = _mm512_mul_ps(v_amx,a); v_dx = _mm512_fmadd_ps(v_amy,p,v_dx); /* dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */ v_dy = _mm512_mul_ps(v_amx,b); v_dy = _mm512_fmadd_ps(v_amy,q,v_dy); /* dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */ v_dz = _mm512_mul_ps(v_amx,c); v_dz = _mm512_fmadd_ps(v_amy,r,v_dz); /* mm = nn + 4*mxv; */ /* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = 
kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[mm:mm+3] field components */ /* where 
mm = nn + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find second part of electric field */ /* dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */ v_dx = _mm512_fmadd_ps(v_dyp,a,v_dx); v_dx = _mm512_fmadd_ps(v_dx1,p,v_dx); v_dx = _mm512_mul_ps(v_amz,v_dx); /* dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */ v_dy = _mm512_fmadd_ps(v_dyp,b,v_dy); v_dy = _mm512_fmadd_ps(v_dx1,q,v_dy); v_dy = _mm512_mul_ps(v_amz,v_dy); /* dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */ v_dz = _mm512_fmadd_ps(v_dyp,c,v_dz); v_dz = _mm512_fmadd_ps(v_dx1,r,v_dz); v_dz = 
_mm512_mul_ps(v_amz,v_dz); /* nn += 4*mxyv; */ v_nn = _mm512_add_epi32(v_nm,v_mxyv4); _mm512_store_epi32(kk,v_nn); /* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), 
&sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[nn:nn+3] field components */ /* where nn = nn + 4*mxyv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = 
_mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */ /* where nn = nn + 4*mxyv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find third part of electric field */ /* vx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */ v_vx = _mm512_mul_ps(v_amx,a); v_vx = _mm512_fmadd_ps(v_amy,p,v_vx); /* vy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */ v_vy = _mm512_mul_ps(v_amx,b); v_vy = _mm512_fmadd_ps(v_amy,q,v_vy); /* vz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */ v_vz = _mm512_mul_ps(v_amx,c); v_vz = _mm512_fmadd_ps(v_amy,r,v_vz); /* mm = nn + 4*mxv; */ /* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = 
_mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = 
_mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find fourth part of electric field */ /* dx = dx + dzp*(vx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */ v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx); v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx); v_dx = _mm512_fmadd_ps(v_dzp,v_vx,v_dx); /* dy = dy + dzp*(vy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */ v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy); v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy); v_dy = _mm512_fmadd_ps(v_dzp,v_vy,v_dy); /* dz = dz + dzp*(vz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */ v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz); v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz); v_dz = _mm512_fmadd_ps(v_dzp,v_vz,v_dz); /* find magnetic field */ /* nn = nm; */ _mm512_store_epi32(kk,v_nm); /* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5]; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[nn:nn+3] field components */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find first part of magnetic field */ /* ox = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */ v_ox = _mm512_mul_ps(v_amx,a); v_ox = _mm512_fmadd_ps(v_amy,p,v_ox); /* oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */ v_oy = _mm512_mul_ps(v_amx,b); v_oy = _mm512_fmadd_ps(v_amy,q,v_oy); /* oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */ v_oz = 
_mm512_mul_ps(v_amx,c); v_oz = _mm512_fmadd_ps(v_amy,r,v_oz); /* mm = nn + 4*mxv; */ /* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9] + 4*mxv; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = 
_mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find second part of magnetic field */ /* ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */ v_ox = _mm512_fmadd_ps(v_dyp,a,v_ox); v_ox = _mm512_fmadd_ps(v_dx1,p,v_ox); v_ox = _mm512_mul_ps(v_amz,v_ox); /* oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */ v_oy = _mm512_fmadd_ps(v_dyp,b,v_oy); v_oy = _mm512_fmadd_ps(v_dx1,q,v_oy); v_oy = _mm512_mul_ps(v_amz,v_oy); /* oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */ v_oz = _mm512_fmadd_ps(v_dyp,c,v_oz); v_oz = _mm512_fmadd_ps(v_dx1,r,v_oz); v_oz = _mm512_mul_ps(v_amz,v_oz); /* nn += 4*mxyv; */ v_nn = _mm512_add_epi32(v_nm,v_mxyv4); _mm512_store_epi32(kk,v_nn); /* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = 
kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = 
_mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[nn:nn+3] field components */ /* where nn = nn + 4*mxyv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */ /* where nn = nn + 4*mxyv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); 
f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find third part of magnetic field */ /* vx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */ v_vx = _mm512_mul_ps(v_amx,a); v_vx = _mm512_fmadd_ps(v_amy,p,v_vx); /* vy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */ v_vy = _mm512_mul_ps(v_amx,b); v_vy = _mm512_fmadd_ps(v_amy,q,v_vy); /* vz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */ v_vz = _mm512_mul_ps(v_amx,c); v_vz = _mm512_fmadd_ps(v_amy,r,v_vz); /* mm = nn + 4*mxv; */ /* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find fourth part of magnetic field */ /* ox = ox + dzp*(vx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */ v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx); v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx); v_ox = _mm512_fmadd_ps(v_dzp,v_vx,v_ox); /* oy = oy + dzp*(vy + 
dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */ v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy); v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy); v_oy = _mm512_fmadd_ps(v_dzp,v_vy,v_oy); /* oz = oz + dzp*(vz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */ v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz); v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz); v_oz = _mm512_fmadd_ps(v_dzp,v_vz,v_oz); /* calculate half impulse */ /* dx *= qtmh; */ /* dy *= qtmh; */ /* dz *= qtmh; */ v_dx = _mm512_mul_ps(v_dx,v_qtmh); v_dy = _mm512_mul_ps(v_dy,v_qtmh); v_dz = _mm512_mul_ps(v_dz,v_qtmh); /* half acceleration */ /* acx = ppart[j+3*nppmx+npoff] + dx; */ /* acy = ppart[j+4*nppmx+npoff] + dy; */ /* acz = ppart[j+5*nppmx+npoff] + dz; */ a = _mm512_add_ps(v_dx,_mm512_load_ps(&ppart[j+3*nppmx+npoff])); b = _mm512_add_ps(v_dy,_mm512_load_ps(&ppart[j+4*nppmx+npoff])); c = _mm512_add_ps(v_dz,_mm512_load_ps(&ppart[j+5*nppmx+npoff])); /* time-centered kinetic energy */ /* sum1 += (acx*acx + acy*acy + acz*acz); */ v_at = _mm512_fmadd_ps(b,b,_mm512_mul_ps(a,a)); v_at = _mm512_fmadd_ps(c,c,v_at); /* convert to double precision before accumulating */ v_sum1 = _mm512_add_pd(v_sum1,_mm512_cvtpslo_pd(v_at)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_at,78)); v_sum1 = _mm512_add_pd(v_sum1,v_d); /* calculate cyclotron frequency */ /* omxt = qtmh*ox; */ /* omyt = qtmh*oy; */ /* omzt = qtmh*oz; */ e = _mm512_mul_ps(v_qtmh,v_ox); f = _mm512_mul_ps(v_qtmh,v_oy); g = _mm512_mul_ps(v_qtmh,v_oz); /* calculate rotation matrix */ /* vx = omxt*omxt; */ v_vx = _mm512_mul_ps(e,e); /* vy = omyt*omyt; */ v_vy = _mm512_mul_ps(f,f); /* vz = omzt*omzt; */ v_vz = _mm512_mul_ps(g,g); /* omt = omxt*omxt + omyt*omyt + omzt*omzt; */ v_at = _mm512_add_ps(_mm512_add_ps(v_vx,v_vy),v_vz); /* anorm = 2.0f/(1.0f + omt); */ d = _mm512_div_ps(v_two,_mm512_add_ps(v_one,v_at)); /* omt = 0.5f*(1.0f - omt); */ h = _mm512_mul_ps(v_half,_mm512_sub_ps(v_one,v_at)); /* vx = (omt + vx)*acx; */ v_vx = _mm512_mul_ps(_mm512_add_ps(h,v_vx),a); /* vy = (omt + vy)*acy; */ v_vy = 
_mm512_mul_ps(_mm512_add_ps(h,v_vy),b); /* vz = (omt + vz)*acz; */ v_vz = _mm512_mul_ps(_mm512_add_ps(h,v_vz),c); /* omt = omxt*omyt; */ h = _mm512_mul_ps(e,f); /* vx = vx + (omzt + omt)*acy; */ v_vx = _mm512_fmadd_ps(_mm512_add_ps(h,g),b,v_vx); /* vy = vy + (omt - omzt)*acx; */ v_vy = _mm512_fmadd_ps(_mm512_sub_ps(h,g),a,v_vy); /* omt = omxt*omzt; */ h = _mm512_mul_ps(e,g); /* vx = vx + (omt - omyt)*acz; */ v_vx = _mm512_fmadd_ps(_mm512_sub_ps(h,f),c,v_vx); /* vz = vz + (omt + omyt)*acx; */ v_vz = _mm512_fmadd_ps(_mm512_add_ps(h,f),a,v_vz); /* omt = omyt*omzt; */ h = _mm512_mul_ps(f,g); /* vy = vy + (omt + omxt)*acz; */ v_vy = _mm512_fmadd_ps(_mm512_add_ps(h,e),c,v_vy); /* vz = vz + (omt - omxt)*acy; */ v_vz = _mm512_fmadd_ps(_mm512_sub_ps(h,e),b,v_vz); /* new velocity */ /* vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; */ /* vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; */ /* vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; */ v_vx = _mm512_fmadd_ps(v_vx,d,v_dx); v_vy = _mm512_fmadd_ps(v_vy,d,v_dy); v_vz = _mm512_fmadd_ps(v_vz,d,v_dz); /* new position */ /* dx = x + vx*dtc; */ /* dy = y + vy*dtc; */ /* dz = z + vz*dtc; */ v_dx = _mm512_fmadd_ps(v_vx,v_dtc,v_x); v_dy = _mm512_fmadd_ps(v_vy,v_dtc,v_y); v_dz = _mm512_fmadd_ps(v_vz,v_dtc,v_z); /* find particles going out of bounds */ /* mm = 0; */ v_mm = _mm512_setzero_epi32(); /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ /* if (dx >= edgerx) { */ /* if (dx >= anx) */ /* ppart[j+npoff] = dx - anx; */ /* mm = 2; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dx,v_edgerx,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dx; /* write output if test result is true for 
any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_1,v_1); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it); msk1 = _mm512_cmp_ps_mask(v_dx,v_anx,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dx,v_anx); ii = _mm512_mask2int(msk1); if (ii != 0) v_dx = v_x; } /* if (dx < edgelx) { */ /* if (dx < 0.0) { */ /* dx += anx; */ /* if (dx < anx) */ /* mm = 1; */ /* else */ /* dx = 0.0; */ /* ppart[j+npoff] = dx; */ /* } */ /* else { */ /* mm = 1; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_1); msk2 = _mm512_cmp_ps_mask(v_dx,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dx,v_anx); msk1 = _mm512_cmp_ps_mask(v_x,v_anx,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_mm = _mm512_add_epi32(v_mm,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) v_dx = v_x; } } /* if (dy >= edgery) { */ /* if (dy >= any) */ /* ppart[j+nppmx+npoff] = dy - any; */ /* mm += 6; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dy,v_edgery,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dy; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_3,v_3); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it); msk1 = _mm512_cmp_ps_mask(v_dy,v_any,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dy,v_any); ii = _mm512_mask2int(msk1); if (ii != 0) v_dy = v_x; } /* if (dy < edgely) { */ /* if (dy < 0.0) { */ /* dy += any; */ /* if (dy < any) */ /* mm += 3; */ /* else */ /* dy = 0.0; */ /* ppart[j+nppmx+npoff] = dy; */ /* } */ /* else { */ /* mm += 3; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it 
= _mm512_mask_mov_epi32(v_0,msk2,v_3); msk2 = _mm512_cmp_ps_mask(v_dy,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dy,v_any); msk1 = _mm512_cmp_ps_mask(v_x,v_any,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_mm = _mm512_add_epi32(v_mm,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) v_dy = v_x; } } /* if (dz >= edgerz) { */ /* if (dz >= anz) */ /* ppart[j+2*nppmx+npoff] = dz - anz; */ /* mm += 18; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dz; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_9,v_9); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it); msk1 = _mm512_cmp_ps_mask(v_dz,v_anz,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dz,v_anz); ii = _mm512_mask2int(msk1); if (ii != 0) v_dz = v_x; } /* if (dz < edgelz) { */ /* if (dz < 0.0) { */ /* dz += anz; */ /* if (dz < anz) */ /* mm += 9; */ /* else */ /* dz = 0.0; */ /* ppart[j+2*nppmx+npoff] = dz; */ /* } */ /* else { */ /* mm += 9; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_9); msk2 = _mm512_cmp_ps_mask(v_dz,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dz,v_anz); msk1 = _mm512_cmp_ps_mask(v_x,v_anz,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_mm = _mm512_add_epi32(v_mm,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) v_dz = v_x; } } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ /* ppart[j+2*nppmx+npoff] = dz; */ _mm512_store_ps(&ppart[j+npoff],v_dx); 
_mm512_store_ps(&ppart[j+nppmx+npoff],v_dy); _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz); /* set new velocity */ /* ppart[j+3*nppmx+npoff] = vx; */ /* ppart[j+4*nppmx+npoff] = vy; */ /* ppart[j+5*nppmx+npoff] = vz; */ _mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx); _mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy); _mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz); /* increment counters */ /* if (mm > 0) { */ /* ncl[mm+26*l-1] += 1; */ /* ih += 1; */ /* if (ih <= ntmax) { */ /* ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; */ /* ihole[1+2*(ih+(ntmax+1)*l)] = mm; */ /* } */ /* else { */ /* nh = 1; */ /* } */ /* } */ _mm512_store_epi32(kk,v_mm); for (i = 0; i < 16; i++) { mm = kk[i]; if (mm > 0) { ncl[mm+26*l-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; ihole[1+2*(ih+(ntmax+1)*l)] = mm; } else { nh = 1; } } } } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; z = ppart[j+2*nppmx+npoff]; nn = x; mm = y; ll = z; dxp = x - (float) nn; dyp = y - (float) mm; dzp = z - (float) ll; nm = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = 1.0f - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* find electric field */ nn = nm; dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; mm = nn + 4*mxv; dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); nn += 4*mxyv; acx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; acy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; acz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; mm = nn + 4*mxv; dx = dx + dzp*(acx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); dy = dy + dzp*(acy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); dz = dz + dzp*(acz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); /* find magnetic field */ nn = nm; ox = amx*sbxyz[nn] + 
amy*sbxyz[nn+4]; oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; mm = nn + 4*mxv; ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); nn += 4*mxyv; acx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; acy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; acz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; mm = nn + 4*mxv; ox = ox + dzp*(acx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); oy = oy + dzp*(acy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); oz = oz + dzp*(acz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); /* calculate half impulse */ dx *= qtmh; dy *= qtmh; dz *= qtmh; /* half acceleration */ acx = ppart[j+3*nppmx+npoff] + dx; acy = ppart[j+4*nppmx+npoff] + dy; acz = ppart[j+5*nppmx+npoff] + dz; /* time-centered kinetic energy */ sum1 += (acx*acx + acy*acy + acz*acz); /* calculate cyclotron frequency */ omxt = qtmh*ox; omyt = qtmh*oy; omzt = qtmh*oz; /* calculate rotation matrix */ omt = omxt*omxt + omyt*omyt + omzt*omzt; anorm = 2.0f/(1.0f + omt); omt = 0.5f*(1.0f - omt); rot4 = omxt*omyt; rot7 = omxt*omzt; rot8 = omyt*omzt; rot1 = omt + omxt*omxt; rot5 = omt + omyt*omyt; rot9 = omt + omzt*omzt; rot2 = omzt + rot4; rot4 -= omzt; rot3 = -omyt + rot7; rot7 += omyt; rot6 = omxt + rot8; rot8 -= omxt; /* new velocity */ vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; /* new position */ dx = x + vx*dtc; dy = y + vy*dtc; dz = z + vz*dtc; /* find particles going out of bounds */ mm = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) dx = dx - anx; mm = 2; } else if (dx < edgelx) { if (dx < 0.0f) { dx += anx; if (dx < anx) mm = 1; else dx = 0.0f; } else { mm = 1; } } if (dy >= 
edgery) { if (dy >= any) dy = dy - any; mm += 6; } else if (dy < edgely) { if (dy < 0.0f) { dy += any; if (dy < any) mm += 3; else dy = 0.0f; } else { mm += 3; } } if (dz >= edgerz) { if (dz >= anz) dz = dz - anz; mm += 18; } else if (dz < edgelz) { if (dz < 0.0f) { dz += anz; if (dz < anz) mm += 9; else dz = 0.0f; } else { mm += 9; } } /* set new position */ ppart[j+npoff] = dx; ppart[j+nppmx+npoff] = dy; ppart[j+2*nppmx+npoff] = dz; /* set new velocity */ ppart[j+3*nppmx+npoff] = vx; ppart[j+4*nppmx+npoff] = vy; ppart[j+5*nppmx+npoff] = vz; /* increment counters */ if (mm > 0) { ncl[mm+26*l-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*l)] = j + 1; ihole[1+2*(ih+(ntmax+1)*l)] = mm; } else { nh = 1; } } } /* sum2 += sum1; */ _mm512_store_pd(&dd[0],v_sum1); for (j = 1; j < 8; j++) { dd[0] += dd[j]; } sum2 += (sum1 + dd[0]); /* set error and end of file flag */ if (nh > 0) { *irc = ih; ih = -ih; } ihole[2*(ntmax+1)*l] = ih; } /* normalize kinetic energy */ *ek += 0.5f*sum2; return; #undef MXV #undef MYV #undef MZV } /*--------------------------------------------------------------------*/ void ckncgrbppush3lt(float ppart[], float fxyz[], float bxyz[], int kpic[], float qbm, float dt, float dtc, float ci, float *ek, int idimp, int nppmx, int nx, int ny, int nz, int mx, int my, int mz, int nxv, int nyv, int nzv, int mx1, int my1, int mxyz1, int ipbc) { /* for 3d code, this subroutine updates particle co-ordinates and velocities using leap-frog scheme in time and first-order linear interpolation in space, for relativistic particles with magnetic field Using the Boris Mover. 
OpenMP/vector version using guard cells data read in tiles particles stored segmented array 202 flops/particle, 4 divides, 2 sqrts, 54 loads, 6 stores input: all, output: ppart, ek momentum equations used are: px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) where q/m is charge/mass, and the rotation matrix is given by: rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2) rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2) rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2) and om**2 = omx**2 + omy**2 + omz**2 the rotation matrix is determined by: omx = (q/m)*bx(x(t),y(t),z(t))*gami, omy = (q/m)*by(x(t),y(t),z(t))*gami, omz = (q/m)*bz(x(t),y(t),z(t))*gami, where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci) position equations used are: x(t+dt) = x(t) + px(t+dt/2)*dtg y(t+dt) = y(t) + py(t+dt/2)*dtg z(t+dt) = z(t) + pz(t+dt/2)*dtg where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+ pz(t+dt/2)*pz(t+dt/2))*ci*ci) fx(x(t),y(t),z(t)), 
fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t)), bx(x(t),y(t),z(t)), by(x(t),y(t),z(t)), and bz(x(t),y(t),z(t)) are approximated by interpolation from the nearest grid points: fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l)) + dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l))) + dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1)) + dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1))) where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l similarly for fy(x,y,z), fz(x,y,z), bx(x,y,z), by(x,y,z), bz(x,y,z) ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = position z of particle n in tile m ppart[m][3][n] = momentum px of particle n in tile m ppart[m][4][n] = momentum py of particle n in tile m ppart[m][5][n] = momentum pz of particle n in tile m fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l) fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l) fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l) that is, convolution of electric field over particle shape bxyz[l][k][j][0] = x component of magnetic field at grid (j,k,l) bxyz[l][k][j][1] = y component of magnetic field at grid (j,k,l) bxyz[l][k][j][2] = z component of magnetic field at grid (j,k,l) that is, the convolution of magnetic field over particle shape kpic = number of particles per tile qbm = particle charge/mass ratio dt = time interval between successive force calculations dtc = time interval between successive co-ordinate calculations ci = reciprocal of velocity of light kinetic energy/mass at time t is also calculated, using ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt)**2 + (py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt)**2 + (pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt)**2)/(1.
+ gami) idimp = size of phase space = 6 nppmx = maximum number of particles in tile nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z nxv = second dimension of field arrays, must be >= nx+1 nyv = third dimension of field arrays, must be >= ny+1 nzv = fourth dimension of field array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ipbc = particle boundary condition = (0,1,2,3) = (none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic) requires KNC, ppart needs to be 64 byte aligned nppmx needs to be a multiple of 16 fxyz needs to have 4 components, although one is not used local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp, nps; int i, j, k, l, m, nn, mm, ll, nm, mxv, myv, mxyv, nxyv; float qtmh, ci2, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dxp, dyp, dzp, amx, amy, amz, dx, dy, dz, ox, oy, oz, dx1; float acx, acy, acz, omxt, p2, gami, qtmg, omyt, omzt, omt, anorm; float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9, dtg; float x, y, z, vx, vy, vz; double sum1, sum2; __m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4; __m512i v_nn, v_mm, v_ll, v_nm, v_it, v_perm; __m512 v_qtmh, v_ci2, v_dt, v_dtc, v_one, v_zero; __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz; __m512 v_dx1, v_gami, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz; __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz; __m512 a, b, c, d, e, f, g, h, p, q, r, s; __m512 v_two, v_half, v_ox, v_oy, v_oz; __m512d v_sum1, v_d; __mmask16 msk; __attribute__((aligned(64))) unsigned int kk[16]; __attribute__((aligned(64))) double dd[8]; __attribute__((aligned(64))) float sfxyz[4*MXV*MYV*MZV]; __attribute__((aligned(64))) float sbxyz[4*MXV*MYV*MZV]; /* __attribute__((aligned(64))) float sfxyz[4*(mx+1)*(my+1)*(mz+1)]; */ /* 
__attribute__((aligned(64))) float sbxyz[4*(mx+1)*(my+1)*(mz+1)]; */ mxy1 = mx1*my1; /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; qtmh = 0.5f*qbm*dt; ci2 = ci*ci; sum2 = 0.0; /* set boundary values */ edgelx = 0.0f; edgely = 0.0f; edgelz = 0.0f; edgerx = (float) nx; edgery = (float) ny; edgerz = (float) nz; if (ipbc==2) { edgelx = 1.0f; edgely = 1.0f; edgelz = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); edgerz = (float) (nz-1); } else if (ipbc==3) { edgelx = 1.0f; edgely = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); } v_mxv4 = _mm512_set1_epi32(4*mxv); v_mxyv4 = _mm512_set1_epi32(4*mxyv); v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0); v_qtmh = _mm512_set1_ps(qtmh); v_ci2 = _mm512_set1_ps(ci2); v_dt = _mm512_set1_ps(dt); v_dtc = _mm512_set1_ps(dtc); v_one = _mm512_set1_ps(1.0f); v_zero = _mm512_setzero_ps(); v_two = _mm512_set1_ps(2.0f); v_half = _mm512_set1_ps(0.5f); v_edgelx = _mm512_set1_ps(edgelx); v_edgely = _mm512_set1_ps(edgely); v_edgelz = _mm512_set1_ps(edgelz); v_edgerx = _mm512_set1_ps(edgerx); v_edgery = _mm512_set1_ps(edgery); v_edgerz = _mm512_set1_ps(edgerz); v_sum1 = _mm512_set1_pd(0.0); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,x,y,z,vx, \ vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt, \ omyt,omzt,omt,anorm,rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,p2, \ gami,qtmg,dtg,sum1,v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_nm,v_it,v_x, \ v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1,v_dx,v_dy,v_dz,v_vx, \ v_vy,v_vz,v_ox,v_oy,v_oz,v_gami,v_at,v_d,v_sum1,a,b,c,d,e,f,g,h,p,q,r, \ s,msk,kk,dd,sfxyz,sbxyz) \ reduction(+:sum2) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = 
_mm512_set1_epi32(noff); v_moff = _mm512_set1_epi32(moff); v_loff = _mm512_set1_epi32(loff); npp = kpic[l]; npoff = idimp*nppmx*l; /* load local fields from global array */ nn = (mx < nx-noff ? mx : nx-noff) + 1; mm = (my < ny-moff ? my : ny-moff) + 1; ll = (mz < nz-loff ? mz : nz-loff) + 1; nps = 4*(nn/4); /* load electric field */ for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 0; i < nn; i++) { */ /* sfxyz[4*(i+mxv*j+mxyv*k)] */ /* = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sfxyz[1+4*(i+mxv*j+mxyv*k)] */ /* = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sfxyz[2+4*(i+mxv*j+mxyv*k)] */ /* = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* } */ for (i = 0; i < nps; i+=4) { m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff)); v_at = _mm512_loadunpacklo_ps(v_at,&fxyz[m]); v_at = _mm512_loadunpackhi_ps(v_at,&fxyz[m+16]); m = 4*(i + mxv*j + mxyv*k); _mm512_packstorelo_ps(&sfxyz[m],v_at); _mm512_packstorehi_ps(&sfxyz[m+16],v_at); } /* loop over remaining elements */ for (i = nps; i < nn; i++) { sfxyz[4*(i+mxv*j+mxyv*k)] = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[1+4*(i+mxv*j+mxyv*k)] = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[2+4*(i+mxv*j+mxyv*k)] = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[3+4*(i+mxv*j+mxyv*k)] = fxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } /* load magnetic field */ for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 0; i < nn; i++) { */ /* sbxyz[4*(i+mxv*j+mxyv*k)] */ /* = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sbxyz[1+4*(i+mxv*j+mxyv*k)] */ /* = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sbxyz[2+4*(i+mxv*j+mxyv*k)] */ /* = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* } */ for (i = 0; i < nps; i+=4) { m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff)); v_at = _mm512_loadunpacklo_ps(v_at,&bxyz[m]); v_at = 
_mm512_loadunpackhi_ps(v_at,&bxyz[m+16]); m = 4*(i + mxv*j + mxyv*k); _mm512_packstorelo_ps(&sbxyz[m],v_at); _mm512_packstorehi_ps(&sbxyz[m+16],v_at); } /* loop over remaining elements */ for (i = nps; i < nn; i++) { sbxyz[4*(i+mxv*j+mxyv*k)] = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[1+4*(i+mxv*j+mxyv*k)] = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[2+4*(i+mxv*j+mxyv*k)] = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[3+4*(i+mxv*j+mxyv*k)] = bxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } nps = 16*(npp/16); sum1 = 0.0; v_sum1 = _mm512_set1_pd(0.0); /* loop over particles in tile in blocks of 16 */ for (j = 0; j < nps; j+=16) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ /* z = ppart[j+2*nppmx+npoff]; */ v_x = _mm512_load_ps(&ppart[j+npoff]); v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]); v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]); /* nn = x; */ /* mm = y; */ /* ll = z; */ v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); /* dxp = x - (float) nn; */ /* dyp = y - (float) mm; */ /* dzp = z - (float) ll; */ v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dxp = _mm512_sub_ps(v_x,v_dxp); v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dyp = _mm512_sub_ps(v_y,v_dyp); v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dzp = _mm512_sub_ps(v_z,v_dzp); /* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */ v_nn = _mm512_sub_epi32(v_nn,v_noff); v_mm = _mm512_sub_epi32(v_mm,v_moff); v_ll = _mm512_sub_epi32(v_ll,v_loff); v_it = _mm512_mullo_epi32(v_mxyv4,v_ll); v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm)); v_nm = 
_mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it); /* amx = 1.0f - dxp; */ /* amy = 1.0f - dyp; */ /* amz = 1.0f - dzp; */ v_amx = _mm512_sub_ps(v_one,v_dxp); v_amy = _mm512_sub_ps(v_one,v_dyp); v_amz = _mm512_sub_ps(v_one,v_dzp); /* dx1 = dxp*dyp; */ /* dyp = amx*dyp; */ /* amx = amx*amy; */ /* amy = dxp*amy; */ v_dx1 = _mm512_mul_ps(v_dxp,v_dyp); v_dyp = _mm512_mul_ps(v_amx,v_dyp); v_amx = _mm512_mul_ps(v_amx,v_amy); v_amy = _mm512_mul_ps(v_dxp,v_amy); /* find electric field */ /* nn = nm; */ _mm512_store_epi32(kk,v_nm); /* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[nn:nn+3] field components */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = 
(__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find first part of electric field */ /* dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */ v_dx = _mm512_mul_ps(v_amx,a); v_dx = _mm512_fmadd_ps(v_amy,p,v_dx); /* dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */ v_dy = _mm512_mul_ps(v_amx,b); v_dy = _mm512_fmadd_ps(v_amy,q,v_dy); /* dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */ v_dz = _mm512_mul_ps(v_amx,c); v_dz = _mm512_fmadd_ps(v_amy,r,v_dz); /* mm = nn + 4*mxv; */ /* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1] + 4*mxv; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11] + 4*mxv; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxv; */ p = 
(__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find second part of electric field */ /* dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */ v_dx = _mm512_fmadd_ps(v_dyp,a,v_dx); v_dx = _mm512_fmadd_ps(v_dx1,p,v_dx); v_dx = _mm512_mul_ps(v_amz,v_dx); /* dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */ v_dy = _mm512_fmadd_ps(v_dyp,b,v_dy); v_dy = _mm512_fmadd_ps(v_dx1,q,v_dy); v_dy = _mm512_mul_ps(v_amz,v_dy); /* dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */ v_dz = _mm512_fmadd_ps(v_dyp,c,v_dz); v_dz = _mm512_fmadd_ps(v_dx1,r,v_dz); v_dz = _mm512_mul_ps(v_amz,v_dz); /* nn += 4*mxyv; */ v_nn = _mm512_add_epi32(v_nm,v_mxyv4); _mm512_store_epi32(kk,v_nn); /* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13]; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[nn:nn+3] field components */ /* where nn = nn + 4*mxyv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */ /* where nn = nn + 4*mxyv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = 
_mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find third part of electric field */ /* vx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */ v_vx = _mm512_mul_ps(v_amx,a); v_vx = _mm512_fmadd_ps(v_amy,p,v_vx); /* vy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */ v_vy = _mm512_mul_ps(v_amx,b); v_vy = _mm512_fmadd_ps(v_amy,q,v_vy); /* vz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */ v_vz = _mm512_mul_ps(v_amx,c); v_vz = _mm512_fmadd_ps(v_amy,r,v_vz); /* mm = nn + 4*mxv; */ /* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7] + 4*mxv; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxyv + 
4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find fourth part of electric field */ /* dx = dx + dzp*(vx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */ v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx); v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx); v_dx = _mm512_fmadd_ps(v_dzp,v_vx,v_dx); /* dy = dy + dzp*(vy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */ v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy); v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy); v_dy = _mm512_fmadd_ps(v_dzp,v_vy,v_dy); /* dz = dz + dzp*(vz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */ v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz); v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz); 
v_dz = _mm512_fmadd_ps(v_dzp,v_vz,v_dz); /* find magnetic field */ /* nn = nm; */ _mm512_store_epi32(kk,v_nm); /* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), 
&sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[nn:nn+3] field components */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = 
_mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find first part of magnetic field */ /* ox = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */ v_ox = _mm512_mul_ps(v_amx,a); v_ox = _mm512_fmadd_ps(v_amy,p,v_ox); /* oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */ v_oy = _mm512_mul_ps(v_amx,b); v_oy = _mm512_fmadd_ps(v_amy,q,v_oy); /* oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */ v_oz = _mm512_mul_ps(v_amx,c); v_oz = _mm512_fmadd_ps(v_amy,r,v_oz); /* mm = nn + 4*mxv; */ /* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = 
_mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13] + 4*mxv; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = 
_mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find second part of magnetic field */ /* ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */ v_ox = _mm512_fmadd_ps(v_dyp,a,v_ox); v_ox = _mm512_fmadd_ps(v_dx1,p,v_ox); v_ox = _mm512_mul_ps(v_amz,v_ox); /* oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */ v_oy = _mm512_fmadd_ps(v_dyp,b,v_oy); v_oy = _mm512_fmadd_ps(v_dx1,q,v_oy); v_oy = _mm512_mul_ps(v_amz,v_oy); /* oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */ v_oz = _mm512_fmadd_ps(v_dyp,c,v_oz); v_oz = _mm512_fmadd_ps(v_dx1,r,v_oz); v_oz = _mm512_mul_ps(v_amz,v_oz); /* nn += 4*mxyv; */ v_nn = _mm512_add_epi32(v_nm,v_mxyv4); _mm512_store_epi32(kk,v_nn); /* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), 
&sbxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = 
_mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[nn:nn+3] field components */ /* where nn = nn + 4*mxyv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */ /* where nn = nn + 4*mxyv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find third part of magnetic field */ /* vx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */ v_vx = _mm512_mul_ps(v_amx,a); v_vx = _mm512_fmadd_ps(v_amy,p,v_vx); /* vy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */ v_vy = _mm512_mul_ps(v_amx,b); v_vy = _mm512_fmadd_ps(v_amy,q,v_vy); /* vz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */ v_vz = _mm512_mul_ps(v_amx,c); v_vz = _mm512_fmadd_ps(v_amy,r,v_vz); /* mm = nn + 
4*mxv; */ /* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c 
= _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find fourth part of magnetic field */ /* ox = ox + dzp*(vx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */ v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx); v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx); v_ox = _mm512_fmadd_ps(v_dzp,v_vx,v_ox); /* oy = oy + dzp*(vy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */ v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy); v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy); v_oy = _mm512_fmadd_ps(v_dzp,v_vy,v_oy); /* oz = oz + dzp*(vz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */ v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz); v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz); v_oz = _mm512_fmadd_ps(v_dzp,v_vz,v_oz); /* calculate half impulse */ /* dx *= qtmh; */ /* dy *= qtmh; */ /* dz *= qtmh; */ v_dx = _mm512_mul_ps(v_dx,v_qtmh); v_dy = _mm512_mul_ps(v_dy,v_qtmh); v_dz = _mm512_mul_ps(v_dz,v_qtmh); /* half acceleration */ /* acx = ppart[j+3*nppmx+npoff] + dx; */ /* acy = ppart[j+4*nppmx+npoff] + dy; */ /* acz = ppart[j+5*nppmx+npoff] + dz; */ a = _mm512_add_ps(v_dx,_mm512_load_ps(&ppart[j+3*nppmx+npoff])); b = _mm512_add_ps(v_dy,_mm512_load_ps(&ppart[j+4*nppmx+npoff])); c = 
_mm512_add_ps(v_dz,_mm512_load_ps(&ppart[j+5*nppmx+npoff])); /* find inverse gamma */ /* p2 = acx*acx + acy*acy + acz*acz; */ v_at = _mm512_fmadd_ps(b,b,_mm512_mul_ps(a,a)); v_at = _mm512_fmadd_ps(c,c,v_at); /* gami = 1.0f/sqrtf(1.0f + p2*ci2); */ /* approximate calculation */ /* v_gami = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */ /* full accuracy calculation */ v_gami = _mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); v_gami = _mm512_div_ps(v_one,v_gami); /* full accuracy calculation with SVML */ /* v_gami = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */ /* time-centered kinetic energy */ /* sum1 += gami*p2/(1.0f + gami); */ v_at = _mm512_mul_ps(v_gami,v_at); v_at = _mm512_div_ps(v_at,_mm512_add_ps(v_one,v_gami)); /* convert to double precision before accumulating */ v_sum1 = _mm512_add_pd(v_sum1,_mm512_cvtpslo_pd(v_at)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_at,78)); v_sum1 = _mm512_add_pd(v_sum1,v_d); /* renormalize magnetic field */ /* qtmg = qtmh*gami; */ v_at = _mm512_mul_ps(v_qtmh,v_gami); /* calculate cyclotron frequency */ /* omxt = qtmg*ox; */ /* omyt = qtmg*oy; */ /* omzt = qtmg*oz; */ e = _mm512_mul_ps(v_at,v_ox); f = _mm512_mul_ps(v_at,v_oy); g = _mm512_mul_ps(v_at,v_oz); /* calculate rotation matrix */ /* vx = omxt*omxt; */ v_vx = _mm512_mul_ps(e,e); /* vy = omyt*omyt; */ v_vy = _mm512_mul_ps(f,f); /* vz = omzt*omzt; */ v_vz = _mm512_mul_ps(g,g); /* omt = omxt*omxt + omyt*omyt + omzt*omzt; */ v_at = _mm512_add_ps(_mm512_add_ps(v_vx,v_vy),v_vz); /* anorm = 2.0f/(1.0f + omt); */ d = _mm512_div_ps(v_two,_mm512_add_ps(v_one,v_at)); /* omt = 0.5f*(1.0f - omt); */ h = _mm512_mul_ps(v_half,_mm512_sub_ps(v_one,v_at)); /* vx = (omt + vx)*acx; */ v_vx = _mm512_mul_ps(_mm512_add_ps(h,v_vx),a); /* vy = (omt + vy)*acy; */ v_vy = _mm512_mul_ps(_mm512_add_ps(h,v_vy),b); /* vz = (omt + vz)*acz; */ v_vz = _mm512_mul_ps(_mm512_add_ps(h,v_vz),c); /* omt = omxt*omyt; */ h = _mm512_mul_ps(e,f); /* vx = vx + (omzt + omt)*acy; */ v_vx = 
_mm512_fmadd_ps(_mm512_add_ps(h,g),b,v_vx); /* vy = vy + (omt - omzt)*acx; */ v_vy = _mm512_fmadd_ps(_mm512_sub_ps(h,g),a,v_vy); /* omt = omxt*omzt; */ h = _mm512_mul_ps(e,g); /* vx = vx + (omt - omyt)*acz; */ v_vx = _mm512_fmadd_ps(_mm512_sub_ps(h,f),c,v_vx); /* vz = vz + (omt + omyt)*acx; */ v_vz = _mm512_fmadd_ps(_mm512_add_ps(h,f),a,v_vz); /* omt = omyt*omzt; */ h = _mm512_mul_ps(f,g); /* vy = vy + (omt + omxt)*acz; */ v_vy = _mm512_fmadd_ps(_mm512_add_ps(h,e),c,v_vy); /* vz = vz + (omt - omxt)*acy; */ v_vz = _mm512_fmadd_ps(_mm512_sub_ps(h,e),b,v_vz); /* new momentum */ /* vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; */ /* vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; */ /* vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; */ v_vx = _mm512_fmadd_ps(v_vx,d,v_dx); v_vy = _mm512_fmadd_ps(v_vy,d,v_dy); v_vz = _mm512_fmadd_ps(v_vz,d,v_dz); /* update inverse gamma */ /* p2 = vx*vx + vy*vy + vz*vz; */ v_at = _mm512_fmadd_ps(v_vy,v_vy,_mm512_mul_ps(v_vx,v_vx)); v_at = _mm512_fmadd_ps(v_vz,v_vz,v_at); /* dtg = dtc/sqrtf(1.0f + p2*ci2); */ /* approximate calculation */ /* v_at = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */ /* v_at = _mm512_mul_ps(v_dtc,v_at); */ /* full accuracy calculation */ v_at = _mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); v_at = _mm512_div_ps(v_dtc,v_at); /* full accuracy calculation with SVML */ /* v_gami = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */ /* v_at = _mm512_div_ps(v_dtc,v_at); */ /* new position */ /* dx = x + vx*dtg; */ /* dy = y + vy*dtg; */ /* dz = z + vz*dtg; */ v_dx = _mm512_fmadd_ps(v_vx,v_at,v_x); v_dy = _mm512_fmadd_ps(v_vy,v_at,v_y); v_dz = _mm512_fmadd_ps(v_vz,v_at,v_z); /* reflecting boundary conditions */ if (ipbc==2) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* vx = -vx; */ /* } */ msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx, _MM_CMPINT_GE)); v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x); v_vx = 
_mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx); /* if ((dy < edgely) || (dy >= edgery)) { */ /* dy = y; */ /* vy = -vy; */ /* } */ msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery, _MM_CMPINT_GE)); v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y); v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy); /* if ((dz < edgelz) || (dz >= edgerz)) { */ /* dz = z; */ /* vz = -vz; */ /* } */ msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dz,v_edgerz, _MM_CMPINT_GE)); v_dz = _mm512_mask_blend_ps(msk,v_dz,v_z); v_vz = _mm512_mask_sub_ps(v_vz,msk,v_zero,v_vz); } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* vx = -vx; */ /* } */ msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx, _MM_CMPINT_GE)); v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x); v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx); /* if ((dy < edgely) || (dy >= edgery)) { */ /* dy = y; */ /* vy = -vy; */ /* } */ msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery, _MM_CMPINT_GE)); v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y); v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy); } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ /* ppart[j+2*nppmx+npoff] = dz; */ _mm512_store_ps(&ppart[j+npoff],v_dx); _mm512_store_ps(&ppart[j+nppmx+npoff],v_dy); _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz); /* set new momentum */ /* ppart[j+3*nppmx+npoff] = vx; */ /* ppart[j+4*nppmx+npoff] = vy; */ /* ppart[j+5*nppmx+npoff] = vz; */ _mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx); _mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy); _mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz); } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; z = 
ppart[j+2*nppmx+npoff]; nn = x; mm = y; ll = z; dxp = x - (float) nn; dyp = y - (float) mm; dzp = z - (float) ll; nm = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = 1.0f - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* find electric field */ nn = nm; dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; mm = nn + 4*mxv; dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); nn += 4*mxyv; acx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; acy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; acz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; mm = nn + 4*mxv; dx = dx + dzp*(acx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); dy = dy + dzp*(acy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); dz = dz + dzp*(acz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); /* find magnetic field */ nn = nm; ox = amx*sbxyz[nn] + amy*sbxyz[nn+4]; oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; mm = nn + 4*mxv; ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); nn += 4*mxyv; acx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; acy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; acz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; mm = nn + 4*mxv; ox = ox + dzp*(acx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); oy = oy + dzp*(acy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); oz = oz + dzp*(acz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); /* calculate half impulse */ dx *= qtmh; dy *= qtmh; dz *= qtmh; /* half acceleration */ acx = ppart[j+3*nppmx+npoff] + dx; acy = ppart[j+4*nppmx+npoff] + dy; acz = ppart[j+5*nppmx+npoff] + dz; /* find inverse gamma */ p2 = acx*acx + acy*acy + acz*acz; gami = 1.0f/sqrtf(1.0f + p2*ci2); /* renormalize magnetic field */ qtmg = qtmh*gami; /* time-centered kinetic energy */ sum1 += gami*p2/(1.0f + 
gami); /* calculate cyclotron frequency */ omxt = qtmg*ox; omyt = qtmg*oy; omzt = qtmg*oz; /* calculate rotation matrix */ omt = omxt*omxt + omyt*omyt + omzt*omzt; anorm = 2.0f/(1.0f + omt); omt = 0.5f*(1.0f - omt); rot4 = omxt*omyt; rot7 = omxt*omzt; rot8 = omyt*omzt; rot1 = omt + omxt*omxt; rot5 = omt + omyt*omyt; rot9 = omt + omzt*omzt; rot2 = omzt + rot4; rot4 -= omzt; rot3 = -omyt + rot7; rot7 += omyt; rot6 = omxt + rot8; rot8 -= omxt; /* new momentum */ vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; /* update inverse gamma */ p2 = vx*vx + vy*vy + vz*vz; dtg = dtc/sqrtf(1.0f + p2*ci2); /* new position */ dx = x + vx*dtg; dy = y + vy*dtg; dz = z + vz*dtg; /* reflecting boundary conditions */ if (ipbc==2) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; vx = -vx; } if ((dy < edgely) || (dy >= edgery)) { dy = y; vy = -vy; } if ((dz < edgelz) || (dz >= edgerz)) { dz = z; vz = -vz; } } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; vx = -vx; } if ((dy < edgely) || (dy >= edgery)) { dy = y; vy = -vy; } } /* set new position */ ppart[j+npoff] = dx; ppart[j+nppmx+npoff] = dy; ppart[j+2*nppmx+npoff] = dz; /* set new momentum */ ppart[j+3*nppmx+npoff] = vx; ppart[j+4*nppmx+npoff] = vy; ppart[j+5*nppmx+npoff] = vz; } /* sum2 += sum1; */ _mm512_store_pd(&dd[0],v_sum1); for (j = 1; j < 8; j++) { dd[0] += dd[j]; } sum2 += (sum1 + dd[0]); } /* normalize kinetic energy */ *ek += sum2; return; #undef MXV #undef MYV #undef MZV } /*--------------------------------------------------------------------*/ void ckncgrbppushf3lt(float ppart[], float fxyz[], float bxyz[], int kpic[], int ncl[], int ihole[], float qbm, float dt, float dtc, float ci, float *ek, int idimp, int nppmx, int nx, int ny, int nz, int mx, int my, int mz, int nxv, int nyv, int nzv, int mx1, int my1, int mxyz1, int ntmax, int *irc) { /* for 
3d code, this subroutine updates particle co-ordinates and velocities using leap-frog scheme in time and first-order linear interpolation in space, for relativistic particles with magnetic field Using the Boris Mover. also determines list of particles which are leaving this tile OpenMP/vector version using guard cells data read in tiles particles stored segmented array 202 flops/particle, 4 divides, 2 sqrts, 54 loads, 6 stores input: all except ncl, ihole, irc, output: ppart, ncl, ihole, ek, irc momentum equations used are: px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t),z(t))*dt) + rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t),z(t))*dt) + rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) + .5*(q/m)*fz(x(t),y(t),z(t))*dt) where q/m is charge/mass, and the rotation matrix is given by: rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2) rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2) rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2) rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2) rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2) and om**2 = omx**2 + omy**2 + omz**2 the rotation matrix is determined by: omx = (q/m)*bx(x(t),y(t),z(t))*gami, omy = (q/m)*by(x(t),y(t),z(t))*gami, omz = (q/m)*bz(x(t),y(t),z(t))*gami, 
where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci) position equations used are: x(t+dt) = x(t) + px(t+dt/2)*dtg y(t+dt) = y(t) + py(t+dt/2)*dtg z(t+dt) = z(t) + pz(t+dt/2)*dtg where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+ pz(t+dt/2)*pz(t+dt/2))*ci*ci) fx(x(t),y(t),z(t)), fy(x(t),y(t),z(t)), and fz(x(t),y(t),z(t)), bx(x(t),y(t),z(t)), by(x(t),y(t),z(t)), and bz(x(t),y(t),z(t)) are approximated by interpolation from the nearest grid points: fx(x,y,z) = (1-dz)*((1-dy)*((1-dx)*fx(n,m,l)+dx*fx(n+1,m,l)) + dy*((1-dx)*fx(n,m+1,l) + dx*fx(n+1,m+1,l))) + dz*((1-dy)*((1-dx)*fx(n,m,l+1)+dx*fx(n+1,m,l+1)) + dy*((1-dx)*fx(n,m+1,l+1) + dx*fx(n+1,m+1,l+1))) where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l similarly for fy(x,y,z), fz(x,y,z), bx(x,y,z), by(x,y,z), bz(x,y,z) ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = position z of particle n in tile m ppart[m][3][n] = momentum px of particle n in tile m ppart[m][4][n] = momentum py of particle n in tile m ppart[m][5][n] = momentum pz of particle n in tile m fxyz[l][k][j][0] = x component of force/charge at grid (j,k,l) fxyz[l][k][j][1] = y component of force/charge at grid (j,k,l) fxyz[l][k][j][2] = z component of force/charge at grid (j,k,l) that is, convolution of electric field over particle shape bxyz[l][k][j][0] = x component of magnetic field at grid (j,k,l) bxyz[l][k][j][1] = y component of magnetic field at grid (j,k,l) bxyz[l][k][j][2] = z component of magnetic field at grid (j,k,l) that is, the convolution of magnetic field over particle shape kpic[l] = number of particles in tile l ncl[l][i] = number of particles going to destination i, tile l ihole[l][:][0] = location of hole in array left by departing particle ihole[l][:][1] = direction destination of particle leaving hole all for tile l ihole[l][0][0] = ih, number of holes left (error, if negative) qbm = particle charge/mass ratio 
dt = time interval between successive force calculations dtc = time interval between successive co-ordinate calculations ci = reciprocal of velocity of light kinetic energy/mass at time t is also calculated, using ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 + (py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 + (pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)/(1. + gami) idimp = size of phase space = 6 nppmx = maximum number of particles in tile nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z nxv = second dimension of field arrays, must be >= nx+1 nyv = third dimension of field arrays, must be >= ny+1 nzv = fourth dimension of field array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 requires KNC, ppart needs to be 64 byte aligned nppmx needs to be a multiple of 16 fxyz needs to have 4 components, although one is not used local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp, nps; int i, j, k, l, m, ii, ih, nh, nn, mm, ll, nm, mxv, myv, mxyv, nxyv; float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dxp, dyp, dzp, amx, amy, amz, dx, dy, dz, ox, oy, oz, dx1; float acx, acy, acz, omxt, p2, gami, qtmg, omyt, omzt, omt, anorm; float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9, dtg; float qtmh, ci2, x, y, z, vx, vy, vz; double sum1, sum2; __m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4; __m512i v_nn, v_mm, v_ll, v_nm, v_it, v_0, v_1, v_3, v_9, v_perm; __m512 v_dt, v_dtc, v_one, v_zero, v_anx, v_any, v_anz; __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz; __m512 v_dx1, v_gami, v_at, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz; __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, 
v_edgery, v_edgerz; __m512 a, b, c, d, e, f, g, h, p, q, r, s; __m512 v_qtmh, v_ci2, v_two, v_half, v_ox, v_oy, v_oz; __m512d v_sum1, v_d; __mmask16 msk1, msk2; __attribute__((aligned(64))) unsigned int kk[16]; __attribute__((aligned(64))) double dd[8]; __attribute__((aligned(64))) float sfxyz[4*MXV*MYV*MZV]; __attribute__((aligned(64))) float sbxyz[4*MXV*MYV*MZV]; /* __attribute__((aligned(64))) float sfxyz[4*(mx+1)*(my+1)*(mz+1)]; */ /* __attribute__((aligned(64))) float sbxyz[4*(mx+1)*(my+1)*(mz+1)]; */ mxy1 = mx1*my1; /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; qtmh = 0.5f*qbm*dt; ci2 = ci*ci; anx = (float) nx; any = (float) ny; anz = (float) nz; sum2 = 0.0; /* set boundary values */ v_mxv4 = _mm512_set1_epi32(4*mxv); v_mxyv4 = _mm512_set1_epi32(4*mxyv); v_0 = _mm512_set1_epi32(0); v_1 = _mm512_set1_epi32(1); v_3 = _mm512_set1_epi32(3); v_9 = _mm512_set1_epi32(9); v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0); v_qtmh = _mm512_set1_ps(qtmh); v_ci2 = _mm512_set1_ps(ci2); v_dt = _mm512_set1_ps(dt); v_dtc = _mm512_set1_ps(dtc); v_one = _mm512_set1_ps(1.0f); v_zero = _mm512_setzero_ps(); v_two = _mm512_set1_ps(2.0f); v_half = _mm512_set1_ps(0.5f); v_anx = _mm512_set1_ps(anx); v_any = _mm512_set1_ps(any); v_anz = _mm512_set1_ps(anz); v_sum1 = _mm512_set1_pd(0.0); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,m,ii,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,ih,nh,x, \ y,z,vx,vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,ox,oy,oz,acx,acy,acz, \ omxt,omyt,omzt,omt,anorm,rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9, \ edgelx,edgely,edgelz,edgerx,edgery,edgerz,p2,gami,qtmg,dtg,sum1,v_noff, \ v_moff,v_loff,v_nn,v_mm,v_ll,v_nm,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp, \ v_amx,v_amy,v_amz,v_dx1,v_dx,v_dy,v_dz,v_vx,v_vy,v_vz,v_ox,v_oy,v_oz, \ 
v_gami,v_at,v_edgelx,v_edgely,v_edgelz,v_edgerx,v_edgery,v_edgerz,v_d, \ v_sum1,a,b,c,d,e,f,g,h,p,q,r,s,msk1,msk2,kk,dd,sfxyz,sbxyz) \ reduction(+:sum2) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm512_set1_epi32(noff); v_moff = _mm512_set1_epi32(moff); v_loff = _mm512_set1_epi32(loff); npp = kpic[l]; npoff = idimp*nppmx*l; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? my : mm; ll = nz - loff; ll = mz < ll ? mz : ll; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; edgelz = loff; edgerz = loff + ll; v_edgelx = _mm512_set1_ps(edgelx); v_edgely = _mm512_set1_ps(edgely); v_edgelz = _mm512_set1_ps(edgelz); v_edgerx = _mm512_set1_ps(edgerx); v_edgery = _mm512_set1_ps(edgery); v_edgerz = _mm512_set1_ps(edgerz); ih = 0; nh = 0; nn += 1; mm += 1; ll += 1; /* load local fields from global array */ nps = 4*(nn/4); /* load electric field */ for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 0; i < nn; i++) { */ /* sfxyz[4*(i+mxv*j+mxyv*k)] */ /* = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sfxyz[1+4*(i+mxv*j+mxyv*k)] */ /* = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sfxyz[2+4*(i+mxv*j+mxyv*k)] */ /* = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* } */ for (i = 0; i < nps; i+=4) { m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff)); v_at = _mm512_loadunpacklo_ps(v_at,&fxyz[m]); v_at = _mm512_loadunpackhi_ps(v_at,&fxyz[m+16]); m = 4*(i + mxv*j + mxyv*k); _mm512_packstorelo_ps(&sfxyz[m],v_at); _mm512_packstorehi_ps(&sfxyz[m+16],v_at); } /* loop over remaining elements */ for (i = nps; i < nn; i++) { sfxyz[4*(i+mxv*j+mxyv*k)] = fxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[1+4*(i+mxv*j+mxyv*k)] = fxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sfxyz[2+4*(i+mxv*j+mxyv*k)] = fxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; 
sfxyz[3+4*(i+mxv*j+mxyv*k)] = fxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } /* load magnetic field */ for (k = 0; k < ll; k++) { for (j = 0; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 0; i < nn; i++) { */ /* sbxyz[4*(i+mxv*j+mxyv*k)] */ /* = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sbxyz[1+4*(i+mxv*j+mxyv*k)] */ /* = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* sbxyz[2+4*(i+mxv*j+mxyv*k)] */ /* = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; */ /* } */ for (i = 0; i < nps; i+=4) { m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff)); v_at = _mm512_loadunpacklo_ps(v_at,&bxyz[m]); v_at = _mm512_loadunpackhi_ps(v_at,&bxyz[m+16]); m = 4*(i + mxv*j + mxyv*k); _mm512_packstorelo_ps(&sbxyz[m],v_at); _mm512_packstorehi_ps(&sbxyz[m+16],v_at); } /* loop over remaining elements */ for (i = nps; i < nn; i++) { sbxyz[4*(i+mxv*j+mxyv*k)] = bxyz[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[1+4*(i+mxv*j+mxyv*k)] = bxyz[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[2+4*(i+mxv*j+mxyv*k)] = bxyz[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; sbxyz[3+4*(i+mxv*j+mxyv*k)] = bxyz[3+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]; } } } /* clear counters */ /* for (j = 0; j < 26; j++) { */ /* ncl[j+26*l] = 0; */ /* } */ memset((void*)&ncl[26*l],0,26*sizeof(int)); nps = 16*(npp/16); sum1 = 0.0; v_sum1 = _mm512_set1_pd(0.0); /* loop over particles in tile in blocks of 16 */ for (j = 0; j < nps; j+=16) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ /* z = ppart[j+2*nppmx+npoff]; */ v_x = _mm512_load_ps(&ppart[j+npoff]); v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]); v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]); /* nn = x; */ /* mm = y; */ /* ll = z; */ v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z, 
_MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); /* dxp = x - (float) nn; */ /* dyp = y - (float) mm; */ /* dzp = z - (float) ll; */ v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dxp = _mm512_sub_ps(v_x,v_dxp); v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dyp = _mm512_sub_ps(v_y,v_dyp); v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dzp = _mm512_sub_ps(v_z,v_dzp); /* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */ v_nn = _mm512_sub_epi32(v_nn,v_noff); v_mm = _mm512_sub_epi32(v_mm,v_moff); v_ll = _mm512_sub_epi32(v_ll,v_loff); v_it = _mm512_mullo_epi32(v_mxyv4,v_ll); v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm)); v_nm = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it); /* amx = 1.0f - dxp; */ /* amy = 1.0f - dyp; */ /* amz = 1.0f - dzp; */ v_amx = _mm512_sub_ps(v_one,v_dxp); v_amy = _mm512_sub_ps(v_one,v_dyp); v_amz = _mm512_sub_ps(v_one,v_dzp); /* dx1 = dxp*dyp; */ /* dyp = amx*dyp; */ /* amx = amx*amy; */ /* amy = dxp*amy; */ v_dx1 = _mm512_mul_ps(v_dxp,v_dyp); v_dyp = _mm512_mul_ps(v_amx,v_dyp); v_amx = _mm512_mul_ps(v_amx,v_amy); v_amy = _mm512_mul_ps(v_dxp,v_amy); /* find electric field */ /* nn = nm; */ _mm512_store_epi32(kk,v_nm); /* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13]; f = 
_mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[nn:nn+3] field components */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = 
_mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find first part of electric field */ /* dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */ v_dx = _mm512_mul_ps(v_amx,a); v_dx = _mm512_fmadd_ps(v_amy,p,v_dx); /* dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */ v_dy = _mm512_mul_ps(v_amx,b); v_dy = _mm512_fmadd_ps(v_amy,q,v_dy); /* dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */ v_dz = _mm512_mul_ps(v_amx,c); v_dz = _mm512_fmadd_ps(v_amy,r,v_dz); /* mm = nn + 4*mxv; */ /* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = 
(__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find second part of electric field */ /* dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */ v_dx = _mm512_fmadd_ps(v_dyp,a,v_dx); v_dx = _mm512_fmadd_ps(v_dx1,p,v_dx); v_dx = _mm512_mul_ps(v_amz,v_dx); /* dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */ v_dy = _mm512_fmadd_ps(v_dyp,b,v_dy); v_dy = _mm512_fmadd_ps(v_dx1,q,v_dy); v_dy = _mm512_mul_ps(v_amz,v_dy); /* dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */ v_dz = _mm512_fmadd_ps(v_dyp,c,v_dz); v_dz = _mm512_fmadd_ps(v_dx1,r,v_dz); v_dz = _mm512_mul_ps(v_amz,v_dz); /* nn += 4*mxyv; */ v_nn = _mm512_add_epi32(v_nm,v_mxyv4); 
_mm512_store_epi32(kk,v_nn); /* load sfxyz[nn:nn+3] and sfxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm 
= kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[nn:nn+3] field components */ /* where nn = nn + 4*mxyv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* 
perform 16x3 transpose for sfxyz[nn+4:nn+7] field components */ /* where nn = nn + 4*mxyv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find third part of electric field */ /* vx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; */ v_vx = _mm512_mul_ps(v_amx,a); v_vx = _mm512_fmadd_ps(v_amy,p,v_vx); /* vy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; */ v_vy = _mm512_mul_ps(v_amx,b); v_vy = _mm512_fmadd_ps(v_amy,q,v_vy); /* vz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; */ v_vz = _mm512_mul_ps(v_amx,c); v_vz = _mm512_fmadd_ps(v_amy,r,v_vz); /* mm = nn + 4*mxv; */ /* load sfxyz[mm:mm+3] and sfxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = 
_mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sfxyz[mm]); f = 
_mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sfxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sfxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sfxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sfxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sfxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sfxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sfxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = 
_mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find fourth part of electric field */ /* dx = dx + dzp*(vx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); */ v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx); v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx); v_dx = _mm512_fmadd_ps(v_dzp,v_vx,v_dx); /* dy = dy + dzp*(vy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); */ v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy); v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy); v_dy = _mm512_fmadd_ps(v_dzp,v_vy,v_dy); /* dz = dz + dzp*(vz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); */ v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz); v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz); v_dz = _mm512_fmadd_ps(v_dzp,v_vz,v_dz); /* find magnetic field */ /* nn = nm; */ _mm512_store_epi32(kk,v_nm); /* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6]; e = 
_mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); 
/* perform 16x3 transpose for sbxyz[nn:nn+3] field components */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find first part of magnetic field */ /* ox = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */ v_ox = _mm512_mul_ps(v_amx,a); v_ox = _mm512_fmadd_ps(v_amy,p,v_ox); /* oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */ v_oy = _mm512_mul_ps(v_amx,b); v_oy = _mm512_fmadd_ps(v_amy,q,v_oy); /* oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */ v_oz = _mm512_mul_ps(v_amx,c); v_oz = _mm512_fmadd_ps(v_amy,r,v_oz); /* mm = nn + 4*mxv; */ /* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 
4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10] + 4*mxv; e = 
_mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = 
_mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find second part of magnetic field */ /* ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */ v_ox = _mm512_fmadd_ps(v_dyp,a,v_ox); v_ox = _mm512_fmadd_ps(v_dx1,p,v_ox); v_ox = _mm512_mul_ps(v_amz,v_ox); /* oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */ v_oy = _mm512_fmadd_ps(v_dyp,b,v_oy); v_oy = _mm512_fmadd_ps(v_dx1,q,v_oy); v_oy = _mm512_mul_ps(v_amz,v_oy); /* oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */ v_oz = _mm512_fmadd_ps(v_dyp,c,v_oz); v_oz = _mm512_fmadd_ps(v_dx1,r,v_oz); v_oz = _mm512_mul_ps(v_amz,v_oz); /* nn += 4*mxyv; */ v_nn = _mm512_add_epi32(v_nm,v_mxyv4); _mm512_store_epi32(kk,v_nn); /* load sbxyz[nn:nn+3] and sbxyz[nn+4:nn+7] field components */ /* first block of 4 particles */ mm = kk[0]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = 
_mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12]; e = 
_mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14]; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15]; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[nn:nn+3] field components */ /* where nn = nn + 4*mxyv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[nn+4:nn+7] field components */ /* where nn = nn + 4*mxyv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = 
_mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find third part of magnetic field */ /* vx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; */ v_vx = _mm512_mul_ps(v_amx,a); v_vx = _mm512_fmadd_ps(v_amy,p,v_vx); /* vy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; */ v_vy = _mm512_mul_ps(v_amx,b); v_vy = _mm512_fmadd_ps(v_amy,q,v_vy); /* vz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; */ v_vz = _mm512_mul_ps(v_amx,c); v_vz = _mm512_fmadd_ps(v_amy,r,v_vz); /* mm = nn + 4*mxv; */ /* load sbxyz[mm:mm+3] and sbxyz[mm+4:mm+7] field components */ /* first block of 4 particles */ mm = kk[0] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[1] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[2] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[3] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); p = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* second block of 4 particles */ mm = kk[4] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[5] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[6] + 4*mxv; e = 
_mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[7] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); b = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* third block of 4 particles */ mm = kk[8] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[9] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[10] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[11] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); c = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); r = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* fourth block of 4 particles */ mm = kk[12] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(255), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[13] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(255), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(255), &sbxyz[mm+16]); mm = kk[14] + 4*mxv; e = _mm512_mask_loadunpacklo_ps(e,_mm512_int2mask(65280), &sbxyz[mm]); e = _mm512_mask_loadunpackhi_ps(e,_mm512_int2mask(65280), &sbxyz[mm+16]); mm = kk[15] + 4*mxv; f = _mm512_mask_loadunpacklo_ps(f,_mm512_int2mask(65280), &sbxyz[mm]); f = _mm512_mask_loadunpackhi_ps(f,_mm512_int2mask(65280), &sbxyz[mm+16]); d = 
_mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),f,177); s = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(3855),e,177); /* perform 16x3 transpose for sbxyz[mm:mm+3] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(61680),b,177); f = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(3855),a,177); g = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(61680),d,177); b = _mm512_mask_permute4f128_ps(d,_mm512_int2mask(3855),c,177); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); c = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); b = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),b,78); /* perform 16x3 transpose for sbxyz[mm+4:mm+7] field components */ /* where mm = nn + 4*mxyv + 4*mxv; */ p = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)p); q = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)q); r = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)r); s = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)s); e = _mm512_mask_permute4f128_ps(p,_mm512_int2mask(61680),q,177); f = _mm512_mask_permute4f128_ps(q,_mm512_int2mask(3855),p,177); g = _mm512_mask_permute4f128_ps(r,_mm512_int2mask(61680),s,177); q = _mm512_mask_permute4f128_ps(s,_mm512_int2mask(3855),r,177); p = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(65280),g,78); r = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(255),e,78); q = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(65280),q,78); /* find fourth part of magnetic field */ /* ox = ox + dzp*(vx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); */ v_vx = _mm512_fmadd_ps(v_dyp,a,v_vx); v_vx = _mm512_fmadd_ps(v_dx1,p,v_vx); v_ox = _mm512_fmadd_ps(v_dzp,v_vx,v_ox); /* oy = oy + dzp*(vy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); */ v_vy = _mm512_fmadd_ps(v_dyp,b,v_vy); 
v_vy = _mm512_fmadd_ps(v_dx1,q,v_vy); v_oy = _mm512_fmadd_ps(v_dzp,v_vy,v_oy); /* oz = oz + dzp*(vz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); */ v_vz = _mm512_fmadd_ps(v_dyp,c,v_vz); v_vz = _mm512_fmadd_ps(v_dx1,r,v_vz); v_oz = _mm512_fmadd_ps(v_dzp,v_vz,v_oz); /* calculate half impulse */ /* dx *= qtmh; */ /* dy *= qtmh; */ /* dz *= qtmh; */ v_dx = _mm512_mul_ps(v_dx,v_qtmh); v_dy = _mm512_mul_ps(v_dy,v_qtmh); v_dz = _mm512_mul_ps(v_dz,v_qtmh); /* half acceleration */ /* acx = ppart[j+3*nppmx+npoff] + dx; */ /* acy = ppart[j+4*nppmx+npoff] + dy; */ /* acz = ppart[j+5*nppmx+npoff] + dz; */ a = _mm512_add_ps(v_dx,_mm512_load_ps(&ppart[j+3*nppmx+npoff])); b = _mm512_add_ps(v_dy,_mm512_load_ps(&ppart[j+4*nppmx+npoff])); c = _mm512_add_ps(v_dz,_mm512_load_ps(&ppart[j+5*nppmx+npoff])); /* find inverse gamma */ /* p2 = acx*acx + acy*acy + acz*acz; */ v_at = _mm512_fmadd_ps(b,b,_mm512_mul_ps(a,a)); v_at = _mm512_fmadd_ps(c,c,v_at); /* gami = 1.0f/sqrtf(1.0f + p2*ci2); */ /* approximate calculation */ /* v_gami = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */ /* full accuracy calculation */ v_gami = _mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); v_gami = _mm512_div_ps(v_one,v_gami); /* full accuracy calculation with SVML */ /* v_gami = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */ /* time-centered kinetic energy */ /* sum1 += gami*p2/(1.0f + gami); */ v_at = _mm512_mul_ps(v_gami,v_at); v_at = _mm512_div_ps(v_at,_mm512_add_ps(v_one,v_gami)); /* convert to double precision before accumulating */ v_sum1 = _mm512_add_pd(v_sum1,_mm512_cvtpslo_pd(v_at)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_at,78)); v_sum1 = _mm512_add_pd(v_sum1,v_d); /* renormalize magnetic field */ /* qtmg = qtmh*gami; */ v_at = _mm512_mul_ps(v_qtmh,v_gami); /* calculate cyclotron frequency */ /* omxt = qtmg*ox; */ /* omyt = qtmg*oy; */ /* omzt = qtmg*oz; */ e = _mm512_mul_ps(v_at,v_ox); f = _mm512_mul_ps(v_at,v_oy); g = _mm512_mul_ps(v_at,v_oz); /* calculate rotation matrix 
*/ /* vx = omxt*omxt; */ v_vx = _mm512_mul_ps(e,e); /* vy = omyt*omyt; */ v_vy = _mm512_mul_ps(f,f); /* vz = omzt*omzt; */ v_vz = _mm512_mul_ps(g,g); /* omt = omxt*omxt + omyt*omyt + omzt*omzt; */ v_at = _mm512_add_ps(_mm512_add_ps(v_vx,v_vy),v_vz); /* anorm = 2.0f/(1.0f + omt); */ d = _mm512_div_ps(v_two,_mm512_add_ps(v_one,v_at)); /* omt = 0.5f*(1.0f - omt); */ h = _mm512_mul_ps(v_half,_mm512_sub_ps(v_one,v_at)); /* vx = (omt + vx)*acx; */ v_vx = _mm512_mul_ps(_mm512_add_ps(h,v_vx),a); /* vy = (omt + vy)*acy; */ v_vy = _mm512_mul_ps(_mm512_add_ps(h,v_vy),b); /* vz = (omt + vz)*acz; */ v_vz = _mm512_mul_ps(_mm512_add_ps(h,v_vz),c); /* omt = omxt*omyt; */ h = _mm512_mul_ps(e,f); /* vx = vx + (omzt + omt)*acy; */ v_vx = _mm512_fmadd_ps(_mm512_add_ps(h,g),b,v_vx); /* vy = vy + (omt - omzt)*acx; */ v_vy = _mm512_fmadd_ps(_mm512_sub_ps(h,g),a,v_vy); /* omt = omxt*omzt; */ h = _mm512_mul_ps(e,g); /* vx = vx + (omt - omyt)*acz; */ v_vx = _mm512_fmadd_ps(_mm512_sub_ps(h,f),c,v_vx); /* vz = vz + (omt + omyt)*acx; */ v_vz = _mm512_fmadd_ps(_mm512_add_ps(h,f),a,v_vz); /* omt = omyt*omzt; */ h = _mm512_mul_ps(f,g); /* vy = vy + (omt + omxt)*acz; */ v_vy = _mm512_fmadd_ps(_mm512_add_ps(h,e),c,v_vy); /* vz = vz + (omt - omxt)*acy; */ v_vz = _mm512_fmadd_ps(_mm512_sub_ps(h,e),b,v_vz); /* new momentum */ /* vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; */ /* vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; */ /* vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; */ v_vx = _mm512_fmadd_ps(v_vx,d,v_dx); v_vy = _mm512_fmadd_ps(v_vy,d,v_dy); v_vz = _mm512_fmadd_ps(v_vz,d,v_dz); /* update inverse gamma */ /* p2 = vx*vx + vy*vy + vz*vz; */ v_at = _mm512_fmadd_ps(v_vy,v_vy,_mm512_mul_ps(v_vx,v_vx)); v_at = _mm512_fmadd_ps(v_vz,v_vz,v_at); /* dtg = dtc/sqrtf(1.0f + p2*ci2); */ /* approximate calculation */ /* v_at = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */ /* v_at = _mm512_mul_ps(v_dtc,v_at); */ /* full accuracy calculation */ v_at = 
_mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); v_at = _mm512_div_ps(v_dtc,v_at); /* full accuracy calculation with SVML */ /* v_gami = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */ /* v_at = _mm512_div_ps(v_dtc,v_at); */ /* new position */ /* dx = x + vx*dtg; */ /* dy = y + vy*dtg; */ /* dz = z + vz*dtg; */ v_dx = _mm512_fmadd_ps(v_vx,v_at,v_x); v_dy = _mm512_fmadd_ps(v_vy,v_at,v_y); v_dz = _mm512_fmadd_ps(v_vz,v_at,v_z); /* find particles going out of bounds */ /* mm = 0; */ v_mm = _mm512_setzero_epi32(); /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ /* if (dx >= edgerx) { */ /* if (dx >= anx) */ /* ppart[j+npoff] = dx - anx; */ /* mm = 2; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dx,v_edgerx,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dx; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_1,v_1); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it); msk1 = _mm512_cmp_ps_mask(v_dx,v_anx,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dx,v_anx); ii = _mm512_mask2int(msk1); if (ii != 0) v_dx = v_x; } /* if (dx < edgelx) { */ /* if (dx < 0.0) { */ /* dx += anx; */ /* if (dx < anx) */ /* mm = 1; */ /* else */ /* dx = 0.0; */ /* ppart[j+npoff] = dx; */ /* } */ /* else { */ /* mm = 1; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_1); msk2 = _mm512_cmp_ps_mask(v_dx,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dx,v_anx); msk1 = _mm512_cmp_ps_mask(v_x,v_anx,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = 
_mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_mm = _mm512_add_epi32(v_mm,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) v_dx = v_x; } } /* if (dy >= edgery) { */ /* if (dy >= any) */ /* ppart[j+nppmx+npoff] = dy - any; */ /* mm += 6; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dy,v_edgery,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dy; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_3,v_3); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it); msk1 = _mm512_cmp_ps_mask(v_dy,v_any,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dy,v_any); ii = _mm512_mask2int(msk1); if (ii != 0) v_dy = v_x; } /* if (dy < edgely) { */ /* if (dy < 0.0) { */ /* dy += any; */ /* if (dy < any) */ /* mm += 3; */ /* else */ /* dy = 0.0; */ /* ppart[j+nppmx+npoff] = dy; */ /* } */ /* else { */ /* mm += 3; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_3); msk2 = _mm512_cmp_ps_mask(v_dy,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dy,v_any); msk1 = _mm512_cmp_ps_mask(v_x,v_any,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_mm = _mm512_add_epi32(v_mm,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) v_dy = v_x; } } /* if (dz >= edgerz) { */ /* if (dz >= anz) */ /* ppart[j+2*nppmx+npoff] = dz - anz; */ /* mm += 18; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dz; /* write output if 
test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_9,v_9); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it); msk1 = _mm512_cmp_ps_mask(v_dz,v_anz,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dz,v_anz); ii = _mm512_mask2int(msk1); if (ii != 0) v_dz = v_x; } /* if (dz < edgelz) { */ /* if (dz < 0.0) { */ /* dz += anz; */ /* if (dz < anz) */ /* mm += 9; */ /* else */ /* dz = 0.0; */ /* ppart[j+2*nppmx+npoff] = dz; */ /* } */ /* else { */ /* mm += 9; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_9); msk2 = _mm512_cmp_ps_mask(v_dz,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dz,v_anz); msk1 = _mm512_cmp_ps_mask(v_x,v_anz,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_mm = _mm512_add_epi32(v_mm,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) v_dz = v_x; } } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ /* ppart[j+2*nppmx+npoff] = dz; */ _mm512_store_ps(&ppart[j+npoff],v_dx); _mm512_store_ps(&ppart[j+nppmx+npoff],v_dy); _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz); /* set new momentum */ /* ppart[j+3*nppmx+npoff] = vx; */ /* ppart[j+4*nppmx+npoff] = vy; */ /* ppart[j+5*nppmx+npoff] = vz; */ _mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx); _mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy); _mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz); /* increment counters */ /* if (mm > 0) { */ /* ncl[mm+26*l-1] += 1; */ /* ih += 1; */ /* if (ih <= ntmax) { */ /* ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; */ /* ihole[1+2*(ih+(ntmax+1)*l)] = mm; */ /* } */ /* else { */ /* nh = 1; */ /* } */ /* } */ _mm512_store_epi32(kk,v_mm); for (i = 0; i < 16; i++) { mm = kk[i]; if (mm > 0) { ncl[mm+26*l-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; ihole[1+2*(ih+(ntmax+1)*l)] = mm; } else { 
nh = 1; } } } } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; z = ppart[j+2*nppmx+npoff]; nn = x; mm = y; ll = z; dxp = x - (float) nn; dyp = y - (float) mm; dzp = z - (float) ll; nm = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = 1.0f - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* find electric field */ nn = nm; dx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; dy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; dz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; mm = nn + 4*mxv; dx = amz*(dx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); dy = amz*(dy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); dz = amz*(dz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); nn += 4*mxyv; acx = amx*sfxyz[nn] + amy*sfxyz[nn+4]; acy = amx*sfxyz[nn+1] + amy*sfxyz[nn+1+4]; acz = amx*sfxyz[nn+2] + amy*sfxyz[nn+2+4]; mm = nn + 4*mxv; dx = dx + dzp*(acx + dyp*sfxyz[mm] + dx1*sfxyz[mm+4]); dy = dy + dzp*(acy + dyp*sfxyz[mm+1] + dx1*sfxyz[mm+1+4]); dz = dz + dzp*(acz + dyp*sfxyz[mm+2] + dx1*sfxyz[mm+2+4]); /* find magnetic field */ nn = nm; ox = amx*sbxyz[nn] + amy*sbxyz[nn+4]; oy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; oz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; mm = nn + 4*mxv; ox = amz*(ox + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); oy = amz*(oy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); oz = amz*(oz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); nn += 4*mxyv; acx = amx*sbxyz[nn] + amy*sbxyz[nn+4]; acy = amx*sbxyz[nn+1] + amy*sbxyz[nn+1+4]; acz = amx*sbxyz[nn+2] + amy*sbxyz[nn+2+4]; mm = nn + 4*mxv; ox = ox + dzp*(acx + dyp*sbxyz[mm] + dx1*sbxyz[mm+4]); oy = oy + dzp*(acy + dyp*sbxyz[mm+1] + dx1*sbxyz[mm+1+4]); oz = oz + dzp*(acz + dyp*sbxyz[mm+2] + dx1*sbxyz[mm+2+4]); /* calculate half impulse */ dx *= qtmh; dy *= qtmh; dz *= qtmh; /* half acceleration */ acx = ppart[j+3*nppmx+npoff] + dx; acy = ppart[j+4*nppmx+npoff] + dy; acz = ppart[j+5*nppmx+npoff] + dz; /* find inverse gamma */ p2 = acx*acx + 
acy*acy + acz*acz; gami = 1.0f/sqrtf(1.0f + p2*ci2); /* renormalize magnetic field */ qtmg = qtmh*gami; /* time-centered kinetic energy */ sum1 += gami*p2/(1.0f + gami); /* calculate cyclotron frequency */ omxt = qtmg*ox; omyt = qtmg*oy; omzt = qtmg*oz; /* calculate rotation matrix */ omt = omxt*omxt + omyt*omyt + omzt*omzt; anorm = 2.0f/(1.0f + omt); omt = 0.5f*(1.0f - omt); rot4 = omxt*omyt; rot7 = omxt*omzt; rot8 = omyt*omzt; rot1 = omt + omxt*omxt; rot5 = omt + omyt*omyt; rot9 = omt + omzt*omzt; rot2 = omzt + rot4; rot4 -= omzt; rot3 = -omyt + rot7; rot7 += omyt; rot6 = omxt + rot8; rot8 -= omxt; /* new momentum */ vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; /* update inverse gamma */ p2 = vx*vx + vy*vy + vz*vz; dtg = dtc/sqrtf(1.0f + p2*ci2); /* new position */ dx = x + vx*dtg; dy = y + vy*dtg; dz = z + vz*dtg; /* find particles going out of bounds */ mm = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) dx = dx - anx; mm = 2; } else if (dx < edgelx) { if (dx < 0.0f) { dx += anx; if (dx < anx) mm = 1; else dx = 0.0f; } else { mm = 1; } } if (dy >= edgery) { if (dy >= any) dy = dy - any; mm += 6; } else if (dy < edgely) { if (dy < 0.0f) { dy += any; if (dy < any) mm += 3; else dy = 0.0f; } else { mm += 3; } } if (dz >= edgerz) { if (dz >= anz) dz = dz - anz; mm += 18; } else if (dz < edgelz) { if (dz < 0.0f) { dz += anz; if (dz < anz) mm += 9; else dz = 0.0f; } else { mm += 9; } } /* set new position */ ppart[j+npoff] = dx; ppart[j+nppmx+npoff] = dy; ppart[j+2*nppmx+npoff] = dz; /* set new momentum */ ppart[j+3*nppmx+npoff] = vx; ppart[j+4*nppmx+npoff] = vy; ppart[j+5*nppmx+npoff] = vz; /* increment counters */ if (mm > 0) { ncl[mm+26*l-1] += 1; ih 
+= 1;
            if (ih <= ntmax) {
               ihole[2*(ih+(ntmax+1)*l)] = j + 1;
               ihole[1+2*(ih+(ntmax+1)*l)] = mm;
            }
            else {
               nh = 1;
            }
         }
      }
/* sum2 += sum1; */
/* reduce the 8 double precision partial sums of the kinetic energy */
      _mm512_store_pd(&dd[0],v_sum1);
      for (j = 1; j < 8; j++) {
         dd[0] += dd[j];
      }
      sum2 += (sum1 + dd[0]);
/* set error and end of file flag */
      if (nh > 0) {
         *irc = ih;
         ih = -ih;
      }
      ihole[2*(ntmax+1)*l] = ih;
   }
/* normalize kinetic energy */
   *ek += sum2;
   return;
#undef MXV
#undef MYV
#undef MZV
}

/*--------------------------------------------------------------------*/
void ckncgppost3lt(float ppart[], float q[], int kpic[], float qm,
                   int nppmx, int idimp, int mx, int my, int mz,
                   int nxv, int nyv, int nzv, int mx1, int my1,
                   int mxyz1) {
/* for 3d code, this subroutine calculates particle charge density
   using first-order linear interpolation, periodic boundaries
   OpenMP/vector version using guard cells
   data deposited in tiles
   particles stored segmented array
   33 flops/particle, 11 loads, 8 stores
   input: all, output: q
   charge density is approximated by values at the nearest grid points
   q(n,m,l)=qm*(1.-dx)*(1.-dy)*(1.-dz)
   q(n+1,m,l)=qm*dx*(1.-dy)*(1.-dz)
   q(n,m+1,l)=qm*(1.-dx)*dy*(1.-dz)
   q(n+1,m+1,l)=qm*dx*dy*(1.-dz)
   q(n,m,l+1)=qm*(1.-dx)*(1.-dy)*dz
   q(n+1,m,l+1)=qm*dx*(1.-dy)*dz
   q(n,m+1,l+1)=qm*(1.-dx)*dy*dz
   q(n+1,m+1,l+1)=qm*dx*dy*dz
   where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = position z of particle n in tile m
   q[l][k][j] = charge density at grid point j,k,l
   kpic = number of particles per tile
   qm = charge on particle, in units of e
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 6
   mx/my/mz = number of grids in sorting cell in x/y/z
   nxv = first dimension of charge array, must be >= nx+1
   nyv = second dimension of charge array, must be >= ny+1
   nzv = third dimension of charge array, must be >= nz+1
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   mxyz1 = mx1*my1*mz1,
   where mz1 = (system length in z direction - 1)/mz + 1
   requires KNC, ppart needs to be 64 byte aligned
   nppmx needs to be a multiple of 16
local data                                                            */
#define MXV             17
#define MYV             17
#define MZV             17
   int mxy1, noff, moff, loff, npoff, npp, nps;
   int i, j, k, l, m, nn, mm, ll, nm, lm, mxv, myv, mxyv, nxyv;
   float x, y, z, w, dx1, dxp, dyp, dzp, amx, amy, amz;
   __m512i v_noff, v_moff, v_loff, v_mxv, v_mxyv;
   __m512i v_nn, v_mm, v_ll, v_it;
   __m512 v_qm, v_one;
   __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
   __m512 v_dx1, v_as, v_at;
   __m512 a, b, c, d, e, f, g, h, qp, qr;
   __mmask16 msk, msks, v_m;
   __attribute__((aligned(64))) unsigned int kk[16];
   __attribute__((aligned(64))) float sq[MXV*MYV*MZV];
/* __attribute__((aligned(64))) float sq[(mx+1)*(my+1)*(mz+1)]; */
   mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
   mxv = mx + 1;
   myv = my + 1;
   mxyv = mxv*myv;
   nxyv = nxv*nyv;
   v_mxv = _mm512_set1_epi32(mxv);
   v_mxyv = _mm512_set1_epi32(mxyv);
   v_qm = _mm512_set1_ps(qm);
   v_one = _mm512_set1_ps(1.0f);
/* v_m = mask with lowest lane clear (lane 0 holds 1.0, so the LT     */
/* compare fails there); used below to skip the duplicated first      */
/* element when merging the tile accumulator into the global array    */
   v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,
                        1.);
   v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/*    return;                                     */
#pragma omp parallel for \
private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,lm,x,y,z,w, \
dxp,dyp,dzp,amx,amy,amz,dx1,v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_it, \
v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1,v_at,v_as,a,b,c, \
d,e,f,g,h,qp,qr,msk,msks,kk,sq)
   for (l = 0; l < mxyz1; l++) {
/* decode tile index l into grid offsets noff,moff,loff */
      loff = l/mxy1;
      k = l - mxy1*loff;
      loff = mz*loff;
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      v_noff = _mm512_set1_epi32(noff);
      v_moff = _mm512_set1_epi32(moff);
      v_loff = _mm512_set1_epi32(loff);
      npp = kpic[l];
      npoff = idimp*nppmx*l;
/* zero out local accumulator */
/* for (j = 0; j < mxyv*(mz+1); j++) { */
/*    sq[j] = 0.0f;                    */
/* }                                   */
      memset((void*)sq,0,mxyv*(mz+1)*sizeof(float));
      nps = 16*(npp/16);
/* loop over particles in tile in blocks of 16 */
      for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff];         */
/* y = ppart[j+nppmx+npoff];   */
/* z = ppart[j+2*nppmx+npoff]; */
         v_x = _mm512_load_ps(&ppart[j+npoff]);
         v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
         v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
         v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
                _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
         v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
                _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
         v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
                _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = qm*(x - (float) nn); */
         v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
                 _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
         v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp));
/* dyp = y - (float) mm; */
         v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
                 _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
         v_dyp = _mm512_sub_ps(v_y,v_dyp);
/* dzp = z - (float) ll; */
         v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
                 _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
         v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff); */
         v_nn = _mm512_sub_epi32(v_nn,v_noff);
         v_mm = _mm512_sub_epi32(v_mm,v_moff);
         v_ll = _mm512_sub_epi32(v_ll,v_loff);
         v_it = _mm512_mullo_epi32(v_mxyv,v_ll);
         v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv,v_mm));
         v_nn = _mm512_add_epi32(v_nn,v_it);
/* amx = qm - dxp;   */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
         v_amx = _mm512_sub_ps(v_qm,v_dxp);
         v_amy = _mm512_sub_ps(v_one,v_dyp);
         v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
         v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
         v_dyp = _mm512_mul_ps(v_amx,v_dyp);
         v_amx = _mm512_mul_ps(v_amx,v_amy);
         v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* the 8 trilinear weights for all 16 particles: */
/* a = amx*amz; */
/* b = amy*amz; */
/* c = dyp*amz; */
/* d = dx1*amz; */
         a = _mm512_mul_ps(v_amx,v_amz);
         b = _mm512_mul_ps(v_amy,v_amz);
         c = _mm512_mul_ps(v_dyp,v_amz);
         d = _mm512_mul_ps(v_dx1,v_amz);
/* e = amx*dzp; */
/* f = amy*dzp; */
/* g = dyp*dzp; */
/* h = dx1*dzp; */
         e = _mm512_mul_ps(v_amx,v_dzp);
         f = _mm512_mul_ps(v_amy,v_dzp);
         g = _mm512_mul_ps(v_dyp,v_dzp);
         h = _mm512_mul_ps(v_dx1,v_dzp);
/* spill the 16 grid addresses so they can be read as scalars */
         _mm512_store_epi32(kk,v_nn);
/* deposit charge */
/* x = sq[nn] + amx*amz;       */
/* y = sq[nn+1] + amy*amz;     */
/* z = sq[nn+mxv] + dyp*amz;   */
/* w = sq[nn+1+mxv] + dx1*amz; */
/* sq[nn] = x;       */
/* sq[nn+1] = y;     */
/* sq[nn+mxv] = z;   */
/* sq[nn+1+mxv] = w; */
/* mm = nn + mxyv;             */
/* x = sq[mm] + amx*dzp;       */
/* y = sq[mm+1] + amy*dzp;     */
/* z = sq[mm+mxv] + dyp*dzp;   */
/* w = sq[mm+1+mxv] + dx1*dzp; */
/* sq[mm] = x;       */
/* sq[mm+1] = y;     */
/* sq[mm+mxv] = z;   */
/* sq[mm+1+mxv] = w; */
/* deposit charge for two particles at a time */
/* msk selects the 2-lane pair (2*i,2*i+1) of this particle pair;     */
/* msks picks one lane of that pair, and shuffle 177 swaps lanes      */
/* within a pair so the two adjacent grid points nn and nn+1 each     */
/* receive their own weight from lanes 2*i of two weight vectors      */
         for (i = 0; i < 8; i++) {
/* first particle */
            mm = kk[2*i];
            msk = _mm512_int2mask(3<<(2*i));
            msks = _mm512_int2mask(2<<(2*i));
            qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
            qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
            v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)a,msks,
                   (__m512i)b,177);
            qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
            _mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
            _mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
            ll = mm + mxv;
            qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
            qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
            v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)c,msks,
                   (__m512i)d,177);
            qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
            _mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
            _mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
            mm = mm + mxyv;
            qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
            qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
            v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)e,msks,
                   (__m512i)f,177);
            qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
            _mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
            _mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
            ll = mm + mxv;
            qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
            qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
            v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)g,msks,
                   (__m512i)h,177);
            qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
            _mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
            _mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
/* second particle */
            mm = kk[2*i+1];
            msks = _mm512_int2mask(1<<(2*i));
            qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
            qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
            v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)b,msks,
                   (__m512i)a,177);
            qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
            _mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
            _mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
            ll = mm + mxv;
            qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
            qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
            v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)d,msks,
                   (__m512i)c,177);
            qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
            _mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
            _mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
            mm = mm + mxyv;
            qp = _mm512_mask_loadunpacklo_ps(qp,msk,&sq[mm]);
            qp = _mm512_mask_loadunpackhi_ps(qp,msk,&sq[mm+16]);
            v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)f,msks,
                   (__m512i)e,177);
            qp = _mm512_mask_add_ps(qp,msk,qp,v_at);
            _mm512_mask_packstorelo_ps(&sq[mm],msk,qp);
            _mm512_mask_packstorehi_ps(&sq[mm+16],msk,qp);
            ll = mm + mxv;
            qr = _mm512_mask_loadunpacklo_ps(qr,msk,&sq[ll]);
            qr = _mm512_mask_loadunpackhi_ps(qr,msk,&sq[ll+16]);
            v_as = (__m512)_mm512_mask_shuffle_epi32((__m512i)h,msks,
                   (__m512i)g,177);
            qr = _mm512_mask_add_ps(qr,msk,qr,v_as);
            _mm512_mask_packstorelo_ps(&sq[ll],msk,qr);
            _mm512_mask_packstorehi_ps(&sq[ll+16],msk,qr);
         }
      }
/* loop over remaining particles */
      for (j = nps; j < npp; j++) {
/* find interpolation weights */
         x = ppart[j+npoff];
         y = ppart[j+nppmx+npoff];
         z = ppart[j+2*nppmx+npoff];
         nn = x;
         mm = y;
         ll = z;
         dxp = qm*(x - (float) nn);
         dyp = y - (float) mm;
         dzp = z - (float) ll;
         nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff);
         amx = qm - dxp;
         amy = 1.0f - dyp;
         amz = 1.0f - dzp;
         dx1 = dxp*dyp;
         dyp = amx*dyp;
         amx = amx*amy;
         amy = dxp*amy;
/* deposit charge */
         x = sq[nn] + amx*amz;
         y = sq[nn+1] + amy*amz;
         z = sq[nn+mxv] + dyp*amz;
         w = sq[nn+1+mxv] + dx1*amz;
         sq[nn] = x;
         sq[nn+1] = y;
         sq[nn+mxv] = z;
         sq[nn+1+mxv] = w;
         mm = nn + mxyv;
         x = sq[mm] + amx*dzp;
         y = sq[mm+1] + amy*dzp;
         z = sq[mm+mxv] + dyp*dzp;
         w = sq[mm+1+mxv] + dx1*dzp;
         sq[mm] = x;
         sq[mm+1] = y;
         sq[mm+mxv] = z;
         sq[mm+1+mxv] = w;
      }
/* deposit charge to interior points in global array */
      nn = nxv - noff;
      nn = mx < nn ? mx : nn;
      mm = nyv - moff;
      mm = my < mm ? my : mm;
      ll = nzv - loff;
      ll = mz < ll ? mz : ll;
      nps = 16*(nn/16);
      for (k = 1; k < ll; k++) {
         for (j = 1; j < mm; j++) {
/* vector loop over elements in blocks of 16 */
/* for (i = 1; i < nn; i++) {                */
/*    q[i+noff+nxv*(j+moff)+nxyv*(k+loff)]   */
/*    += sq[i+mxv*j+mxyv*k];                 */
/* }                                         */
            for (i = 0; i < nps; i+=16) {
               m = i + mxv*j + mxyv*k;
               v_as = _mm512_loadunpacklo_ps(v_as,&sq[m]);
               v_as = _mm512_loadunpackhi_ps(v_as,&sq[m+16]);
               m = i + noff + nxv*(j + moff) + nxyv*(k + loff);
               v_at = _mm512_loadunpacklo_ps(v_at,&q[m]);
               v_at = _mm512_loadunpackhi_ps(v_at,&q[m+16]);
/* skip add for first element for i = 0 */
               if (i==0)
                  v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as);
               else
                  v_at = _mm512_add_ps(v_at,v_as);
               _mm512_packstorelo_ps(&q[m],v_at);
               _mm512_packstorehi_ps(&q[m+16],v_at);
            }
/* loop over remaining elements */
            m = 1 > nps ? 1 : nps;
            for (i = m ; i < nn; i++) {
               q[i+noff+nxv*(j+moff)+nxyv*(k+loff)]
               += sq[i+mxv*j+mxyv*k];
            }
         }
      }
/* deposit charge to edge points in global array */
/* guard cells may be shared with other tiles, so use omp atomic */
      lm = nzv - loff;
      lm = mz+1 < lm ? mz+1 : lm;
      for (j = 1; j < mm; j++) {
         for (i = 1; i < nn; i++) {
#pragma omp atomic
            q[i+noff+nxv*(j+moff)+nxyv*loff] += sq[i+mxv*j];
            if (lm > mz) {
#pragma omp atomic
               q[i+noff+nxv*(j+moff)+nxyv*(lm+loff-1)]
               += sq[i+mxv*j+mxyv*(lm-1)];
            }
         }
      }
      nm = nxv - noff;
      nm = mx+1 < nm ? mx+1 : nm;
      mm = nyv - moff;
      mm = my+1 < mm ? my+1 : mm;
      for (k = 0; k < ll; k++) {
         for (i = 1; i < nn; i++) {
#pragma omp atomic
            q[i+noff+nxv*moff+nxyv*(k+loff)] += sq[i+mxyv*k];
            if (mm > my) {
#pragma omp atomic
               q[i+noff+nxv*(mm+moff-1)+nxyv*(k+loff)]
               += sq[i+mxv*(mm-1)+mxyv*k];
            }
         }
         for (j = 0; j < mm; j++) {
#pragma omp atomic
            q[noff+nxv*(j+moff)+nxyv*(k+loff)] += sq[mxv*j+mxyv*k];
            if (nm > mx) {
#pragma omp atomic
               q[nm+noff-1+nxv*(j+moff)+nxyv*(k+loff)]
               += sq[nm-1+mxv*j+mxyv*k];
            }
         }
      }
      if (lm > mz) {
         for (i = 1; i < nn; i++) {
#pragma omp atomic
            q[i+noff+nxv*moff+nxyv*(lm+loff-1)] += sq[i+mxyv*(lm-1)];
            if (mm > my) {
#pragma omp atomic
               q[i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1)]
               += sq[i+mxv*(mm-1)+mxyv*(lm-1)];
            }
         }
         for (j = 0; j < mm; j++) {
#pragma omp atomic
            q[noff+nxv*(j+moff)+nxyv*(lm+loff-1)]
            += sq[mxv*j+mxyv*(lm-1)];
            if (nm > mx) {
#pragma omp atomic
               q[nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1)]
               += sq[nm-1+mxv*j+mxyv*(lm-1)];
            }
         }
      }
   }
   return;
#undef MXV
#undef MYV
#undef MZV
}

/*--------------------------------------------------------------------*/
void cknc2gppost3lt(float ppart[], float q[], int kpic[], float qm,
                    int nppmx, int idimp, int mx, int my, int mz,
                    int nxv, int nyv, int nzv, int mx1, int my1,
                    int mxyz1) {
/* for 3d code, this subroutine calculates particle charge density
   using first-order linear interpolation, periodic boundaries
   OpenMP/vector version using guard cells
   data deposited in tiles
   particles stored segmented array
   33 flops/particle, 11 loads, 8 stores
   input: all, output: q
   charge density is approximated by values at the nearest grid points
   q(n,m,l)=qm*(1.-dx)*(1.-dy)*(1.-dz)
   q(n+1,m,l)=qm*dx*(1.-dy)*(1.-dz)
   q(n,m+1,l)=qm*(1.-dx)*dy*(1.-dz)
   q(n+1,m+1,l)=qm*dx*dy*(1.-dz)
   q(n,m,l+1)=qm*(1.-dx)*(1.-dy)*dz
   q(n+1,m,l+1)=qm*dx*(1.-dy)*dz
   q(n,m+1,l+1)=qm*(1.-dx)*dy*dz
   q(n+1,m+1,l+1)=qm*dx*dy*dz
   where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = position z
of particle n in tile m q[l][k][j] = charge density at grid point j,k,l kpic = number of particles per tile qm = charge on particle, in units of e nppmx = maximum number of particles in tile idimp = size of phase space = 6 mx/my/mz = number of grids in sorting cell in x/y/z nxv = first dimension of charge array, must be >= nx+1 nyv = second dimension of charge array, must be >= ny+1 nzv = third dimension of charge array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 requires KNC, ppart needs to be 64 byte aligned nppmx needs to be a multiple of 16 local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp, nps; int i, j, k, l, m, nn, mm, ll, nm, lm, mxv, myv, mxyv, nxyv; float x, y, z, w, dx1, dxp, dyp, dzp, amx, amy, amz; __m512i v_noff, v_moff, v_loff, v_mxv, v_mxyv; __m512i v_nn, v_mm, v_ll, v_it; __m512 v_qm, v_one; __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz; __m512 v_dx1, v_as, v_at; __mmask16 v_m; __attribute__((aligned(64))) unsigned int kk[16]; typedef union vfloat {float v[16]; __m512 v16;} vf; __attribute__((aligned(64))) float sq[MXV*MYV*MZV]; /* __attribute__((aligned(64))) float sq[(mx+1)*(my+1)*(mz+1)]; */ vf vv[8]; mxy1 = mx1*my1; /* mxv = MXV; */ /* myv = MYV; */ mxv = mx + 1; myv = my + 1; mxyv = mxv*myv; nxyv = nxv*nyv; v_mxv = _mm512_set1_epi32(mxv); v_mxyv = _mm512_set1_epi32(mxyv); v_qm = _mm512_set1_ps(qm); v_one = _mm512_set1_ps(1.0f); v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0., 1.); v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ #pragma omp parallel for \ private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,nm,lm,x,y,z,w, \ dxp,dyp,dzp,amx,amy,amz,dx1,v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_it, \ 
v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1,v_at,v_as,kk,sq,vv) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm512_set1_epi32(noff); v_moff = _mm512_set1_epi32(moff); v_loff = _mm512_set1_epi32(loff); npp = kpic[l]; npoff = idimp*nppmx*l; /* zero out local accumulator */ /* for (j = 0; j < mxyv*(mz+1); j++) { */ /* sq[j] = 0.0f; */ /* } */ memset((void*)sq,0,mxyv*(mz+1)*sizeof(float)); nps = 16*(npp/16); /* vector loop over particles in blocks of 16 */ for (j = 0; j < nps; j+=16) { /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ /* z = ppart[j+2*nppmx+npoff]; */ v_x = _mm512_load_ps(&ppart[j+npoff]); v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]); v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]); /* nn = x; */ /* mm = y; */ /* ll = z; */ v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); /* dxp = qm*(x - (float) nn); */ v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp)); /* dyp = y - (float) mm; */ v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dyp = _mm512_sub_ps(v_y,v_dyp); /* dzp = z - (float) ll; */ v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dzp = _mm512_sub_ps(v_z,v_dzp); /* nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff); */ v_nn = _mm512_sub_epi32(v_nn,v_noff); v_mm = _mm512_sub_epi32(v_mm,v_moff); v_ll = _mm512_sub_epi32(v_ll,v_loff); v_it = _mm512_mullo_epi32(v_mxyv,v_ll); v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv,v_mm)); v_nn = _mm512_add_epi32(v_nn,v_it); /* amx = qm - dxp; */ /* amy = 1.0f - dyp; */ /* amz = 1.0f - dzp; */ v_amx = 
_mm512_sub_ps(v_qm,v_dxp); v_amy = _mm512_sub_ps(v_one,v_dyp); v_amz = _mm512_sub_ps(v_one,v_dzp); /* dx1 = dxp*dyp; */ /* dyp = amx*dyp; */ /* amx = amx*amy; */ /* amy = dxp*amy; */ v_dx1 = _mm512_mul_ps(v_dxp,v_dyp); v_dyp = _mm512_mul_ps(v_amx,v_dyp); v_amx = _mm512_mul_ps(v_amx,v_amy); v_amy = _mm512_mul_ps(v_dxp,v_amy); /* x = amx*amz; */ /* y = amy*amz; */ /* z = dyp*amz; */ /* w = dx1*amz; */ vv[0].v16 = _mm512_mul_ps(v_amx,v_amz); vv[1].v16 = _mm512_mul_ps(v_amy,v_amz); vv[2].v16 = _mm512_mul_ps(v_dyp,v_amz); vv[3].v16 = _mm512_mul_ps(v_dx1,v_amz); vv[4].v16 = _mm512_mul_ps(v_amx,v_dzp); vv[5].v16 = _mm512_mul_ps(v_amy,v_dzp); vv[6].v16 = _mm512_mul_ps(v_dyp,v_dzp); vv[7].v16 = _mm512_mul_ps(v_dx1,v_dzp); _mm512_store_epi32(kk,v_nn); /* deposit charge */ /* x = sq[nn] + amx*amz; */ /* y = sq[nn+1] + amy*amz; */ /* z = sq[nn+mxv] + dyp*amz; */ /* w = sq[nn+1+mxv] + dx1*amz; */ /* sq[nn] = x; */ /* sq[nn+1] = y; */ /* sq[nn+mxv] = z; */ /* sq[nn+1+mxv] = w; */ /* mm = nn + mxyv; */ /* x = sq[mm] + amx*dzp; */ /* y = sq[mm+1] + amy*dzp; */ /* z = sq[mm+mxv] + dyp*dzp; */ /* w = sq[mm+1+mxv] + dx1*dzp; */ /* sq[mm] = x; */ /* sq[mm+1] = y; */ /* sq[mm+mxv] = z; */ /* sq[mm+1+mxv] = w; */ for (i = 0; i < 16; i++) { nn = kk[i]; x = sq[nn] + vv[0].v[i]; y = sq[nn+1] + vv[1].v[i]; z = sq[nn+mxv] + vv[2].v[i]; w = sq[nn+1+mxv] + vv[3].v[i]; sq[nn] = x; sq[nn+1] = y; sq[nn+mxv] = z; sq[nn+1+mxv] = w; mm = nn + mxyv; x = sq[mm] + vv[4].v[i]; y = sq[mm+1] + vv[5].v[i]; z = sq[mm+mxv] + vv[6].v[i]; w = sq[mm+1+mxv] + vv[7].v[i]; sq[mm] = x; sq[mm+1] = y; sq[mm+mxv] = z; sq[mm+1+mxv] = w; } } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; z = ppart[j+2*nppmx+npoff]; nn = x; mm = y; ll = z; dxp = qm*(x - (float) nn); dyp = y - (float) mm; dzp = z - (float) ll; nn = nn - noff + mxv*(mm - moff) + mxyv*(ll - loff); amx = qm - dxp; amy = 1.0f - dyp; amz = 1.0f - dzp; dx1 = 
dxp*dyp; dyp = amx*dyp; amx = amx*amy; amy = dxp*amy; /* deposit charge */ x = sq[nn] + amx*amz; y = sq[nn+1] + amy*amz; z = sq[nn+mxv] + dyp*amz; w = sq[nn+1+mxv] + dx1*amz; sq[nn] = x; sq[nn+1] = y; sq[nn+mxv] = z; sq[nn+1+mxv] = w; mm = nn + mxyv; x = sq[mm] + amx*dzp; y = sq[mm+1] + amy*dzp; z = sq[mm+mxv] + dyp*dzp; w = sq[mm+1+mxv] + dx1*dzp; sq[mm] = x; sq[mm+1] = y; sq[mm+mxv] = z; sq[mm+1+mxv] = w; } /* deposit charge to interior points in global array */ nn = nxv - noff; nn = mx < nn ? mx : nn; mm = nyv - moff; mm = my < mm ? my : mm; ll = nzv - loff; ll = mz < ll ? mz : ll; nps = 16*(nn/16); for (k = 1; k < ll; k++) { for (j = 1; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 1; i < nn; i++) { */ /* q[i+noff+nxv*(j+moff)+nxyv*(k+loff)] */ /* += sq[i+mxv*j+mxyv*k]; */ /* } */ for (i = 0; i < nps; i+=16) { m = i + mxv*j + mxyv*k; v_as = _mm512_loadunpacklo_ps(v_as,&sq[m]); v_as = _mm512_loadunpackhi_ps(v_as,&sq[m+16]); m = i + noff + nxv*(j + moff) + nxyv*(k + loff); v_at = _mm512_loadunpacklo_ps(v_at,&q[m]); v_at = _mm512_loadunpackhi_ps(v_at,&q[m+16]); /* skip add for first element for i = 0 */ if (i==0) v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as); else v_at = _mm512_add_ps(v_at,v_as); _mm512_packstorelo_ps(&q[m],v_at); _mm512_packstorehi_ps(&q[m+16],v_at); } /* loop over remaining elements */ m = 1 > nps ? 1 : nps; for (i = m ; i < nn; i++) { q[i+noff+nxv*(j+moff)+nxyv*(k+loff)] += sq[i+mxv*j+mxyv*k]; } } } /* deposit charge to edge points in global array */ lm = nzv - loff; lm = mz+1 < lm ? mz+1 : lm; for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { #pragma omp atomic q[i+noff+nxv*(j+moff)+nxyv*loff] += sq[i+mxv*j]; if (lm > mz) { #pragma omp atomic q[i+noff+nxv*(j+moff)+nxyv*(lm+loff-1)] += sq[i+mxv*j+mxyv*(lm-1)]; } } } nm = nxv - noff; nm = mx+1 < nm ? mx+1 : nm; mm = nyv - moff; mm = my+1 < mm ? 
my+1 : mm;
   for (k = 0; k < ll; k++) {
      for (i = 1; i < nn; i++) {
#pragma omp atomic
         q[i+noff+nxv*moff+nxyv*(k+loff)] += sq[i+mxyv*k];
         if (mm > my) {
#pragma omp atomic
            q[i+noff+nxv*(mm+moff-1)+nxyv*(k+loff)]
            += sq[i+mxv*(mm-1)+mxyv*k];
         }
      }
      for (j = 0; j < mm; j++) {
#pragma omp atomic
         q[noff+nxv*(j+moff)+nxyv*(k+loff)] += sq[mxv*j+mxyv*k];
         if (nm > mx) {
#pragma omp atomic
            q[nm+noff-1+nxv*(j+moff)+nxyv*(k+loff)]
            += sq[nm-1+mxv*j+mxyv*k];
         }
      }
   }
   if (lm > mz) {
      for (i = 1; i < nn; i++) {
#pragma omp atomic
         q[i+noff+nxv*moff+nxyv*(lm+loff-1)] += sq[i+mxyv*(lm-1)];
         if (mm > my) {
#pragma omp atomic
            q[i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1)]
            += sq[i+mxv*(mm-1)+mxyv*(lm-1)];
         }
      }
      for (j = 0; j < mm; j++) {
#pragma omp atomic
         q[noff+nxv*(j+moff)+nxyv*(lm+loff-1)] += sq[mxv*j+mxyv*(lm-1)];
         if (nm > mx) {
#pragma omp atomic
            q[nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1)]
            += sq[nm-1+mxv*j+mxyv*(lm-1)];
         }
      }
   }
   }
   return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void ckncgjppost3lt(float ppart[], float cu[], int kpic[], float qm,
                    float dt, int nppmx, int idimp, int nx, int ny,
                    int nz, int mx, int my, int mz, int nxv, int nyv,
                    int nzv, int mx1, int my1, int mxyz1, int ipbc) {
/* for 3d code, this subroutine calculates particle current density
   using first-order linear interpolation
   in addition, particle positions are advanced a half time-step
   OpenMP/vector version using guard cells
   data deposited in tiles
   particles stored segmented array
   69 flops/particle, 30 loads, 27 stores
   input: all, output: ppart, cu
   current density is approximated by values at the nearest grid points
   cu(i,n,m,l)=qci*(1.-dx)*(1.-dy)*(1.-dz)
   cu(i,n+1,m,l)=qci*dx*(1.-dy)*(1.-dz)
   cu(i,n,m+1,l)=qci*(1.-dx)*dy*(1.-dz)
   cu(i,n+1,m+1,l)=qci*dx*dy*(1.-dz)
   cu(i,n,m,l+1)=qci*(1.-dx)*(1.-dy)*dz
   cu(i,n+1,m,l+1)=qci*dx*(1.-dy)*dz
   cu(i,n,m+1,l+1)=qci*(1.-dx)*dy*dz
   cu(i,n+1,m+1,l+1)=qci*dx*dy*dz
   where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
   and qci = qm*vi, where i = x,y,z
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = position z of particle n in tile m
   ppart[m][3][n] = velocity vx of particle n in tile m
   ppart[m][4][n] = velocity vy of particle n in tile m
   ppart[m][5][n] = velocity vz of particle n in tile m
   cu[l][k][j][i] = ith component of current density at grid point j,k,l
   kpic = number of particles per tile
   qm = charge on particle, in units of e
   dt = time interval between successive calculations
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 6
   nx/ny/nz = system length in x/y/z direction
   mx/my/mz = number of grids in sorting cell in x/y/z
   nxv = second dimension of current array, must be >= nx+1
   nyv = third dimension of current array, must be >= ny+1
   nzv = fourth dimension of current array, must be >= nz+1
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   mxyz1 = mx1*my1*mz1,
   where mz1 = (system length in z direction - 1)/mz + 1
   ipbc = particle boundary condition = (0,1,2,3) =
   (none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic)
   requires KNC, part needs to be 64 byte aligned
   nppmx needs to be a multiple of 16
   cu needs to have 4 components, although one is not used
local data */
#define MXV             17
#define MYV             17
#define MZV             17
   int mxy1, noff, moff, loff, npoff, npp, nps;
   int i, j, k, l, m, nn, mm, ll, ii, nm, lm, mxv, myv, mxyv, nxyv;
   float edgelx, edgely, edgelz, edgerx, edgery, edgerz;
   float dxp, dyp, dzp, amx, amy, amz, dx1, dx, dy, dz, vx, vy, vz;
   float x, y, z;
   __m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4;
   __m512i v_nn, v_mm, v_ll, v_it, v_perm;
   __m512 v_qm, v_dt, v_one, v_zero;
   __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
   __m512 v_dx1, v_at, v_as, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz;
   __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
   __m512 a, b, c, d, e, f, g, h, p, q, r, s, t, u, v, ws, wt, wu, wv;
   __m512 cp, cr;
   __mmask16 msk, v_m;
   __attribute__((aligned(64))) unsigned int kk[16];
   __attribute__((aligned(64))) float scu[4*MXV*MYV*MZV];
/* __attribute__((aligned(64))) float scu[4*(mx+1)*(my+1)*(mz+1)]; */
   mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
   mxv = mx+1;
   myv = my+1;
   mxyv = mxv*myv;
   nxyv = nxv*nyv;
/* set boundary values */
   edgelx = 0.0f;
   edgely = 0.0f;
   edgelz = 0.0f;
   edgerx = (float) nx;
   edgery = (float) ny;
   edgerz = (float) nz;
   if (ipbc==2) {
      edgelx = 1.0f;
      edgely = 1.0f;
      edgelz = 1.0f;
      edgerx = (float) (nx-1);
      edgery = (float) (ny-1);
      edgerz = (float) (nz-1);
   }
   else if (ipbc==3) {
      edgelx = 1.0f;
      edgely = 1.0f;
      edgerx = (float) (nx-1);
      edgery = (float) (ny-1);
   }
   v_mxv4 = _mm512_set1_epi32(4*mxv);
   v_mxyv4 = _mm512_set1_epi32(4*mxyv);
   v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0);
/* NOTE(review): v_perm appears to be initialized but never referenced
   in this routine */
   v_qm = _mm512_set1_ps(qm);
   v_dt = _mm512_set1_ps(dt);
   v_one = _mm512_set1_ps(1.0f);
   v_zero = _mm512_setzero_ps();
   v_edgelx = _mm512_set1_ps(edgelx);
   v_edgely = _mm512_set1_ps(edgely);
   v_edgelz = _mm512_set1_ps(edgelz);
   v_edgerx = _mm512_set1_ps(edgerx);
   v_edgery = _mm512_set1_ps(edgery);
   v_edgerz = _mm512_set1_ps(edgerz);
   v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,1.,1.,1.,
                        1.);
   v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/*    return; */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ii,nm,lm,x,y, \
z,vx,vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,v_noff,v_moff,v_loff, \
v_nn,v_mm,v_ll,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz, \
v_dx1,v_dx,v_dy,v_dz,v_vx,v_vy,v_vz,v_at,v_as,a,b,c,d,e,f,g,h,p,q,r, \
s,t,u,v,ws,wt,wu,wv,cp,cr,msk,kk,scu)
   for (l = 0; l < mxyz1; l++) {
      loff = l/mxy1;
      k = l - mxy1*loff;
      loff = mz*loff;
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      v_noff = _mm512_set1_epi32(noff);
      v_moff = _mm512_set1_epi32(moff);
      v_loff = _mm512_set1_epi32(loff);
      npp = kpic[l];
      npoff = idimp*nppmx*l;
/* zero out local accumulator */
/*    for (j = 0; j < 4*mxyv*(mz+1); j++) { */
/*       scu[j] = 0.0f; */
/*    } */
      memset((void*)scu,0,4*mxyv*(mz+1)*sizeof(float));
      nps = 16*(npp/16);
/* loop over particles in tile in blocks of 16 */
      for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/*       x = ppart[j+npoff]; */
/*       y = ppart[j+nppmx+npoff]; */
/*       z = ppart[j+2*nppmx+npoff]; */
         v_x = _mm512_load_ps(&ppart[j+npoff]);
         v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
         v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/*       nn = x; */
/*       mm = y; */
/*       ll = z; */
         v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
                _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
         v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
                _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
         v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
                _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/*       dxp = qm*(x - (float) nn); */
/*       dyp = y - (float) mm; */
/*       dzp = z - (float) ll; */
         v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
                 _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
         v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp));
         v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
                 _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
         v_dyp = _mm512_sub_ps(v_y,v_dyp);
         v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
                 _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
         v_dzp = _mm512_sub_ps(v_z,v_dzp);
/*       nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */
         v_nn = _mm512_sub_epi32(v_nn,v_noff);
         v_mm = _mm512_sub_epi32(v_mm,v_moff);
         v_ll = _mm512_sub_epi32(v_ll,v_loff);
         v_it = _mm512_mullo_epi32(v_mxyv4,v_ll);
         v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm));
         v_nn = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it);
/*       amx = qm - dxp; */
/*       amy = 1.0f - dyp; */
/*       amz = 1.0f - dzp; */
         v_amx = _mm512_sub_ps(v_qm,v_dxp);
         v_amy = _mm512_sub_ps(v_one,v_dyp);
         v_amz = _mm512_sub_ps(v_one,v_dzp);
/*       dx1 = dxp*dyp; */
/*       dyp = amx*dyp; */
/*       amx = amx*amy; */
/*       amy = dxp*amy; */
         v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
         v_dyp = _mm512_mul_ps(v_amx,v_dyp);
         v_amx = _mm512_mul_ps(v_amx,v_amy);
         v_amy = _mm512_mul_ps(v_dxp,v_amy);
/*       a = amx*amz; */
/*       b = amy*amz; */
/*       c = dyp*amz; */
/*       d = dx1*amz; */
         a = _mm512_mul_ps(v_amx,v_amz);
         b = _mm512_mul_ps(v_amy,v_amz);
         c = _mm512_mul_ps(v_dyp,v_amz);
         d = _mm512_mul_ps(v_dx1,v_amz);
/*       e = amx*dzp; */
/*       f = amy*dzp; */
/*       g = dyp*dzp; */
/*       h = dx1*dzp; */
         e = _mm512_mul_ps(v_amx,v_dzp);
         f = _mm512_mul_ps(v_amy,v_dzp);
         g = _mm512_mul_ps(v_dyp,v_dzp);
         h = _mm512_mul_ps(v_dx1,v_dzp);
/* deposit current */
/*       vx = ppart[j+3*nppmx+npoff]; */
/*       vy = ppart[j+4*nppmx+npoff]; */
/*       vz = ppart[j+5*nppmx+npoff]; */
         v_vx = _mm512_load_ps(&ppart[j+3*nppmx+npoff]);
         v_vy = _mm512_load_ps(&ppart[j+4*nppmx+npoff]);
         v_vz = _mm512_load_ps(&ppart[j+5*nppmx+npoff]);
         v_ll = _mm512_add_epi32(v_nn,v_mxyv4);
/* deposit current for one particle at a time */
         for (i = 0; i < 16; i++) {
/* ii = i/4: index of the current group of 4 particles */
            ii = i >> 2;
            if (i==(ii<<2)) {
               switch (ii) {
                  case 0:
/* replicate velocities of first group of 4 particles */
                     p = _mm512_permute4f128_ps(v_vx,0);
                     q = _mm512_permute4f128_ps(v_vy,0);
                     r = _mm512_permute4f128_ps(v_vz,0);
/* regroup weights for first group of 4 particles */
                     s = _mm512_mask_permute4f128_ps(a,
                         _mm512_int2mask(61680),b,177);
                     t = _mm512_mask_permute4f128_ps(c,
                         _mm512_int2mask(61680),d,177);
                     u = _mm512_mask_permute4f128_ps(e,
                         _mm512_int2mask(61680),f,177);
                     v = _mm512_mask_permute4f128_ps(g,
                         _mm512_int2mask(61680),h,177);
                     break;
                  case 1:
/* replicate velocities of second group of 4 particles */
                     p = _mm512_permute4f128_ps(v_vx,85);
                     q = _mm512_permute4f128_ps(v_vy,85);
                     r = _mm512_permute4f128_ps(v_vz,85);
/* regroup weights for second group of 4 particles */
                     s = _mm512_mask_permute4f128_ps(b,
                         _mm512_int2mask(3855),a,177);
                     t = _mm512_mask_permute4f128_ps(d,
                         _mm512_int2mask(3855),c,177);
                     u = _mm512_mask_permute4f128_ps(f,
                         _mm512_int2mask(3855),e,177);
                     v = _mm512_mask_permute4f128_ps(h,
                         _mm512_int2mask(3855),g,177);
                     break;
                  case 2:
/* replicate velocities of third group of 4 particles */
                     p = _mm512_permute4f128_ps(v_vx,170);
                     q = _mm512_permute4f128_ps(v_vy,170);
                     r = _mm512_permute4f128_ps(v_vz,170);
/* regroup weights for third group of 4 particles */
                     s = _mm512_mask_permute4f128_ps(a,
                         _mm512_int2mask(61680),b,177);
                     s = _mm512_permute4f128_ps(s,78);
                     t = _mm512_mask_permute4f128_ps(c,
                         _mm512_int2mask(61680),d,177);
                     t = _mm512_permute4f128_ps(t,78);
                     u = _mm512_mask_permute4f128_ps(e,
                         _mm512_int2mask(61680),f,177);
                     u = _mm512_permute4f128_ps(u,78);
                     v = _mm512_mask_permute4f128_ps(g,
                         _mm512_int2mask(61680),h,177);
                     v = _mm512_permute4f128_ps(v,78);
                     break;
                  case 3:
/* replicate velocities of fourth group of 4 particles */
                     p = _mm512_permute4f128_ps(v_vx,255);
                     q = _mm512_permute4f128_ps(v_vy,255);
                     r = _mm512_permute4f128_ps(v_vz,255);
/* regroup weights for fourth group of 4 particles */
                     s = _mm512_mask_permute4f128_ps(b,
                         _mm512_int2mask(3855),a,177);
                     s = _mm512_permute4f128_ps(s,78);
                     t = _mm512_mask_permute4f128_ps(d,
                         _mm512_int2mask(3855),c,177);
                     t = _mm512_permute4f128_ps(t,78);
                     u = _mm512_mask_permute4f128_ps(f,
                         _mm512_int2mask(3855),e,177);
                     u = _mm512_permute4f128_ps(u,78);
                     v = _mm512_mask_permute4f128_ps(h,
                         _mm512_int2mask(3855),g,177);
                     v = _mm512_permute4f128_ps(v,78);
                     break;
               }
            }
/* v_it is zeroed here and used below as the merge source for the
   masked weight shuffles */
            v_it = _mm512_setzero_epi32();
            switch (i-(ii<<2)) {
/* first particle */
               case 0:
/* reorder velocity components */
                  v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p,
                         _mm512_int2mask(170),(__m512i)q,177);
                  v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
                         _mm512_int2mask(68),(__m512i)r,78);
/* reorder weights */
                  ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)s,0);
                  wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)t,0);
                  wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)u,0);
                  wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)v,0);
                  break;
/* second particle */
               case 1:
/* reorder velocity components */
                  v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q,
                         _mm512_int2mask(85),(__m512i)p,177);
                  v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
                         _mm512_int2mask(68),(__m512i)r,24);
/* reorder weights */
                  ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)s,85);
                  wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)t,85);
                  wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)u,85);
                  wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)v,85);
                  break;
/* third particle */
               case 2:
/* reorder velocity components */
                  v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p,
                         _mm512_int2mask(170),(__m512i)q,177);
                  v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)r,
                         _mm512_int2mask(51),(__m512i)v_at,78);
/* reorder weights */
                  ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)s,170);
                  wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)t,170);
                  wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)u,170);
                  wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)v,170);
                  break;
/* fourth particle */
               case 3:
/* reorder velocity components */
                  v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q,
                         _mm512_int2mask(85),(__m512i)p,177);
                  v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_at,78);
                  v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at,
                         _mm512_int2mask(68),(__m512i)r,177);
/* reorder weights */
                  ws = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)s,255);
                  wt = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)t,255);
                  wu = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)u,255);
                  wv = (__m512)_mm512_mask_shuffle_epi32(v_it,
                       _mm512_int2mask(119),(__m512i)v,255);
                  break;
            }
            _mm512_store_epi32(kk,v_nn);
/* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */
/*          dx = amx*amz; */
/*          dy = amy*amz; */
/*          scu[nn] += vx*dx; */
/*          scu[nn+1] += vy*dx; */
/*          scu[nn+2] += vz*dx; */
/*          dx = dyp*amz; */
/*          scu[nn+4] += vx*dy; */
/*          scu[nn+1+4] += vy*dy; */
/*          scu[nn+2+4] += vz*dy; */
            mm = kk[i];
            cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
                 &scu[mm]);
            cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
                 &scu[mm+16]);
            cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),ws,cp);
            _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp);
            _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp);
/*          mm = nn + 4*mxv; */
/* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */
/*          dx = dyp*amz; */
/*          dy = dx1*amz; */
/*          scu[mm] += vx*dx; */
/*          scu[mm+1] += vy*dx; */
/*          scu[mm+2] += vz*dx; */
/*          scu[mm+4] += vx*dy; */
/*          scu[mm+1+4] += vy*dy; */
/*          scu[mm+2+4] += vz*dy; */
            mm = kk[i] + 4*mxv;
            cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255),
                 &scu[mm]);
            cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255),
                 &scu[mm+16]);
            cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wt,cr);
            _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr);
            _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr);
            _mm512_store_epi32(kk,v_ll);
/*          nn += 4*mxyv; */
/* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */
/*          dx = amx*dzp; */
/*          dy = amy*dzp; */
/*          scu[nn] += vx*dx; */
/*          scu[nn+1] += vy*dx; */
/*          scu[nn+2] += vz*dx; */
/*          scu[nn+4] += vx*dy; */
/*          scu[nn+1+4] += vy*dy; */
/*          scu[nn+2+4] += vz*dy; */
            mm = kk[i];
            cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
                 &scu[mm]);
            cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
                 &scu[mm+16]);
            cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wu,cp);
            _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp);
            _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp);
/*          mm = nn + 4*mxv; */
/* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */
/*          dx = dyp*dzp; */
/*          dy = dx1*dzp; */
/*          scu[mm] += vx*dx; */
/*          scu[mm+1] += vy*dx; */
/*          scu[mm+2] += vz*dx; */
/*          scu[mm+4] += vx*dy; */
/*          scu[mm+1+4] += vy*dy; */
/*          scu[mm+2+4] += vz*dy; */
            mm = kk[i] + 4*mxv;
            cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255),
                 &scu[mm]);
            cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255),
                 &scu[mm+16]);
            cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wv,cr);
            _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr);
            _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr);
         }
/* advance position half a time-step */
/*       dx = x + vx*dt; */
/*       dy = y + vy*dt; */
/*       dz = z + vz*dt; */
         v_dx = _mm512_fmadd_ps(v_vx,v_dt,v_x);
         v_dy = _mm512_fmadd_ps(v_vy,v_dt,v_y);
         v_dz = _mm512_fmadd_ps(v_vz,v_dt,v_z);
/* reflecting boundary conditions */
         if (ipbc==2) {
/*          if ((dx < edgelx) || (dx >= edgerx)) { */
/*             dx = x; */
/*             ppart[j+3*nppmx+npoff] = -vx; */
/*          } */
            msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
            msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
                  _MM_CMPINT_GE));
            v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
            v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx);
/* write output if test result is true for any particle */
            if (msk)
               _mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx);
/*          if ((dy < edgely) || (dy >= edgery)) { */
/*             dy = y; */
/*             ppart[j+4*nppmx+npoff] = -vy; */
/*          } */
            msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
            msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
                  _MM_CMPINT_GE));
            v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
            v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy);
/* write output if test result is true for any particle */
            if (msk)
               _mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy);
/*          if ((dz < edgelz) || (dz >= edgerz)) { */
/*             dz = z; */
/*             ppart[j+5*nppmx+npoff] = -vz; */
/*          } */
            msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
            msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dz,v_edgerz,
                  _MM_CMPINT_GE));
            v_dz = _mm512_mask_blend_ps(msk,v_dz,v_z);
            v_vz = _mm512_mask_sub_ps(v_vz,msk,v_zero,v_vz);
/* write output if test result is true for any particle */
            if (msk)
               _mm512_store_ps(&ppart[j+5*nppmx+npoff],v_vz);
         }
/* mixed reflecting/periodic boundary conditions */
         else if (ipbc==3) {
/*          if ((dx < edgelx) || (dx >= edgerx)) { */
/*             dx = x; */
/*             ppart[j+3*nppmx+npoff] = -vx; */
/*          } */
            msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
            msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
                  _MM_CMPINT_GE));
            v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
            v_vx = _mm512_mask_sub_ps(v_vx,msk,v_zero,v_vx);
/* write output if test result is true for any particle */
            if (msk)
               _mm512_store_ps(&ppart[j+3*nppmx+npoff],v_vx);
/*          if ((dy < edgely) || (dy >= edgery)) { */
/*             dy = y; */
/*             ppart[j+4*nppmx+npoff] = -vy; */
/*          } */
            msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
            msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
                  _MM_CMPINT_GE));
            v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
            v_vy = _mm512_mask_sub_ps(v_vy,msk,v_zero,v_vy);
/* write output if test result is true for any particle */
            if (msk)
               _mm512_store_ps(&ppart[j+4*nppmx+npoff],v_vy);
         }
/* set new position */
/*       ppart[j+npoff] = dx; */
/*       ppart[j+nppmx+npoff] = dy; */
/*       ppart[j+2*nppmx+npoff] = dz; */
         _mm512_store_ps(&ppart[j+npoff],v_dx);
         _mm512_store_ps(&ppart[j+nppmx+npoff],v_dy);
         _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz);
      }
/* loop over remaining particles */
      for (j = nps; j < npp; j++) {
/* find interpolation weights */
         x = ppart[j+npoff];
         y = ppart[j+nppmx+npoff];
         z = ppart[j+2*nppmx+npoff];
         nn = x;
         mm = y;
         ll = z;
         dxp = qm*(x - (float) nn);
         dyp = y - (float) mm;
         dzp = z - (float) ll;
         nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
         amx = qm - dxp;
         amy = 1.0f - dyp;
         dx1 = dxp*dyp;
         dyp = amx*dyp;
         amx = amx*amy;
         amz = 1.0f - dzp;
         amy = dxp*amy;
/* deposit current within tile to local accumulator */
         dx = amx*amz;
         dy = amy*amz;
         vx = ppart[j+3*nppmx+npoff];
         vy = ppart[j+4*nppmx+npoff];
         vz = ppart[j+5*nppmx+npoff];
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         dx = dyp*amz;
         scu[nn+4] += vx*dy;
         scu[nn+1+4] += vy*dy;
         scu[nn+2+4] += vz*dy;
         dy = dx1*amz;
         mm = nn + 4*mxv;
         scu[mm] += vx*dx;
         scu[mm+1] += vy*dx;
         scu[mm+2] += vz*dx;
         dx = amx*dzp;
         scu[mm+4] += vx*dy;
         scu[mm+1+4] += vy*dy;
         scu[mm+2+4] += vz*dy;
         dy = amy*dzp;
         nn += 4*mxyv;
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         dx = dyp*dzp;
         scu[nn+4] += vx*dy;
         scu[nn+1+4] += vy*dy;
         scu[nn+2+4] += vz*dy;
         dy = dx1*dzp;
         mm = nn + 4*mxv;
         scu[mm] += vx*dx;
         scu[mm+1] += vy*dx;
         scu[mm+2] += vz*dx;
         scu[mm+4] += vx*dy;
         scu[mm+1+4] += vy*dy;
         scu[mm+2+4] += vz*dy;
/* advance position half a time-step */
         dx = x + vx*dt;
         dy = y + vy*dt;
         dz = z + vz*dt;
/* reflecting boundary conditions */
         if (ipbc==2) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = x;
               ppart[j+3*nppmx+npoff] = -vx;
            }
            if ((dy < edgely) || (dy >= edgery)) {
               dy = y;
               ppart[j+4*nppmx+npoff] = -vy;
            }
            if ((dz < edgelz) || (dz >= edgerz)) {
               dz = z;
               ppart[j+5*nppmx+npoff] = -vz;
            }
         }
/* mixed reflecting/periodic boundary conditions */
         else if (ipbc==3) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = x;
               ppart[j+3*nppmx+npoff] = -vx;
            }
            if ((dy < edgely) || (dy >= edgery)) {
               dy = y;
               ppart[j+4*nppmx+npoff] = -vy;
            }
         }
/* set new position */
         ppart[j+npoff] = dx;
         ppart[j+nppmx+npoff] = dy;
         ppart[j+2*nppmx+npoff] = dz;
      }
/* deposit current to interior points in global array */
      nn = nxv - noff;
      nn = mx < nn ? mx : nn;
      mm = nyv - moff;
      mm = my < mm ? my : mm;
      ll = nzv - loff;
      ll = mz < ll ? mz : ll;
      nps = 4*(nn/4);
      for (k = 1; k < ll; k++) {
         for (j = 1; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/*          for (i = 1; i < nn; i++) { */
/*             cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */
/*             += scu[4*(i+mxv*j+mxyv*k)]; */
/*             cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */
/*             += scu[1+4*(i+mxv*j+mxyv*k)]; */
/*             cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */
/*             += scu[2+4*(i+mxv*j+mxyv*k)]; */
/*          } */
            for (i = 0; i < nps; i+=4) {
               m = 4*(i + mxv*j + mxyv*k);
               v_as = _mm512_loadunpacklo_ps(v_as,&scu[m]);
               v_as = _mm512_loadunpackhi_ps(v_as,&scu[m+16]);
               m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
               v_at = _mm512_loadunpacklo_ps(v_at,&cu[m]);
               v_at = _mm512_loadunpackhi_ps(v_at,&cu[m+16]);
/* skip add for first elements for i = 0 */
               if (i==0)
                  v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as);
               else
                  v_at = _mm512_add_ps(v_at,v_as);
               _mm512_packstorelo_ps(&cu[m],v_at);
               _mm512_packstorehi_ps(&cu[m+16],v_at);
            }
/* loop over remaining elements */
            m = 1 > nps ? 1 : nps;
            for (i = m; i < nn; i++) {
               cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
               += scu[4*(i+mxv*j+mxyv*k)];
               cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
               += scu[1+4*(i+mxv*j+mxyv*k)];
               cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
               += scu[2+4*(i+mxv*j+mxyv*k)];
            }
         }
      }
/* deposit current to edge points in global array */
      lm = nzv - loff;
      lm = mz+1 < lm ? mz+1 : lm;
      for (j = 1; j < mm; j++) {
         for (i = 1; i < nn; i++) {
#pragma omp atomic
            cu[4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[4*(i+mxv*j)];
#pragma omp atomic
            cu[1+4*(i+noff+nxv*(j+moff)+nxyv*loff)]
            += scu[1+4*(i+mxv*j)];
#pragma omp atomic
            cu[2+4*(i+noff+nxv*(j+moff)+nxyv*loff)]
            += scu[2+4*(i+mxv*j)];
            if (lm > mz) {
#pragma omp atomic
               cu[4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
               += scu[4*(i+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
               cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
               += scu[1+4*(i+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
               cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
               += scu[2+4*(i+mxv*j+mxyv*(lm-1))];
            }
         }
      }
      nm = nxv - noff;
      nm = mx+1 < nm ? mx+1 : nm;
      mm = nyv - moff;
      mm = my+1 < mm ? my+1 : mm;
      for (k = 0; k < ll; k++) {
         for (i = 1; i < nn; i++) {
#pragma omp atomic
            cu[4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[4*(i+mxyv*k)];
#pragma omp atomic
            cu[1+4*(i+noff+nxv*moff+nxyv*(k+loff))]
            += scu[1+4*(i+mxyv*k)];
#pragma omp atomic
            cu[2+4*(i+noff+nxv*moff+nxyv*(k+loff))]
            += scu[2+4*(i+mxyv*k)];
            if (mm > my) {
#pragma omp atomic
               cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
               += scu[4*(i+mxv*(mm-1)+mxyv*k)];
#pragma omp atomic
               cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
               += scu[1+4*(i+mxv*(mm-1)+mxyv*k)];
#pragma omp atomic
               cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
               += scu[2+4*(i+mxv*(mm-1)+mxyv*k)];
            }
         }
         for (j = 0; j < mm; j++) {
#pragma omp atomic
            cu[4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
            += scu[4*(mxv*j+mxyv*k)];
#pragma omp atomic
            cu[1+4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
            += scu[1+4*(mxv*j+mxyv*k)];
#pragma omp atomic
            cu[2+4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
            += scu[2+4*(mxv*j+mxyv*k)];
            if (nm > mx) {
#pragma omp atomic
               cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
               += scu[4*(nm-1+mxv*j+mxyv*k)];
#pragma omp atomic
               cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
               += scu[1+4*(nm-1+mxv*j+mxyv*k)];
#pragma omp atomic
               cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
               += scu[2+4*(nm-1+mxv*j+mxyv*k)];
            }
         }
      }
      if (lm > mz) {
         for (i = 1; i < nn; i++) {
#pragma omp atomic
            cu[4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
            += scu[4*(i+mxyv*(lm-1))];
#pragma omp atomic
            cu[1+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
            += scu[1+4*(i+mxyv*(lm-1))];
#pragma omp atomic
            cu[2+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
            += scu[2+4*(i+mxyv*(lm-1))];
            if (mm > my) {
#pragma omp atomic
               cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
               += scu[4*(i+mxv*(mm-1)+mxyv*(lm-1))];
#pragma omp atomic
               cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
               += scu[1+4*(i+mxv*(mm-1)+mxyv*(lm-1))];
#pragma omp atomic
               cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
               += scu[2+4*(i+mxv*(mm-1)+mxyv*(lm-1))];
            }
         }
         for (j = 0; j < mm; j++) {
#pragma omp atomic
            cu[4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
            += scu[4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
            cu[1+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
            += scu[1+4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
            cu[2+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
            += scu[2+4*(mxv*j+mxyv*(lm-1))];
            if (nm > mx) {
#pragma omp atomic
               cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
               += scu[4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
               cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
               += scu[1+4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
               cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
               += scu[2+4*(nm-1+mxv*j+mxyv*(lm-1))];
            }
         }
      }
   }
   return;
#undef MXV
#undef MYV
#undef MZV
}
/*--------------------------------------------------------------------*/
void ckncgjppostf3lt(float ppart[], float cu[], int kpic[], int ncl[],
                     int ihole[], float qm, float dt, int nppmx,
                     int idimp, int nx, int ny, int nz, int mx, int my,
                     int mz, int nxv, int nyv, int nzv, int mx1,
                     int my1, int mxyz1, int ntmax, int *irc) {
/* for 3d code, this subroutine calculates particle current density
   using first-order linear interpolation, with periodic boundary
   conditions.
   in addition, particle positions are advanced a half time-step
   also determines list of particles which are leaving this tile
   OpenMP/vector version using guard cells
   data deposited in tiles
   particles stored segmented array
   69 flops/particle, 30 loads, 27 stores
   input: all except ncl, ihole, irc
   output: ppart, cu, ncl, ihole, irc
   current density is approximated by values at the nearest grid points
   cu(i,n,m,l)=qci*(1.-dx)*(1.-dy)*(1.-dz)
   cu(i,n+1,m,l)=qci*dx*(1.-dy)*(1.-dz)
   cu(i,n,m+1,l)=qci*(1.-dx)*dy*(1.-dz)
   cu(i,n+1,m+1,l)=qci*dx*dy*(1.-dz)
   cu(i,n,m,l+1)=qci*(1.-dx)*(1.-dy)*dz
   cu(i,n+1,m,l+1)=qci*dx*(1.-dy)*dz
   cu(i,n,m+1,l+1)=qci*(1.-dx)*dy*dz
   cu(i,n+1,m+1,l+1)=qci*dx*dy*dz
   where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
   and qci = qm*vi, where i = x,y,z
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = position z of particle n in tile m
   ppart[m][3][n] = velocity vx of particle n in tile m
   ppart[m][4][n] = velocity vy of particle n in tile m
   ppart[m][5][n] = velocity vz of particle n in tile m
   cu[l][k][j][i] = ith component of current density at grid point j,k,l
   kpic[l] = number of particles in tile l
   ncl[l][i] = number of particles going to destination i, tile l
   ihole[l][:][0] = location of hole in array left by departing particle
   ihole[l][:][1] = direction destination of particle leaving hole
   all for tile l
   ihole[l][0][0] = ih, number of holes left (error, if negative)
   qm = charge on particle, in units of e
   dt = time interval between successive calculations
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 6
   nx/ny/nz = system length in x/y/z direction
   mx/my/mz = number of grids in sorting cell in x/y/z
   nxv = second dimension of current array, must be >= nx+1
   nyv = third dimension of current array, must be >= ny+1
   nzv = fourth dimension of current array, must be >= nz+1
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y
direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 requires KNC, part needs to be 64 byte aligned nppmx needs to be a multiple of 16 cu needs to have 4 components, although one is not used optimized version local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp, nps; int i, j, k, l, m, ih, nh, nn, mm, ll, ii, nm, lm, mxv, myv, mxyv; int nxyv; float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dxp, dyp, dzp, amx, amy, amz, dx1, dx, dy, dz, vx, vy, vz; float x, y, z; __m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4; __m512i v_nn, v_mm, v_ll, v_it, v_0, v_1, v_3, v_9, v_perm; __m512 v_qm, v_dt, v_one, v_zero, v_anx, v_any, v_anz; __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz; __m512 v_dx1, v_at, v_as, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz; __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz; __m512 a, b, c, d, e, f, g, h, p, q, r, s, t, u, v, ws, wt, wu, wv; __m512 cp, cr; __mmask16 msk, msk1, msk2, v_m; __attribute__((aligned(64))) unsigned int kk[16]; __attribute__((aligned(64))) float scu[4*MXV*MYV*MZV]; /* __attribute__((aligned(64))) float scu[4*(mx+1)*(my+1)*(mz+1)]; */ mxy1 = mx1*my1; /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; anx = (float) nx; any = (float) ny; anz = (float) nz; /* set boundary values */ v_mxv4 = _mm512_set1_epi32(4*mxv); v_mxyv4 = _mm512_set1_epi32(4*mxyv); v_0 = _mm512_set1_epi32(0); v_1 = _mm512_set1_epi32(1); v_3 = _mm512_set1_epi32(3); v_9 = _mm512_set1_epi32(9); v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0); v_qm = _mm512_set1_ps(qm); v_dt = _mm512_set1_ps(dt); v_one = _mm512_set1_ps(1.0f); v_zero = _mm512_setzero_ps(); v_anx = _mm512_set1_ps(anx); v_any = _mm512_set1_ps(any); v_anz = _mm512_set1_ps(anz); v_at = 
_mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,1.,1.,1., 1.); v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ih,nh,ii,nm,lm, \ x,y,z,vx,vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,edgelx,edgely, \ edgelz,edgerx,edgery,edgerz,v_noff,v_moff,v_loff,v_nn,v_mm,v_ll,v_it, \ v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1,v_dx,v_dy,v_dz, \ v_vx,v_vy,v_vz,v_edgelx,v_edgely,v_edgelz,v_edgerx,v_edgery,v_edgerz, \ v_at,v_as,a,b,c,d,e,f,g,h,p,q,r,s,t,u,v,ws,wt,wu,wv,cp,cr,msk,msk1, \ msk2,kk,scu) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm512_set1_epi32(noff); v_moff = _mm512_set1_epi32(moff); v_loff = _mm512_set1_epi32(loff); npp = kpic[l]; npoff = idimp*nppmx*l; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? my : mm; ll = nz - loff; ll = mz < ll ? 
mz : ll; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; edgelz = loff; edgerz = loff + ll; v_edgelx = _mm512_set1_ps(edgelx); v_edgely = _mm512_set1_ps(edgely); v_edgelz = _mm512_set1_ps(edgelz); v_edgerx = _mm512_set1_ps(edgerx); v_edgery = _mm512_set1_ps(edgery); v_edgerz = _mm512_set1_ps(edgerz); ih = 0; nh = 0; nn += 1; mm += 1; ll += 1; /* zero out local accumulator */ /* for (j = 0; j < 4*mxyv*(mz+1); j++) { */ /* scu[j] = 0.0f; */ /* } */ memset((void*)scu,0,4*mxyv*(mz+1)*sizeof(float)); /* clear counters */ /* for (j = 0; j < 26; j++) { */ /* ncl[j+26*l] = 0; */ /* } */ memset((void*)&ncl[26*l],0,26*sizeof(int)); nps = 16*(npp/16); /* loop over particles in tile in blocks of 16 */ for (j = 0; j < nps; j+=16) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ /* z = ppart[j+2*nppmx+npoff]; */ v_x = _mm512_load_ps(&ppart[j+npoff]); v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]); v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]); /* nn = x; */ /* mm = y; */ /* ll = z; */ v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); /* dxp = qm*(x - (float) nn); */ /* dyp = y - (float) mm; */ /* dzp = z - (float) ll; */ v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp)); v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dyp = _mm512_sub_ps(v_y,v_dyp); v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dzp = _mm512_sub_ps(v_z,v_dzp); /* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */ v_nn = _mm512_sub_epi32(v_nn,v_noff); v_mm = _mm512_sub_epi32(v_mm,v_moff); v_ll = _mm512_sub_epi32(v_ll,v_loff); v_it = 
_mm512_mullo_epi32(v_mxyv4,v_ll); v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm)); v_nn = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it); /* amx = qm - dxp; */ /* amy = 1.0f - dyp; */ /* amz = 1.0f - dzp; */ v_amx = _mm512_sub_ps(v_qm,v_dxp); v_amy = _mm512_sub_ps(v_one,v_dyp); v_amz = _mm512_sub_ps(v_one,v_dzp); /* dx1 = dxp*dyp; */ /* dyp = amx*dyp; */ /* amx = amx*amy; */ /* amy = dxp*amy; */ v_dx1 = _mm512_mul_ps(v_dxp,v_dyp); v_dyp = _mm512_mul_ps(v_amx,v_dyp); v_amx = _mm512_mul_ps(v_amx,v_amy); v_amy = _mm512_mul_ps(v_dxp,v_amy); /* a = amx*amz; */ /* b = amy*amz; */ /* c = dyp*amz; */ /* d = dx1*amz; */ a = _mm512_mul_ps(v_amx,v_amz); b = _mm512_mul_ps(v_amy,v_amz); c = _mm512_mul_ps(v_dyp,v_amz); d = _mm512_mul_ps(v_dx1,v_amz); /* e = amx*dzp; */ /* f = amy*dzp; */ /* g = dyp*dzp; */ /* h = dx1*dzp; */ e = _mm512_mul_ps(v_amx,v_dzp); f = _mm512_mul_ps(v_amy,v_dzp); g = _mm512_mul_ps(v_dyp,v_dzp); h = _mm512_mul_ps(v_dx1,v_dzp); /* deposit current */ /* vx = ppart[j+3*nppmx+npoff]; */ /* vy = ppart[j+4*nppmx+npoff]; */ /* vz = ppart[j+5*nppmx+npoff]; */ v_vx = _mm512_load_ps(&ppart[j+3*nppmx+npoff]); v_vy = _mm512_load_ps(&ppart[j+4*nppmx+npoff]); v_vz = _mm512_load_ps(&ppart[j+5*nppmx+npoff]); v_ll = _mm512_add_epi32(v_nn,v_mxyv4); /* deposit charge for one particle at a time */ for (i = 0; i < 16; i++) { ii = i >> 2; if (i==(ii<<2)) { switch (ii) { case 0: /* replicate velocities of first group of 4 particles */ p = _mm512_permute4f128_ps(v_vx,0); q = _mm512_permute4f128_ps(v_vy,0); r = _mm512_permute4f128_ps(v_vz,0); /* regroup weights for first group of 4 particles */ s = _mm512_mask_permute4f128_ps(a, _mm512_int2mask(61680),b,177); t = _mm512_mask_permute4f128_ps(c, _mm512_int2mask(61680),d,177); u = _mm512_mask_permute4f128_ps(e, _mm512_int2mask(61680),f,177); v = _mm512_mask_permute4f128_ps(g, _mm512_int2mask(61680),h,177); break; case 1: /* replicate velocities of second group of 4 particles */ p = _mm512_permute4f128_ps(v_vx,85); q 
= _mm512_permute4f128_ps(v_vy,85); r = _mm512_permute4f128_ps(v_vz,85); /* regroup weights for second group of 4 particles */ s = _mm512_mask_permute4f128_ps(b, _mm512_int2mask(3855),a,177); t = _mm512_mask_permute4f128_ps(d, _mm512_int2mask(3855),c,177); u = _mm512_mask_permute4f128_ps(f, _mm512_int2mask(3855),e,177); v = _mm512_mask_permute4f128_ps(h, _mm512_int2mask(3855),g,177); break; case 2: /* replicate velocities of third group of 4 particles */ p = _mm512_permute4f128_ps(v_vx,170); q = _mm512_permute4f128_ps(v_vy,170); r = _mm512_permute4f128_ps(v_vz,170); /* regroup weights for third group of 4 particles */ s = _mm512_mask_permute4f128_ps(a, _mm512_int2mask(61680),b,177); s = _mm512_permute4f128_ps(s,78); t = _mm512_mask_permute4f128_ps(c, _mm512_int2mask(61680),d,177); t = _mm512_permute4f128_ps(t,78); u = _mm512_mask_permute4f128_ps(e, _mm512_int2mask(61680),f,177); u = _mm512_permute4f128_ps(u,78); v = _mm512_mask_permute4f128_ps(g, _mm512_int2mask(61680),h,177); v = _mm512_permute4f128_ps(v,78); break; case 3: /* replicate velocities of fourth group of 4 particles */ p = _mm512_permute4f128_ps(v_vx,255); q = _mm512_permute4f128_ps(v_vy,255); r = _mm512_permute4f128_ps(v_vz,255); /* regroup weights for fourth group of 4 particles */ s = _mm512_mask_permute4f128_ps(b, _mm512_int2mask(3855),a,177); s = _mm512_permute4f128_ps(s,78); t = _mm512_mask_permute4f128_ps(d, _mm512_int2mask(3855),c,177); t = _mm512_permute4f128_ps(t,78); u = _mm512_mask_permute4f128_ps(f, _mm512_int2mask(3855),e,177); u = _mm512_permute4f128_ps(u,78); v = _mm512_mask_permute4f128_ps(h, _mm512_int2mask(3855),g,177); v = _mm512_permute4f128_ps(v,78); break; } } v_it = _mm512_setzero_epi32(); switch (i-(ii<<2)) { /* first particle */ case 0: /* reorder velocity components */ v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p, _mm512_int2mask(170),(__m512i)q,177); v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at, _mm512_int2mask(68),(__m512i)r,78); /* reorder weights */ ws = 
(__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)s,0); wt = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)t,0); wu = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)u,0); wv = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)v,0); break; /* second particle */ case 1: /* reorder velocity components */ v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q, _mm512_int2mask(85),(__m512i)p,177); v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at, _mm512_int2mask(68),(__m512i)r,24); /* reorder weights */ ws = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)s,85); wt = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)t,85); wu = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)u,85); wv = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)v,85); break; /* third particle */ case 2: /* reorder velocity components */ v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p, _mm512_int2mask(170),(__m512i)q,177); v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)r, _mm512_int2mask(51),(__m512i)v_at,78); /* reorder weights */ ws = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)s,170); wt = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)t,170); wu = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)u,170); wv = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)v,170); break; /* fourth particle */ case 3: /* reorder velocity components */ v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q, _mm512_int2mask(85),(__m512i)p,177); v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_at,78); v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at, _mm512_int2mask(68),(__m512i)r,177); /* reorder weights */ ws = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)s,255); wt = (__m512)_mm512_mask_shuffle_epi32(v_it, 
_mm512_int2mask(119),(__m512i)t,255); wu = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)u,255); wv = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)v,255); break; } _mm512_store_epi32(kk,v_nn); /* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */ /* dx = amx*amz; */ /* dy = amy*amz; */ /* scu[nn] += vx*dx; */ /* scu[nn+1] += vy*dx; */ /* scu[nn+2] += vz*dx; */ /* dx = dyp*amz; */ /* scu[nn+4] += vx*dy; */ /* scu[nn+1+4] += vy*dy; */ /* scu[nn+2+4] += vz*dy; */ mm = kk[i]; cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255), &scu[mm]); cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255), &scu[mm+16]); cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),ws,cp); _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp); _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp); /* mm = nn + 4*mxv; */ /* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */ /* dx = dyp*amz; */ /* dy = dx1*amz; */ /* scu[mm] += vx*dx; */ /* scu[mm+1] += vy*dx; */ /* scu[mm+2] += vz*dx; */ /* scu[mm+4] += vx*dy; */ /* scu[mm+1+4] += vy*dy; */ /* scu[mm+2+4] += vz*dy; */ mm = kk[i] + 4*mxv; cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255), &scu[mm]); cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255), &scu[mm+16]); cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wt,cr); _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr); _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr); _mm512_store_epi32(kk,v_ll); /* nn += 4*mxyv; */ /* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */ /* dx = amx*dzp; */ /* dy = amy*dzp; */ /* scu[nn] += vx*dx; */ /* scu[nn+1] += vy*dx; */ /* scu[nn+2] += vz*dx; */ /* scu[nn+4] += vx*dy; */ /* scu[nn+1+4] += vy*dy; */ /* scu[nn+2+4] += vz*dy; */ mm = kk[i]; cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255), &scu[mm]); cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255), &scu[mm+16]); cp = 
_mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wu,cp); _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp); _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp); /* mm = nn + 4*mxv; */ /* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */ /* dx = dyp*dzp; */ /* dy = dx1*dzp; */ /* scu[mm] += vx*dx; */ /* scu[mm+1] += vy*dx; */ /* scu[mm+2] += vz*dx; */ /* scu[mm+4] += vx*dy; */ /* scu[mm+1+4] += vy*dy; */ /* scu[mm+2+4] += vz*dy; */ mm = kk[i] + 4*mxv; cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255), &scu[mm]); cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255), &scu[mm+16]); cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wv,cr); _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr); _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr); } /* advance position half a time-step */ /* dx = x + vx*dt; */ /* dy = y + vy*dt; */ /* dz = z + vz*dt; */ v_dx = _mm512_fmadd_ps(v_vx,v_dt,v_x); v_dy = _mm512_fmadd_ps(v_vy,v_dt,v_y); v_dz = _mm512_fmadd_ps(v_vz,v_dt,v_z); /* find particles going out of bounds */ /* mm = 0; */ v_mm = _mm512_setzero_epi32(); /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ /* if (dx >= edgerx) { */ /* if (dx >= anx) */ /* ppart[j+npoff] = dx - anx; */ /* mm = 2; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dx,v_edgerx,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dx; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_1,v_1); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it); msk1 = _mm512_cmp_ps_mask(v_dx,v_anx,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dx,v_anx); ii = 
_mm512_mask2int(msk1); if (ii != 0) v_dx = v_x; } /* if (dx < edgelx) { */ /* if (dx < 0.0) { */ /* dx += anx; */ /* if (dx < anx) */ /* mm = 1; */ /* else */ /* dx = 0.0; */ /* ppart[j+npoff] = dx; */ /* } */ /* else { */ /* mm = 1; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_1); msk2 = _mm512_cmp_ps_mask(v_dx,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dx,v_anx); msk1 = _mm512_cmp_ps_mask(v_x,v_anx,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_mm = _mm512_add_epi32(v_mm,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) v_dx = v_x; } } /* if (dy >= edgery) { */ /* if (dy >= any) */ /* ppart[j+nppmx+npoff] = dy - any; */ /* mm += 6; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dy,v_edgery,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dy; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_3,v_3); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it); msk1 = _mm512_cmp_ps_mask(v_dy,v_any,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dy,v_any); ii = _mm512_mask2int(msk1); if (ii != 0) v_dy = v_x; } /* if (dy < edgely) { */ /* if (dy < 0.0) { */ /* dy += any; */ /* if (dy < any) */ /* mm += 3; */ /* else */ /* dy = 0.0; */ /* ppart[j+nppmx+npoff] = dy; */ /* } */ /* else { */ /* mm += 3; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_3); msk2 = _mm512_cmp_ps_mask(v_dy,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dy,v_any); msk1 = _mm512_cmp_ps_mask(v_x,v_any,_MM_CMPINT_GE); msk1 = 
_mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_mm = _mm512_add_epi32(v_mm,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) v_dy = v_x; } } /* if (dz >= edgerz) { */ /* if (dz >= anz) */ /* ppart[j+2*nppmx+npoff] = dz - anz; */ /* mm += 18; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dz; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_9,v_9); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it); msk1 = _mm512_cmp_ps_mask(v_dz,v_anz,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dz,v_anz); ii = _mm512_mask2int(msk1); if (ii != 0) v_dz = v_x; } /* if (dz < edgelz) { */ /* if (dz < 0.0) { */ /* dz += anz; */ /* if (dz < anz) */ /* mm += 9; */ /* else */ /* dz = 0.0; */ /* ppart[j+2*nppmx+npoff] = dz; */ /* } */ /* else { */ /* mm += 9; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_9); msk2 = _mm512_cmp_ps_mask(v_dz,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dz,v_anz); msk1 = _mm512_cmp_ps_mask(v_x,v_anz,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_mm = _mm512_add_epi32(v_mm,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) v_dz = v_x; } } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ /* ppart[j+2*nppmx+npoff] = dz; */ _mm512_store_ps(&ppart[j+npoff],v_dx); _mm512_store_ps(&ppart[j+nppmx+npoff],v_dy); _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz); /* increment counters */ /* if (mm > 0) { */ /* ncl[mm+26*l-1] += 1; */ /* ih += 1; */ /* if (ih <= ntmax) { */ /* 
ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; */ /* ihole[1+2*(ih+(ntmax+1)*l)] = mm; */ /* } */ /* else { */ /* nh = 1; */ /* } */ /* } */ _mm512_store_epi32(kk,v_mm); for (i = 0; i < 16; i++) { mm = kk[i]; if (mm > 0) { ncl[mm+26*l-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; ihole[1+2*(ih+(ntmax+1)*l)] = mm; } else { nh = 1; } } } } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; z = ppart[j+2*nppmx+npoff]; nn = x; mm = y; ll = z; dxp = qm*(x - (float) nn); dyp = y - (float) mm; dzp = z - (float) ll; nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = qm - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* deposit current within tile to local accumulator */ dx = amx*amz; dy = amy*amz; vx = ppart[j+3*nppmx+npoff]; vy = ppart[j+4*nppmx+npoff]; vz = ppart[j+5*nppmx+npoff]; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = dyp*amz; scu[nn+4] += vx*dy; scu[nn+1+4] += vy*dy; scu[nn+2+4] += vz*dy; dy = dx1*amz; mm = nn + 4*mxv; scu[mm] += vx*dx; scu[mm+1] += vy*dx; scu[mm+2] += vz*dx; dx = amx*dzp; scu[mm+4] += vx*dy; scu[mm+1+4] += vy*dy; scu[mm+2+4] += vz*dy; dy = amy*dzp; nn += 4*mxyv; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = dyp*dzp; scu[nn+4] += vx*dy; scu[nn+1+4] += vy*dy; scu[nn+2+4] += vz*dy; dy = dx1*dzp; mm = nn + 4*mxv; scu[mm] += vx*dx; scu[mm+1] += vy*dx; scu[mm+2] += vz*dx; scu[mm+4] += vx*dy; scu[mm+1+4] += vy*dy; scu[mm+2+4] += vz*dy; /* advance position half a time-step */ dx = x + vx*dt; dy = y + vy*dt; dz = z + vz*dt; /* find particles going out of bounds */ mm = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) dx = dx - anx; 
mm = 2; } else if (dx < edgelx) { if (dx < 0.0f) { dx += anx; if (dx < anx) mm = 1; else dx = 0.0f; } else { mm = 1; } } if (dy >= edgery) { if (dy >= any) dy = dy - any; mm += 6; } else if (dy < edgely) { if (dy < 0.0f) { dy += any; if (dy < any) mm += 3; else dy = 0.0f; } else { mm += 3; } } if (dz >= edgerz) { if (dz >= anz) dz = dz - anz; mm += 18; } else if (dz < edgelz) { if (dz < 0.0f) { dz += anz; if (dz < anz) mm += 9; else dz = 0.0f; } else { mm += 9; } } /* set new position */ ppart[j+npoff] = dx; ppart[j+nppmx+npoff] = dy; ppart[j+2*nppmx+npoff] = dz; /* increment counters */ if (mm > 0) { ncl[mm+26*l-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*l)] = j + 1; ihole[1+2*(ih+(ntmax+1)*l)] = mm; } else { nh = 1; } } } /* deposit current to interior points in global array */ nn = nxv - noff; nn = mx < nn ? mx : nn; mm = nyv - moff; mm = my < mm ? my : mm; ll = nzv - loff; ll = mz < ll ? mz : ll; nps = 4*(nn/4); for (k = 1; k < ll; k++) { for (j = 1; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 1; i < nn; i++) { */ /* cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */ /* += scu[4*(i+mxv*j+mxyv*k)]; */ /* cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */ /* += scu[1+4*(i+mxv*j+mxyv*k)]; */ /* cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */ /* += scu[2+4*(i+mxv*j+mxyv*k)]; */ /* } */ for (i = 0; i < nps; i+=4) { m = 4*(i + mxv*j + mxyv*k); v_as = _mm512_loadunpacklo_ps(v_as,&scu[m]); v_as = _mm512_loadunpackhi_ps(v_as,&scu[m+16]); m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff)); v_at = _mm512_loadunpacklo_ps(v_at,&cu[m]); v_at = _mm512_loadunpackhi_ps(v_at,&cu[m+16]); /* skip add for first elements for i = 0 */ if (i==0) v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as); else v_at = _mm512_add_ps(v_at,v_as); _mm512_packstorelo_ps(&cu[m],v_at); _mm512_packstorehi_ps(&cu[m+16],v_at); } /* loop over remaining elements */ m = 1 > nps ? 
1 : nps; for (i = m; i < nn; i++) { cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[4*(i+mxv*j+mxyv*k)]; cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+4*(i+mxv*j+mxyv*k)]; cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+4*(i+mxv*j+mxyv*k)]; } } } /* deposit current to edge points in global array */ lm = nzv - loff; lm = mz+1 < lm ? mz+1 : lm; for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[4*(i+mxv*j)]; #pragma omp atomic cu[1+4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[1+4*(i+mxv*j)]; #pragma omp atomic cu[2+4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[2+4*(i+mxv*j)]; if (lm > mz) { #pragma omp atomic cu[4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[4*(i+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+4*(i+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+4*(i+mxv*j+mxyv*(lm-1))]; } } } nm = nxv - noff; nm = mx+1 < nm ? mx+1 : nm; mm = nyv - moff; mm = my+1 < mm ? 
my+1 : mm; for (k = 0; k < ll; k++) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[4*(i+mxyv*k)]; #pragma omp atomic cu[1+4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[1+4*(i+mxyv*k)]; #pragma omp atomic cu[2+4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[2+4*(i+mxyv*k)]; if (mm > my) { #pragma omp atomic cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[4*(i+mxv*(mm-1)+mxyv*k)]; #pragma omp atomic cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[1+4*(i+mxv*(mm-1)+mxyv*k)]; #pragma omp atomic cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[2+4*(i+mxv*(mm-1)+mxyv*k)]; } } for (j = 0; j < mm; j++) { #pragma omp atomic cu[4*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[4*(mxv*j+mxyv*k)]; #pragma omp atomic cu[1+4*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+4*(mxv*j+mxyv*k)]; #pragma omp atomic cu[2+4*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+4*(mxv*j+mxyv*k)]; if (nm > mx) { #pragma omp atomic cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[4*(nm-1+mxv*j+mxyv*k)]; #pragma omp atomic cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+4*(nm-1+mxv*j+mxyv*k)]; #pragma omp atomic cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+4*(nm-1+mxv*j+mxyv*k)]; } } } if (lm > mz) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[4*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[4*(i+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[1+4*(i+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[2+4*(i+mxyv*(lm-1))]; if (mm > my) { #pragma omp atomic cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[4*(i+mxv*(mm-1)+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[1+4*(i+mxv*(mm-1)+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[2+4*(i+mxv*(mm-1)+mxyv*(lm-1))]; } } for (j = 0; j < mm; j++) { #pragma omp atomic cu[4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += 
scu[4*(mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+4*(mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+4*(mxv*j+mxyv*(lm-1))]; if (nm > mx) { #pragma omp atomic cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[4*(nm-1+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+4*(nm-1+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+4*(nm-1+mxv*j+mxyv*(lm-1))]; } } } /* set error and end of file flag */ if (nh > 0) { *irc = ih; ih = -ih; } ihole[2*(ntmax+1)*l] = ih; } return; #undef MXV #undef MYV #undef MZV } /*--------------------------------------------------------------------*/ void ckncgrjppost3lt(float ppart[], float cu[], int kpic[], float qm, float dt, float ci, int nppmx, int idimp, int nx, int ny, int nz, int mx, int my, int mz, int nxv, int nyv, int nzv, int mx1, int my1, int mxyz1, int ipbc) { /* for 3d code, this subroutine calculates particle current density using first-order linear interpolation for relativistic particles in addition, particle positions are advanced a half time-step OpenMP/vector version using guard cells data deposited in tiles particles stored segmented array 79 flops/particle, 1 divide, 1 sqrt, 30 loads, 27 stores input: all, output: ppart, cu current density is approximated by values at the nearest grid points cu(i,n,m,l)=qci*(1.-dx)*(1.-dy)*(1.-dz) cu(i,n+1,m,l)=qci*dx*(1.-dy)*(1.-dz) cu(i,n,m+1,l)=qci*(1.-dx)*dy*(1.-dz) cu(i,n+1,m+1,l)=qci*dx*dy*(1.-dz) cu(i,n,m,l+1)=qci*(1.-dx)*(1.-dy)*dz cu(i,n+1,m,l+1)=qci*dx*(1.-dy)*dz cu(i,n,m+1,l+1)=qci*(1.-dx)*dy*dz cu(i,n+1,m+1,l+1)=qci*dx*dy*dz where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l and qci = qm*pi*gami, where i = x,y,z where gami = 1./sqrt(1.+sum(pi**2)*ci*ci) ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m 
ppart[m][2][n] = position z of particle n in tile m ppart[m][3][n] = momentum vx of particle n in tile m ppart[m][4][n] = momentum vy of particle n in tile m ppart[m][5][n] = momentum vz of particle n in tile m cu[l][k][j][i] = ith component of current density at grid point j,k,l kpic = number of particles per tile qm = charge on particle, in units of e dt = time interval between successive calculations ci = reciprocal of velocity of light nppmx = maximum number of particles in tile idimp = size of phase space = 6 nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z nxv = second dimension of current array, must be >= nx+1 nyv = third dimension of current array, must be >= ny+1 nzv = fourth dimension of current array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ipbc = particle boundary condition = (0,1,2,3) = (none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic) requires KNC, part needs to be 64 byte aligned nppmx needs to be a multiple of 16 cu needs to have 4 components, although one is not used local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp, nps; int i, j, k, l, m, nn, mm, ll, ii, nm, lm, mxv, myv, mxyv, nxyv; float ci2, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dxp, dyp, dzp, amx, amy, amz, dx1, dx, dy, dz, vx, vy, vz; float p2, gami; float x, y, z, ux, uy, uz; __m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4; __m512i v_nn, v_mm, v_ll, v_it, v_perm; __m512 v_qm, v_ci2, v_dt, v_one, v_zero; __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz; __m512 v_dx1, v_gami, v_at, v_as, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz; __m512 v_ux, v_uy, v_uz; __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz; __m512 a, b, c, d, e, f, g, h, p, q, r, s, t, u, v, ws, wt, wu, wv; __m512 cp, cr; __mmask16 msk, 
v_m; __attribute__((aligned(64))) unsigned int kk[16]; __attribute__((aligned(64))) float scu[4*MXV*MYV*MZV]; /* __attribute__((aligned(64))) float scu[4*(mx+1)*(my+1)*(mz+1)]; */ mxy1 = mx1*my1; /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; ci2 = ci*ci; /* set boundary values */ edgelx = 0.0f; edgely = 0.0f; edgelz = 0.0f; edgerx = (float) nx; edgery = (float) ny; edgerz = (float) nz; if (ipbc==2) { edgelx = 1.0f; edgely = 1.0f; edgelz = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); edgerz = (float) (nz-1); } else if (ipbc==3) { edgelx = 1.0f; edgely = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); } v_mxv4 = _mm512_set1_epi32(4*mxv); v_mxyv4 = _mm512_set1_epi32(4*mxyv); v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0); v_qm = _mm512_set1_ps(qm); v_ci2 = _mm512_set1_ps(ci2); v_dt = _mm512_set1_ps(dt); v_one = _mm512_set1_ps(1.0f); v_zero = _mm512_setzero_ps(); v_edgelx = _mm512_set1_ps(edgelx); v_edgely = _mm512_set1_ps(edgely); v_edgelz = _mm512_set1_ps(edgelz); v_edgerx = _mm512_set1_ps(edgerx); v_edgery = _mm512_set1_ps(edgery); v_edgerz = _mm512_set1_ps(edgerz); v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,1.,1.,1., 1.); v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ii,nm,lm,x,y,z, \ vx,vy,vz,ux,uy,uz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,p2,gami,v_noff, \ v_moff,v_loff,v_nn,v_mm,v_ll,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx, \ v_amy,v_amz,v_dx1,v_dx,v_dy,v_dz,v_vx,v_vy,v_vz,v_ux,v_uy,v_uz,v_gami, \ v_at,v_as,a,b,c,d,e,f,g,h,p,q,r,s,t,u,v,ws,wt,wu,wv,cp,cr,msk,kk,scu) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm512_set1_epi32(noff); v_moff = 
_mm512_set1_epi32(moff); v_loff = _mm512_set1_epi32(loff); npp = kpic[l]; npoff = idimp*nppmx*l; /* zero out local accumulator */ /* for (j = 0; j < 4*mxyv*(mz+1); j++) { */ /* scu[j] = 0.0f; */ /* } */ memset((void*)scu,0,4*mxyv*(mz+1)*sizeof(float)); nps = 16*(npp/16); /* loop over particles in tile in blocks of 16 */ for (j = 0; j < nps; j+=16) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ /* z = ppart[j+2*nppmx+npoff]; */ v_x = _mm512_load_ps(&ppart[j+npoff]); v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]); v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]); /* nn = x; */ /* mm = y; */ /* ll = z; */ v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); /* dxp = qm*(x - (float) nn); */ /* dyp = y - (float) mm; */ /* dzp = z - (float) ll; */ v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp)); v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dyp = _mm512_sub_ps(v_y,v_dyp); v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dzp = _mm512_sub_ps(v_z,v_dzp); /* find inverse gamma */ /* ux = ppart[j+3*nppmx+npoff]; */ /* uy = ppart[j+4*nppmx+npoff]; */ /* uz = ppart[j+5*nppmx+npoff]; */ v_ux = _mm512_load_ps(&ppart[j+3*nppmx+npoff]); v_uy = _mm512_load_ps(&ppart[j+4*nppmx+npoff]); v_uz = _mm512_load_ps(&ppart[j+5*nppmx+npoff]); /* p2 = ux*ux + uy*uy + uz*uz; */ v_at = _mm512_fmadd_ps(v_uy,v_uy,_mm512_mul_ps(v_ux,v_ux)); v_at = _mm512_fmadd_ps(v_uz,v_uz,v_at); /* gami = 1.0f/sqrtf(1.0f + p2*ci2); */ /* approximate calculation */ /* v_gami = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */ /* full accuracy calculation */ v_gami = 
_mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); v_gami = _mm512_div_ps(v_one,v_gami); /* full accuracy calculation with SVML */ /* v_gami = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */ /* calculate weights */ /* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */ v_nn = _mm512_sub_epi32(v_nn,v_noff); v_mm = _mm512_sub_epi32(v_mm,v_moff); v_ll = _mm512_sub_epi32(v_ll,v_loff); v_it = _mm512_mullo_epi32(v_mxyv4,v_ll); v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm)); v_nn = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it); /* amx = qm - dxp; */ /* amy = 1.0f - dyp; */ /* amz = 1.0f - dzp; */ v_amx = _mm512_sub_ps(v_qm,v_dxp); v_amy = _mm512_sub_ps(v_one,v_dyp); v_amz = _mm512_sub_ps(v_one,v_dzp); /* dx1 = dxp*dyp; */ /* dyp = amx*dyp; */ /* amx = amx*amy; */ /* amy = dxp*amy; */ v_dx1 = _mm512_mul_ps(v_dxp,v_dyp); v_dyp = _mm512_mul_ps(v_amx,v_dyp); v_amx = _mm512_mul_ps(v_amx,v_amy); v_amy = _mm512_mul_ps(v_dxp,v_amy); /* a = amx*amz; */ /* b = amy*amz; */ /* c = dyp*amz; */ /* d = dx1*amz; */ a = _mm512_mul_ps(v_amx,v_amz); b = _mm512_mul_ps(v_amy,v_amz); c = _mm512_mul_ps(v_dyp,v_amz); d = _mm512_mul_ps(v_dx1,v_amz); /* e = amx*dzp; */ /* f = amy*dzp; */ /* g = dyp*dzp; */ /* h = dx1*dzp; */ e = _mm512_mul_ps(v_amx,v_dzp); f = _mm512_mul_ps(v_amy,v_dzp); g = _mm512_mul_ps(v_dyp,v_dzp); h = _mm512_mul_ps(v_dx1,v_dzp); /* deposit current */ /* vx = ux*gami; */ /* vy = uy*gami; */ /* vz = uz*gami; */ v_vx = _mm512_mul_ps(v_ux,v_gami); v_vy = _mm512_mul_ps(v_uy,v_gami); v_vz = _mm512_mul_ps(v_uz,v_gami); v_ll = _mm512_add_epi32(v_nn,v_mxyv4); /* deposit charge for one particle at a time */ for (i = 0; i < 16; i++) { ii = i >> 2; if (i==(ii<<2)) { switch (ii) { case 0: /* replicate velocities of first group of 4 particles */ p = _mm512_permute4f128_ps(v_vx,0); q = _mm512_permute4f128_ps(v_vy,0); r = _mm512_permute4f128_ps(v_vz,0); /* regroup weights for first group of 4 particles */ s = _mm512_mask_permute4f128_ps(a, 
_mm512_int2mask(61680),b,177); t = _mm512_mask_permute4f128_ps(c, _mm512_int2mask(61680),d,177); u = _mm512_mask_permute4f128_ps(e, _mm512_int2mask(61680),f,177); v = _mm512_mask_permute4f128_ps(g, _mm512_int2mask(61680),h,177); break; case 1: /* replicate velocities of second group of 4 particles */ p = _mm512_permute4f128_ps(v_vx,85); q = _mm512_permute4f128_ps(v_vy,85); r = _mm512_permute4f128_ps(v_vz,85); /* regroup weights for second group of 4 particles */ s = _mm512_mask_permute4f128_ps(b, _mm512_int2mask(3855),a,177); t = _mm512_mask_permute4f128_ps(d, _mm512_int2mask(3855),c,177); u = _mm512_mask_permute4f128_ps(f, _mm512_int2mask(3855),e,177); v = _mm512_mask_permute4f128_ps(h, _mm512_int2mask(3855),g,177); break; case 2: /* replicate velocities of third group of 4 particles */ p = _mm512_permute4f128_ps(v_vx,170); q = _mm512_permute4f128_ps(v_vy,170); r = _mm512_permute4f128_ps(v_vz,170); /* regroup weights for third group of 4 particles */ s = _mm512_mask_permute4f128_ps(a, _mm512_int2mask(61680),b,177); s = _mm512_permute4f128_ps(s,78); t = _mm512_mask_permute4f128_ps(c, _mm512_int2mask(61680),d,177); t = _mm512_permute4f128_ps(t,78); u = _mm512_mask_permute4f128_ps(e, _mm512_int2mask(61680),f,177); u = _mm512_permute4f128_ps(u,78); v = _mm512_mask_permute4f128_ps(g, _mm512_int2mask(61680),h,177); v = _mm512_permute4f128_ps(v,78); break; case 3: /* replicate velocities of fourth group of 4 particles */ p = _mm512_permute4f128_ps(v_vx,255); q = _mm512_permute4f128_ps(v_vy,255); r = _mm512_permute4f128_ps(v_vz,255); /* regroup weights for fourth group of 4 particles */ s = _mm512_mask_permute4f128_ps(b, _mm512_int2mask(3855),a,177); s = _mm512_permute4f128_ps(s,78); t = _mm512_mask_permute4f128_ps(d, _mm512_int2mask(3855),c,177); t = _mm512_permute4f128_ps(t,78); u = _mm512_mask_permute4f128_ps(f, _mm512_int2mask(3855),e,177); u = _mm512_permute4f128_ps(u,78); v = _mm512_mask_permute4f128_ps(h, _mm512_int2mask(3855),g,177); v = 
_mm512_permute4f128_ps(v,78); break; } } v_it = _mm512_setzero_epi32(); switch (i-(ii<<2)) { /* first particle */ case 0: /* reorder velocity components */ v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p, _mm512_int2mask(170),(__m512i)q,177); v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at, _mm512_int2mask(68),(__m512i)r,78); /* reorder weights */ ws = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)s,0); wt = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)t,0); wu = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)u,0); wv = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)v,0); break; /* second particle */ case 1: /* reorder velocity components */ v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q, _mm512_int2mask(85),(__m512i)p,177); v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at, _mm512_int2mask(68),(__m512i)r,24); /* reorder weights */ ws = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)s,85); wt = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)t,85); wu = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)u,85); wv = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)v,85); break; /* third particle */ case 2: /* reorder velocity components */ v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p, _mm512_int2mask(170),(__m512i)q,177); v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)r, _mm512_int2mask(51),(__m512i)v_at,78); /* reorder weights */ ws = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)s,170); wt = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)t,170); wu = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)u,170); wv = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)v,170); break; /* fourth particle */ case 3: /* reorder velocity components */ v_at = 
(__m512)_mm512_mask_shuffle_epi32((__m512i)q, _mm512_int2mask(85),(__m512i)p,177); v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_at,78); v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at, _mm512_int2mask(68),(__m512i)r,177); /* reorder weights */ ws = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)s,255); wt = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)t,255); wu = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)u,255); wv = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)v,255); break; } _mm512_store_epi32(kk,v_nn); /* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */ /* dx = amx*amz; */ /* dy = amy*amz; */ /* scu[nn] += vx*dx; */ /* scu[nn+1] += vy*dx; */ /* scu[nn+2] += vz*dx; */ /* dx = dyp*amz; */ /* scu[nn+4] += vx*dy; */ /* scu[nn+1+4] += vy*dy; */ /* scu[nn+2+4] += vz*dy; */ mm = kk[i]; cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255), &scu[mm]); cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255), &scu[mm+16]); cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),ws,cp); _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp); _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp); /* mm = nn + 4*mxv; */ /* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */ /* dx = dyp*amz; */ /* dy = dx1*amz; */ /* scu[mm] += vx*dx; */ /* scu[mm+1] += vy*dx; */ /* scu[mm+2] += vz*dx; */ /* scu[mm+4] += vx*dy; */ /* scu[mm+1+4] += vy*dy; */ /* scu[mm+2+4] += vz*dy; */ mm = kk[i] + 4*mxv; cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255), &scu[mm]); cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255), &scu[mm+16]); cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wt,cr); _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr); _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr); _mm512_store_epi32(kk,v_ll); /* nn += 4*mxyv; */ /* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */ /* dx = amx*dzp; */ 
/* dy = amy*dzp; */ /* scu[nn] += vx*dx; */ /* scu[nn+1] += vy*dx; */ /* scu[nn+2] += vz*dx; */ /* scu[nn+4] += vx*dy; */ /* scu[nn+1+4] += vy*dy; */ /* scu[nn+2+4] += vz*dy; */ mm = kk[i]; cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255), &scu[mm]); cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255), &scu[mm+16]); cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wu,cp); _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp); _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp); /* mm = nn + 4*mxv; */ /* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */ /* dx = dyp*dzp; */ /* dy = dx1*dzp; */ /* scu[mm] += vx*dx; */ /* scu[mm+1] += vy*dx; */ /* scu[mm+2] += vz*dx; */ /* scu[mm+4] += vx*dy; */ /* scu[mm+1+4] += vy*dy; */ /* scu[mm+2+4] += vz*dy; */ mm = kk[i] + 4*mxv; cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255), &scu[mm]); cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255), &scu[mm+16]); cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wv,cr); _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr); _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr); } /* advance position half a time-step */ /* dx = x + vx*dt; */ /* dy = y + vy*dt; */ /* dz = z + vz*dt; */ v_dx = _mm512_fmadd_ps(v_vx,v_dt,v_x); v_dy = _mm512_fmadd_ps(v_vy,v_dt,v_y); v_dz = _mm512_fmadd_ps(v_vz,v_dt,v_z); /* reflecting boundary conditions */ if (ipbc==2) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* ppart[j+3*nppmx+npoff] = -ux; */ /* } */ msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx, _MM_CMPINT_GE)); v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x); v_ux = _mm512_mask_sub_ps(v_ux,msk,v_zero,v_ux); /* write output if test result is true for any particle */ if (msk) _mm512_store_ps(&ppart[j+3*nppmx+npoff],v_ux); /* if ((dy < edgely) || (dy >= edgery)) { */ /* dy = y; */ /* ppart[j+4*nppmx+npoff] = -uy; */ /* } */ msk = 
_mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery, _MM_CMPINT_GE)); v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y); v_uy = _mm512_mask_sub_ps(v_uy,msk,v_zero,v_uy); /* write output if test result is true for any particle */ if (msk) _mm512_store_ps(&ppart[j+4*nppmx+npoff],v_uy); /* if ((dz < edgelz) || (dz >= edgerz)) { */ /* dz = z; */ /* ppart[j+5*nppmx+npoff] = -uz; */ /* } */ msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dz,v_edgerz, _MM_CMPINT_GE)); v_dz = _mm512_mask_blend_ps(msk,v_dz,v_z); v_uz = _mm512_mask_sub_ps(v_uz,msk,v_zero,v_uz); /* write output if test result is true for any particle */ if (msk) _mm512_store_ps(&ppart[j+5*nppmx+npoff],v_uz); } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* ppart[j+3*nppmx+npoff] = -ux; */ /* } */ msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx, _MM_CMPINT_GE)); v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x); v_ux = _mm512_mask_sub_ps(v_ux,msk,v_zero,v_ux); /* write output if test result is true for any particle */ if (msk) _mm512_store_ps(&ppart[j+3*nppmx+npoff],v_ux); /* if ((dy < edgely) || (dy >= edgery)) { */ /* dy = y; */ /* ppart[j+4*nppmx+npoff] = -uy; */ /* } */ msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery, _MM_CMPINT_GE)); v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y); v_uy = _mm512_mask_sub_ps(v_uy,msk,v_zero,v_uy); /* write output if test result is true for any particle */ if (msk) _mm512_store_ps(&ppart[j+4*nppmx+npoff],v_uy); } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ /* ppart[j+2*nppmx+npoff] = dz; */ _mm512_store_ps(&ppart[j+npoff],v_dx); _mm512_store_ps(&ppart[j+nppmx+npoff],v_dy); _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz); } /* loop over remaining particles 
*/ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; z = ppart[j+2*nppmx+npoff]; nn = x; mm = y; ll = z; dxp = qm*(x - (float) nn); dyp = y - (float) mm; dzp = z - (float) ll; /* find inverse gamma */ ux = ppart[j+3*nppmx+npoff]; uy = ppart[j+4*nppmx+npoff]; uz = ppart[j+5*nppmx+npoff]; p2 = ux*ux + uy*uy + uz*uz; gami = 1.0f/sqrtf(1.0f + p2*ci2); /* calculate weights */ nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = qm - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* deposit current within tile to local accumulator */ dx = amx*amz; dy = amy*amz; vx = ux*gami; vy = uy*gami; vz = uz*gami; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = dyp*amz; scu[nn+4] += vx*dy; scu[nn+1+4] += vy*dy; scu[nn+2+4] += vz*dy; dy = dx1*amz; mm = nn + 4*mxv; scu[mm] += vx*dx; scu[mm+1] += vy*dx; scu[mm+2] += vz*dx; dx = amx*dzp; scu[mm+4] += vx*dy; scu[mm+1+4] += vy*dy; scu[mm+2+4] += vz*dy; dy = amy*dzp; nn += 4*mxyv; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = dyp*dzp; scu[nn+4] += vx*dy; scu[nn+1+4] += vy*dy; scu[nn+2+4] += vz*dy; dy = dx1*dzp; mm = nn + 4*mxv; scu[mm] += vx*dx; scu[mm+1] += vy*dx; scu[mm+2] += vz*dx; scu[mm+4] += vx*dy; scu[mm+1+4] += vy*dy; scu[mm+2+4] += vz*dy; /* advance position half a time-step */ dx = x + vx*dt; dy = y + vy*dt; dz = z + vz*dt; /* reflecting boundary conditions */ if (ipbc==2) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; ppart[j+3*nppmx+npoff] = -ux; } if ((dy < edgely) || (dy >= edgery)) { dy = y; ppart[j+4*nppmx+npoff] = -uy; } if ((dz < edgelz) || (dz >= edgerz)) { dz = z; ppart[j+5*nppmx+npoff] = -uz; } } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; ppart[j+3*nppmx+npoff] = -ux; } if ((dy < edgely) || (dy >= edgery)) { dy = y; ppart[j+4*nppmx+npoff] = -uy; } } /* set new position */ ppart[j+npoff] = 
dx; ppart[j+nppmx+npoff] = dy; ppart[j+2*nppmx+npoff] = dz; } /* deposit current to interior points in global array */ nn = nxv - noff; nn = mx < nn ? mx : nn; mm = nyv - moff; mm = my < mm ? my : mm; ll = nzv - loff; ll = mz < ll ? mz : ll; nps = 4*(nn/4); for (k = 1; k < ll; k++) { for (j = 1; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 1; i < nn; i++) { */ /* cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */ /* += scu[4*(i+mxv*j+mxyv*k)]; */ /* cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */ /* += scu[1+4*(i+mxv*j+mxyv*k)]; */ /* cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */ /* += scu[2+4*(i+mxv*j+mxyv*k)]; */ /* } */ for (i = 0; i < nps; i+=4) { m = 4*(i + mxv*j + mxyv*k); v_as = _mm512_loadunpacklo_ps(v_as,&scu[m]); v_as = _mm512_loadunpackhi_ps(v_as,&scu[m+16]); m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff)); v_at = _mm512_loadunpacklo_ps(v_at,&cu[m]); v_at = _mm512_loadunpackhi_ps(v_at,&cu[m+16]); /* skip add for first elements for i = 0 */ if (i==0) v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as); else v_at = _mm512_add_ps(v_at,v_as); _mm512_packstorelo_ps(&cu[m],v_at); _mm512_packstorehi_ps(&cu[m+16],v_at); } /* loop over remaining elements */ m = 1 > nps ? 1 : nps; for (i = m; i < nn; i++) { cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[4*(i+mxv*j+mxyv*k)]; cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+4*(i+mxv*j+mxyv*k)]; cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+4*(i+mxv*j+mxyv*k)]; } } } /* deposit current to edge points in global array */ lm = nzv - loff; lm = mz+1 < lm ? 
mz+1 : lm; for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[4*(i+mxv*j)]; #pragma omp atomic cu[1+4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[1+4*(i+mxv*j)]; #pragma omp atomic cu[2+4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[2+4*(i+mxv*j)]; if (lm > mz) { #pragma omp atomic cu[4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[4*(i+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+4*(i+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+4*(i+mxv*j+mxyv*(lm-1))]; } } } nm = nxv - noff; nm = mx+1 < nm ? mx+1 : nm; mm = nyv - moff; mm = my+1 < mm ? my+1 : mm; for (k = 0; k < ll; k++) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[4*(i+mxyv*k)]; #pragma omp atomic cu[1+4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[1+4*(i+mxyv*k)]; #pragma omp atomic cu[2+4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[2+4*(i+mxyv*k)]; if (mm > my) { #pragma omp atomic cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[4*(i+mxv*(mm-1)+mxyv*k)]; #pragma omp atomic cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[1+4*(i+mxv*(mm-1)+mxyv*k)]; #pragma omp atomic cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[2+4*(i+mxv*(mm-1)+mxyv*k)]; } } for (j = 0; j < mm; j++) { #pragma omp atomic cu[4*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[4*(mxv*j+mxyv*k)]; #pragma omp atomic cu[1+4*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+4*(mxv*j+mxyv*k)]; #pragma omp atomic cu[2+4*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+4*(mxv*j+mxyv*k)]; if (nm > mx) { #pragma omp atomic cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[4*(nm-1+mxv*j+mxyv*k)]; #pragma omp atomic cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+4*(nm-1+mxv*j+mxyv*k)]; #pragma omp atomic cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+4*(nm-1+mxv*j+mxyv*k)]; } } } if (lm > mz) { for (i = 1; i < nn; i++) { #pragma omp atomic 
cu[4*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[4*(i+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[1+4*(i+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[2+4*(i+mxyv*(lm-1))]; if (mm > my) { #pragma omp atomic cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[4*(i+mxv*(mm-1)+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[1+4*(i+mxv*(mm-1)+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[2+4*(i+mxv*(mm-1)+mxyv*(lm-1))]; } } for (j = 0; j < mm; j++) { #pragma omp atomic cu[4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[4*(mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+4*(mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+4*(mxv*j+mxyv*(lm-1))]; if (nm > mx) { #pragma omp atomic cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[4*(nm-1+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+4*(nm-1+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+4*(nm-1+mxv*j+mxyv*(lm-1))]; } } } } return; #undef MXV #undef MYV #undef MZV } /*--------------------------------------------------------------------*/ void ckncgrjppostf3lt(float ppart[], float cu[], int kpic[], int ncl[], int ihole[], float qm, float dt, float ci, int nppmx, int idimp, int nx, int ny, int nz, int mx, int my, int mz, int nxv, int nyv, int nzv, int mx1, int my1, int mxyz1, int ntmax, int *irc) { /* for 3d code, this subroutine calculates particle current density using first-order linear interpolation with periodic boundary conditions for relativistic particles. 
in addition, particle positions are advanced a half time-step also determines list of particles which are leaving this tile OpenMP/vector version using guard cells data deposited in tiles particles stored segmented array 79 flops/particle, 1 divide, 1 sqrt, 30 loads, 27 stores input: all except ncl, ihole, irc output: ppart, cu, ncl, ihole, ek, irc current density is approximated by values at the nearest grid points cu(i,n,m,l)=qci*(1.-dx)*(1.-dy)*(1.-dz) cu(i,n+1,m,l)=qci*dx*(1.-dy)*(1.-dz) cu(i,n,m+1,l)=qci*(1.-dx)*dy*(1.-dz) cu(i,n+1,m+1,l)=qci*dx*dy*(1.-dz) cu(i,n,m,l+1)=qci*(1.-dx)*(1.-dy)*dz cu(i,n+1,m,l+1)=qci*dx*(1.-dy)*dz cu(i,n,m+1,l+1)=qci*(1.-dx)*dy*dz cu(i,n+1,m+1,l+1)=qci*dx*dy*dz where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l and qci = qm*pi*gami, where i = x,y,z where gami = 1./sqrt(1.+sum(pi**2)*ci*ci) ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = position z of particle n in tile m ppart[m][3][n] = momentum vx of particle n in tile m ppart[m][4][n] = momentum vy of particle n in tile m ppart[m][5][n] = momentum vz of particle n in tile m cu[l][k][j][i] = ith component of current density at grid point j,k,l kpic[l] = number of particles in tile l ncl[l][i] = number of particles going to destination i, tile l ihole[l][:][0] = location of hole in array left by departing particle ihole[l][:][1] = direction destination of particle leaving hole all for tile l ihole[l][0][0] = ih, number of holes left (error, if negative) qm = charge on particle, in units of e dt = time interval between successive calculations ci = reciprocal of velocity of light nppmx = maximum number of particles in tile idimp = size of phase space = 6 nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z nxv = second dimension of current array, must be >= nx+1 nyv = third dimension of current array, must be >= ny+1 nzv = fourth dimension of current 
array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 requires KNC, part needs to be 64 byte aligned nppmx needs to be a multiple of 16 cu needs to have 4 components, although one is not used local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp, nps; int i, j, k, l, m, ih, nh, nn, mm, ll, ii, nm, lm, mxv, myv, mxyv; int nxyv; float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dxp, dyp, dzp, amx, amy, amz, dx1, dx, dy, dz, vx, vy, vz; float ci2, p2, gami; float x, y, z, ux, uy, uz; __m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4; __m512i v_nn, v_mm, v_ll, v_it, v_0, v_1, v_3, v_9, v_perm; __m512 v_qm, v_ci2, v_dt, v_one, v_zero, v_anx, v_any, v_anz; __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz; __m512 v_dx1, v_gami, v_at, v_as, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz; __m512 v_ux, v_uy, v_uz; __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz; __m512 a, b, c, d, e, f, g, h, p, q, r, s, t, u, v, ws, wt, wu, wv; __m512 cp, cr; __mmask16 msk, msk1, msk2, v_m; __attribute__((aligned(64))) unsigned int kk[16]; __attribute__((aligned(64))) float scu[4*MXV*MYV*MZV]; /* __attribute__((aligned(64))) float scu[4*(mx+1)*(my+1)*(mz+1)]; */ mxy1 = mx1*my1; /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; ci2 = ci*ci; anx = (float) nx; any = (float) ny; anz = (float) nz; /* set boundary values */ v_mxv4 = _mm512_set1_epi32(4*mxv); v_mxyv4 = _mm512_set1_epi32(4*mxyv); v_0 = _mm512_set1_epi32(0); v_1 = _mm512_set1_epi32(1); v_3 = _mm512_set1_epi32(3); v_9 = _mm512_set1_epi32(9); v_perm = _mm512_set_epi32(15,11,7,3,14,10,6,2,13,9,5,1,12,8,4,0); v_qm = _mm512_set1_ps(qm); v_ci2 = 
_mm512_set1_ps(ci2); v_dt = _mm512_set1_ps(dt); v_one = _mm512_set1_ps(1.0f); v_zero = _mm512_setzero_ps(); v_anx = _mm512_set1_ps(anx); v_any = _mm512_set1_ps(any); v_anz = _mm512_set1_ps(anz); v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,1.,1.,1., 1.); v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ii,nm,lm,ih,nh, \ x,y,z,vx,vy,vz,ux,uy,uz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,edgelx, \ edgely,edgelz,edgerx,edgery,edgerz,p2,gami,v_noff,v_moff,v_loff,v_nn, \ v_mm,v_ll,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz,v_dx1, \ v_dx,v_dy,v_dz,v_vx,v_vy,v_vz,v_ux,v_uy,v_uz,v_edgelx,v_edgely, \ v_edgelz,v_edgerx,v_edgery,v_edgerz,v_gami,v_at,v_as,a,b,c,d,e,f,g,h, \ p,q,r,s,t,u,v,ws,wt,wu,wv,cp,cr,msk,msk1,msk2,kk,scu) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm512_set1_epi32(noff); v_moff = _mm512_set1_epi32(moff); v_loff = _mm512_set1_epi32(loff); npp = kpic[l]; npoff = idimp*nppmx*l; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? my : mm; ll = nz - loff; ll = mz < ll ? 
mz : ll; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; edgelz = loff; edgerz = loff + ll; v_edgelx = _mm512_set1_ps(edgelx); v_edgely = _mm512_set1_ps(edgely); v_edgelz = _mm512_set1_ps(edgelz); v_edgerx = _mm512_set1_ps(edgerx); v_edgery = _mm512_set1_ps(edgery); v_edgerz = _mm512_set1_ps(edgerz); ih = 0; nh = 0; nn += 1; mm += 1; ll += 1; /* zero out local accumulator */ /* for (j = 0; j < 4*mxyv*(mz+1); j++) { */ /* scu[j] = 0.0f; */ /* } */ memset((void*)scu,0,4*mxyv*(mz+1)*sizeof(float)); /* clear counters */ /* for (j = 0; j < 26; j++) { */ /* ncl[j+26*l] = 0; */ /* } */ memset((void*)&ncl[26*l],0,26*sizeof(int)); nps = 16*(npp/16); /* loop over particles in tile in blocks of 16 */ for (j = 0; j < nps; j+=16) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ /* z = ppart[j+2*nppmx+npoff]; */ v_x = _mm512_load_ps(&ppart[j+npoff]); v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]); v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]); /* nn = x; */ /* mm = y; */ /* ll = z; */ v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); /* dxp = qm*(x - (float) nn); */ /* dyp = y - (float) mm; */ /* dzp = z - (float) ll; */ v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp)); v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dyp = _mm512_sub_ps(v_y,v_dyp); v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dzp = _mm512_sub_ps(v_z,v_dzp); /* find inverse gamma */ /* ux = ppart[j+3*nppmx+npoff]; */ /* uy = ppart[j+4*nppmx+npoff]; */ /* uz = ppart[j+5*nppmx+npoff]; */ v_ux = _mm512_load_ps(&ppart[j+3*nppmx+npoff]); v_uy = 
_mm512_load_ps(&ppart[j+4*nppmx+npoff]); v_uz = _mm512_load_ps(&ppart[j+5*nppmx+npoff]); /* p2 = ux*ux + uy*uy + uz*uz; */ v_at = _mm512_fmadd_ps(v_uy,v_uy,_mm512_mul_ps(v_ux,v_ux)); v_at = _mm512_fmadd_ps(v_uz,v_uz,v_at); /* gami = 1.0f/sqrtf(1.0f + p2*ci2); */ /* approximate calculation */ /* v_gami = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */ /* full accuracy calculation */ v_gami = _mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); v_gami = _mm512_div_ps(v_one,v_gami); /* full accuracy calculation with SVML */ /* v_gami = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */ /* calculate weights */ /* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */ v_nn = _mm512_sub_epi32(v_nn,v_noff); v_mm = _mm512_sub_epi32(v_mm,v_moff); v_ll = _mm512_sub_epi32(v_ll,v_loff); v_it = _mm512_mullo_epi32(v_mxyv4,v_ll); v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm)); v_nn = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it); /* amx = qm - dxp; */ /* amy = 1.0f - dyp; */ /* amz = 1.0f - dzp; */ v_amx = _mm512_sub_ps(v_qm,v_dxp); v_amy = _mm512_sub_ps(v_one,v_dyp); v_amz = _mm512_sub_ps(v_one,v_dzp); /* dx1 = dxp*dyp; */ /* dyp = amx*dyp; */ /* amx = amx*amy; */ /* amy = dxp*amy; */ v_dx1 = _mm512_mul_ps(v_dxp,v_dyp); v_dyp = _mm512_mul_ps(v_amx,v_dyp); v_amx = _mm512_mul_ps(v_amx,v_amy); v_amy = _mm512_mul_ps(v_dxp,v_amy); /* a = amx*amz; */ /* b = amy*amz; */ /* c = dyp*amz; */ /* d = dx1*amz; */ a = _mm512_mul_ps(v_amx,v_amz); b = _mm512_mul_ps(v_amy,v_amz); c = _mm512_mul_ps(v_dyp,v_amz); d = _mm512_mul_ps(v_dx1,v_amz); /* e = amx*dzp; */ /* f = amy*dzp; */ /* g = dyp*dzp; */ /* h = dx1*dzp; */ e = _mm512_mul_ps(v_amx,v_dzp); f = _mm512_mul_ps(v_amy,v_dzp); g = _mm512_mul_ps(v_dyp,v_dzp); h = _mm512_mul_ps(v_dx1,v_dzp); /* deposit current */ /* vx = ux*gami; */ /* vy = uy*gami; */ /* vz = uz*gami; */ v_vx = _mm512_mul_ps(v_ux,v_gami); v_vy = _mm512_mul_ps(v_uy,v_gami); v_vz = _mm512_mul_ps(v_uz,v_gami); v_ll = 
_mm512_add_epi32(v_nn,v_mxyv4); /* deposit charge for one particle at a time */ for (i = 0; i < 16; i++) { ii = i >> 2; if (i==(ii<<2)) { switch (ii) { case 0: /* replicate velocities of first group of 4 particles */ p = _mm512_permute4f128_ps(v_vx,0); q = _mm512_permute4f128_ps(v_vy,0); r = _mm512_permute4f128_ps(v_vz,0); /* regroup weights for first group of 4 particles */ s = _mm512_mask_permute4f128_ps(a, _mm512_int2mask(61680),b,177); t = _mm512_mask_permute4f128_ps(c, _mm512_int2mask(61680),d,177); u = _mm512_mask_permute4f128_ps(e, _mm512_int2mask(61680),f,177); v = _mm512_mask_permute4f128_ps(g, _mm512_int2mask(61680),h,177); break; case 1: /* replicate velocities of second group of 4 particles */ p = _mm512_permute4f128_ps(v_vx,85); q = _mm512_permute4f128_ps(v_vy,85); r = _mm512_permute4f128_ps(v_vz,85); /* regroup weights for second group of 4 particles */ s = _mm512_mask_permute4f128_ps(b, _mm512_int2mask(3855),a,177); t = _mm512_mask_permute4f128_ps(d, _mm512_int2mask(3855),c,177); u = _mm512_mask_permute4f128_ps(f, _mm512_int2mask(3855),e,177); v = _mm512_mask_permute4f128_ps(h, _mm512_int2mask(3855),g,177); break; case 2: /* replicate velocities of third group of 4 particles */ p = _mm512_permute4f128_ps(v_vx,170); q = _mm512_permute4f128_ps(v_vy,170); r = _mm512_permute4f128_ps(v_vz,170); /* regroup weights for third group of 4 particles */ s = _mm512_mask_permute4f128_ps(a, _mm512_int2mask(61680),b,177); s = _mm512_permute4f128_ps(s,78); t = _mm512_mask_permute4f128_ps(c, _mm512_int2mask(61680),d,177); t = _mm512_permute4f128_ps(t,78); u = _mm512_mask_permute4f128_ps(e, _mm512_int2mask(61680),f,177); u = _mm512_permute4f128_ps(u,78); v = _mm512_mask_permute4f128_ps(g, _mm512_int2mask(61680),h,177); v = _mm512_permute4f128_ps(v,78); break; case 3: /* replicate velocities of fourth group of 4 particles */ p = _mm512_permute4f128_ps(v_vx,255); q = _mm512_permute4f128_ps(v_vy,255); r = _mm512_permute4f128_ps(v_vz,255); /* regroup weights for fourth 
group of 4 particles */ s = _mm512_mask_permute4f128_ps(b, _mm512_int2mask(3855),a,177); s = _mm512_permute4f128_ps(s,78); t = _mm512_mask_permute4f128_ps(d, _mm512_int2mask(3855),c,177); t = _mm512_permute4f128_ps(t,78); u = _mm512_mask_permute4f128_ps(f, _mm512_int2mask(3855),e,177); u = _mm512_permute4f128_ps(u,78); v = _mm512_mask_permute4f128_ps(h, _mm512_int2mask(3855),g,177); v = _mm512_permute4f128_ps(v,78); break; } } v_it = _mm512_setzero_epi32(); switch (i-(ii<<2)) { /* first particle */ case 0: /* reorder velocity components */ v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p, _mm512_int2mask(170),(__m512i)q,177); v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at, _mm512_int2mask(68),(__m512i)r,78); /* reorder weights */ ws = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)s,0); wt = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)t,0); wu = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)u,0); wv = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)v,0); break; /* second particle */ case 1: /* reorder velocity components */ v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q, _mm512_int2mask(85),(__m512i)p,177); v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at, _mm512_int2mask(68),(__m512i)r,24); /* reorder weights */ ws = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)s,85); wt = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)t,85); wu = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)u,85); wv = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)v,85); break; /* third particle */ case 2: /* reorder velocity components */ v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)p, _mm512_int2mask(170),(__m512i)q,177); v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)r, _mm512_int2mask(51),(__m512i)v_at,78); /* reorder weights */ ws = (__m512)_mm512_mask_shuffle_epi32(v_it, 
_mm512_int2mask(119),(__m512i)s,170); wt = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)t,170); wu = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)u,170); wv = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)v,170); break; /* fourth particle */ case 3: /* reorder velocity components */ v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)q, _mm512_int2mask(85),(__m512i)p,177); v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_at,78); v_at = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at, _mm512_int2mask(68),(__m512i)r,177); /* reorder weights */ ws = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)s,255); wt = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)t,255); wu = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)u,255); wv = (__m512)_mm512_mask_shuffle_epi32(v_it, _mm512_int2mask(119),(__m512i)v,255); break; } _mm512_store_epi32(kk,v_nn); /* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */ /* dx = amx*amz; */ /* dy = amy*amz; */ /* scu[nn] += vx*dx; */ /* scu[nn+1] += vy*dx; */ /* scu[nn+2] += vz*dx; */ /* dx = dyp*amz; */ /* scu[nn+4] += vx*dy; */ /* scu[nn+1+4] += vy*dy; */ /* scu[nn+2+4] += vz*dy; */ mm = kk[i]; cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255), &scu[mm]); cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255), &scu[mm+16]); cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),ws,cp); _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp); _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp); /* mm = nn + 4*mxv; */ /* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */ /* dx = dyp*amz; */ /* dy = dx1*amz; */ /* scu[mm] += vx*dx; */ /* scu[mm+1] += vy*dx; */ /* scu[mm+2] += vz*dx; */ /* scu[mm+4] += vx*dy; */ /* scu[mm+1+4] += vy*dy; */ /* scu[mm+2+4] += vz*dy; */ mm = kk[i] + 4*mxv; cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255), &scu[mm]); cr = 
_mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255), &scu[mm+16]); cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wt,cr); _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr); _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr); _mm512_store_epi32(kk,v_ll); /* nn += 4*mxyv; */ /* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */ /* dx = amx*dzp; */ /* dy = amy*dzp; */ /* scu[nn] += vx*dx; */ /* scu[nn+1] += vy*dx; */ /* scu[nn+2] += vz*dx; */ /* scu[nn+4] += vx*dy; */ /* scu[nn+1+4] += vy*dy; */ /* scu[nn+2+4] += vz*dy; */ mm = kk[i]; cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255), &scu[mm]); cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255), &scu[mm+16]); cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wu,cp); _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cp); _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cp); /* mm = nn + 4*mxv; */ /* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */ /* dx = dyp*dzp; */ /* dy = dx1*dzp; */ /* scu[mm] += vx*dx; */ /* scu[mm+1] += vy*dx; */ /* scu[mm+2] += vz*dx; */ /* scu[mm+4] += vx*dy; */ /* scu[mm+1+4] += vy*dy; */ /* scu[mm+2+4] += vz*dy; */ mm = kk[i] + 4*mxv; cr = _mm512_mask_loadunpacklo_ps(cr,_mm512_int2mask(255), &scu[mm]); cr = _mm512_mask_loadunpackhi_ps(cr,_mm512_int2mask(255), &scu[mm+16]); cr = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),wv,cr); _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),cr); _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),cr); } /* advance position half a time-step */ /* dx = x + vx*dt; */ /* dy = y + vy*dt; */ /* dz = z + vz*dt; */ v_dx = _mm512_fmadd_ps(v_vx,v_dt,v_x); v_dy = _mm512_fmadd_ps(v_vy,v_dt,v_y); v_dz = _mm512_fmadd_ps(v_vz,v_dt,v_z); /* find particles going out of bounds */ /* mm = 0; */ v_mm = _mm512_setzero_epi32(); /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and 
check for roundoff error */ /* mm = direction particle is going */ /* if (dx >= edgerx) { */ /* if (dx >= anx) */ /* ppart[j+npoff] = dx - anx; */ /* mm = 2; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dx,v_edgerx,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dx; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_1,v_1); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it); msk1 = _mm512_cmp_ps_mask(v_dx,v_anx,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dx,v_anx); ii = _mm512_mask2int(msk1); if (ii != 0) v_dx = v_x; } /* if (dx < edgelx) { */ /* if (dx < 0.0) { */ /* dx += anx; */ /* if (dx < anx) */ /* mm = 1; */ /* else */ /* dx = 0.0; */ /* ppart[j+npoff] = dx; */ /* } */ /* else { */ /* mm = 1; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_1); msk2 = _mm512_cmp_ps_mask(v_dx,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dx,v_anx); msk1 = _mm512_cmp_ps_mask(v_x,v_anx,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_mm = _mm512_add_epi32(v_mm,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) v_dx = v_x; } } /* if (dy >= edgery) { */ /* if (dy >= any) */ /* ppart[j+nppmx+npoff] = dy - any; */ /* mm += 6; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dy,v_edgery,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dy; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_3,v_3); v_mm = 
_mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it); msk1 = _mm512_cmp_ps_mask(v_dy,v_any,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dy,v_any); ii = _mm512_mask2int(msk1); if (ii != 0) v_dy = v_x; } /* if (dy < edgely) { */ /* if (dy < 0.0) { */ /* dy += any; */ /* if (dy < any) */ /* mm += 3; */ /* else */ /* dy = 0.0; */ /* ppart[j+nppmx+npoff] = dy; */ /* } */ /* else { */ /* mm += 3; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_3); msk2 = _mm512_cmp_ps_mask(v_dy,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dy,v_any); msk1 = _mm512_cmp_ps_mask(v_x,v_any,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_mm = _mm512_add_epi32(v_mm,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) v_dy = v_x; } } /* if (dz >= edgerz) { */ /* if (dz >= anz) */ /* ppart[j+2*nppmx+npoff] = dz - anz; */ /* mm += 18; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dz; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_9,v_9); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_it); msk1 = _mm512_cmp_ps_mask(v_dz,v_anz,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dz,v_anz); ii = _mm512_mask2int(msk1); if (ii != 0) v_dz = v_x; } /* if (dz < edgelz) { */ /* if (dz < 0.0) { */ /* dz += anz; */ /* if (dz < anz) */ /* mm += 9; */ /* else */ /* dz = 0.0; */ /* ppart[j+2*nppmx+npoff] = dz; */ /* } */ /* else { */ /* mm += 9; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_9); msk2 = 
_mm512_cmp_ps_mask(v_dz,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dz,v_anz); msk1 = _mm512_cmp_ps_mask(v_x,v_anz,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_mm = _mm512_add_epi32(v_mm,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) v_dz = v_x; } } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ /* ppart[j+2*nppmx+npoff] = dz; */ _mm512_store_ps(&ppart[j+npoff],v_dx); _mm512_store_ps(&ppart[j+nppmx+npoff],v_dy); _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz); /* increment counters */ /* if (mm > 0) { */ /* ncl[mm+26*l-1] += 1; */ /* ih += 1; */ /* if (ih <= ntmax) { */ /* ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; */ /* ihole[1+2*(ih+(ntmax+1)*l)] = mm; */ /* } */ /* else { */ /* nh = 1; */ /* } */ /* } */ _mm512_store_epi32(kk,v_mm); for (i = 0; i < 16; i++) { mm = kk[i]; if (mm > 0) { ncl[mm+26*l-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; ihole[1+2*(ih+(ntmax+1)*l)] = mm; } else { nh = 1; } } } } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; z = ppart[j+2*nppmx+npoff]; nn = x; mm = y; ll = z; dxp = qm*(x - (float) nn); dyp = y - (float) mm; dzp = z - (float) ll; /* find inverse gamma */ ux = ppart[j+3*nppmx+npoff]; uy = ppart[j+4*nppmx+npoff]; uz = ppart[j+5*nppmx+npoff]; p2 = ux*ux + uy*uy + uz*uz; gami = 1.0f/sqrtf(1.0f + p2*ci2); /* calculate weights */ nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = qm - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* deposit current within tile to local accumulator */ dx = amx*amz; dy = amy*amz; vx = ux*gami; vy = uy*gami; vz = uz*gami; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = dyp*amz; scu[nn+4] += vx*dy; scu[nn+1+4] += vy*dy; scu[nn+2+4] += vz*dy; dy = dx1*amz; mm = 
nn + 4*mxv; scu[mm] += vx*dx; scu[mm+1] += vy*dx; scu[mm+2] += vz*dx; dx = amx*dzp; scu[mm+4] += vx*dy; scu[mm+1+4] += vy*dy; scu[mm+2+4] += vz*dy; dy = amy*dzp; nn += 4*mxyv; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = dyp*dzp; scu[nn+4] += vx*dy; scu[nn+1+4] += vy*dy; scu[nn+2+4] += vz*dy; dy = dx1*dzp; mm = nn + 4*mxv; scu[mm] += vx*dx; scu[mm+1] += vy*dx; scu[mm+2] += vz*dx; scu[mm+4] += vx*dy; scu[mm+1+4] += vy*dy; scu[mm+2+4] += vz*dy; /* advance position half a time-step */ dx = x + vx*dt; dy = y + vy*dt; dz = z + vz*dt; /* find particles going out of bounds */ mm = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) dx = dx - anx; mm = 2; } else if (dx < edgelx) { if (dx < 0.0f) { dx += anx; if (dx < anx) mm = 1; else dx = 0.0f; } else { mm = 1; } } if (dy >= edgery) { if (dy >= any) dy = dy - any; mm += 6; } else if (dy < edgely) { if (dy < 0.0f) { dy += any; if (dy < any) mm += 3; else dy = 0.0f; } else { mm += 3; } } if (dz >= edgerz) { if (dz >= anz) dz = dz - anz; mm += 18; } else if (dz < edgelz) { if (dz < 0.0f) { dz += anz; if (dz < anz) mm += 9; else dz = 0.0f; } else { mm += 9; } } /* set new position */ ppart[j+npoff] = dx; ppart[j+nppmx+npoff] = dy; ppart[j+2*nppmx+npoff] = dz; /* increment counters */ if (mm > 0) { ncl[mm+26*l-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*l)] = j + 1; ihole[1+2*(ih+(ntmax+1)*l)] = mm; } else { nh = 1; } } } /* deposit current to interior points in global array */ nn = nxv - noff; nn = mx < nn ? mx : nn; mm = nyv - moff; mm = my < mm ? my : mm; ll = nzv - loff; ll = mz < ll ? 
mz : ll; nps = 4*(nn/4); for (k = 1; k < ll; k++) { for (j = 1; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 1; i < nn; i++) { */ /* cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */ /* += scu[4*(i+mxv*j+mxyv*k)]; */ /* cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */ /* += scu[1+4*(i+mxv*j+mxyv*k)]; */ /* cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */ /* += scu[2+4*(i+mxv*j+mxyv*k)]; */ /* } */ for (i = 0; i < nps; i+=4) { m = 4*(i + mxv*j + mxyv*k); v_as = _mm512_loadunpacklo_ps(v_as,&scu[m]); v_as = _mm512_loadunpackhi_ps(v_as,&scu[m+16]); m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff)); v_at = _mm512_loadunpacklo_ps(v_at,&cu[m]); v_at = _mm512_loadunpackhi_ps(v_at,&cu[m+16]); /* skip add for first elements for i = 0 */ if (i==0) v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as); else v_at = _mm512_add_ps(v_at,v_as); _mm512_packstorelo_ps(&cu[m],v_at); _mm512_packstorehi_ps(&cu[m+16],v_at); } /* loop over remaining elements */ m = 1 > nps ? 1 : nps; for (i = m; i < nn; i++) { cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[4*(i+mxv*j+mxyv*k)]; cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+4*(i+mxv*j+mxyv*k)]; cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+4*(i+mxv*j+mxyv*k)]; } } } /* deposit current to edge points in global array */ lm = nzv - loff; lm = mz+1 < lm ? mz+1 : lm; for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[4*(i+mxv*j)]; #pragma omp atomic cu[1+4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[1+4*(i+mxv*j)]; #pragma omp atomic cu[2+4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[2+4*(i+mxv*j)]; if (lm > mz) { #pragma omp atomic cu[4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[4*(i+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+4*(i+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+4*(i+mxv*j+mxyv*(lm-1))]; } } } nm = nxv - noff; nm = mx+1 < nm ? 
mx+1 : nm; mm = nyv - moff; mm = my+1 < mm ? my+1 : mm; for (k = 0; k < ll; k++) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[4*(i+mxyv*k)]; #pragma omp atomic cu[1+4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[1+4*(i+mxyv*k)]; #pragma omp atomic cu[2+4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[2+4*(i+mxyv*k)]; if (mm > my) { #pragma omp atomic cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[4*(i+mxv*(mm-1)+mxyv*k)]; #pragma omp atomic cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[1+4*(i+mxv*(mm-1)+mxyv*k)]; #pragma omp atomic cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[2+4*(i+mxv*(mm-1)+mxyv*k)]; } } for (j = 0; j < mm; j++) { #pragma omp atomic cu[4*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[4*(mxv*j+mxyv*k)]; #pragma omp atomic cu[1+4*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+4*(mxv*j+mxyv*k)]; #pragma omp atomic cu[2+4*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+4*(mxv*j+mxyv*k)]; if (nm > mx) { #pragma omp atomic cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[4*(nm-1+mxv*j+mxyv*k)]; #pragma omp atomic cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+4*(nm-1+mxv*j+mxyv*k)]; #pragma omp atomic cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+4*(nm-1+mxv*j+mxyv*k)]; } } } if (lm > mz) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[4*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[4*(i+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[1+4*(i+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[2+4*(i+mxyv*(lm-1))]; if (mm > my) { #pragma omp atomic cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[4*(i+mxv*(mm-1)+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[1+4*(i+mxv*(mm-1)+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[2+4*(i+mxv*(mm-1)+mxyv*(lm-1))]; } } for (j = 0; j < mm; j++) { #pragma omp atomic 
cu[4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[4*(mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+4*(mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+4*(mxv*j+mxyv*(lm-1))]; if (nm > mx) { #pragma omp atomic cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[4*(nm-1+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+4*(nm-1+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+4*(nm-1+mxv*j+mxyv*(lm-1))]; } } } /* set error and end of file flag */ if (nh > 0) { *irc = ih; ih = -ih; } ihole[2*(ntmax+1)*l] = ih; } return; #undef MXV #undef MYV #undef MZV } /*--------------------------------------------------------------------*/ void cknc2gjppost3lt(float ppart[], float cu[], int kpic[], float qm, float dt, int nppmx, int idimp, int nx, int ny, int nz, int mx, int my, int mz, int nxv, int nyv, int nzv, int mx1, int my1, int mxyz1, int ipbc) { /* for 3d code, this subroutine calculates particle current density using first-order linear interpolation in addition, particle positions are advanced a half time-step OpenMP/vector version using guard cells data deposited in tiles particles stored segmented array 69 flops/particle, 30 loads, 27 stores input: all, output: ppart, cu current density is approximated by values at the nearest grid points cu(i,n,m,l)=qci*(1.-dx)*(1.-dy)*(1.-dz) cu(i,n+1,m,l)=qci*dx*(1.-dy)*(1.-dz) cu(i,n,m+1,l)=qci*(1.-dx)*dy*(1.-dz) cu(i,n+1,m+1,l)=qci*dx*dy*(1.-dz) cu(i,n,m,l+1)=qci*(1.-dx)*(1.-dy)*dz cu(i,n+1,m,l+1)=qci*dx*(1.-dy)*dz cu(i,n,m+1,l+1)=qci*(1.-dx)*dy*dz cu(i,n+1,m+1,l+1)=qci*dx*dy*dz where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l and qci = qm*vi, where i = x,y,z ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = position z of particle n in tile m 
ppart[m][3][n] = velocity vx of particle n in tile m ppart[m][4][n] = velocity vy of particle n in tile m ppart[m][5][n] = velocity vz of particle n in tile m cu[l][k][j][i] = ith component of current density at grid point j,k,l kpic = number of particles per tile qm = charge on particle, in units of e dt = time interval between successive calculations nppmx = maximum number of particles in tile idimp = size of phase space = 6 nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z nxv = second dimension of current array, must be >= nx+1 nyv = third dimension of current array, must be >= ny+1 nzv = fourth dimension of current array, must be >= nz+1 mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mxyz1 = mx1*my1*mz1, where mz1 = (system length in z direction - 1)/mz + 1 ipbc = particle boundary condition = (0,1,2,3) = (none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic) requires KNC, part needs to be 64 byte aligned nppmx needs to be a multiple of 16 cu needs to have 4 components, although one is not used local data */ #define MXV 17 #define MYV 17 #define MZV 17 int mxy1, noff, moff, loff, npoff, npp, nps; int i, j, k, l, m, nn, mm, ll, ii, nm, lm, mxv, myv, mxyv, nxyv; float edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dxp, dyp, dzp, amx, amy, amz, dx1, dx, dy, dz, vx, vy, vz; float x, y, z; __m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4; __m512i v_nn, v_mm, v_ll, v_it; __m512 v_qm, v_dt, v_one, v_zero; __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz; __m512 v_dx1, v_at, v_as, v_dx, v_dy, v_dz; __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz; __m512 cp; __mmask16 msk, v_m; __attribute__((aligned(64))) unsigned int kk[16]; typedef union vfloat {float v[16]; __m512 v16;} vf; __attribute__((aligned(64))) float scu[4*MXV*MYV*MZV]; /* __attribute__((aligned(64))) float scu[4*(mx+1)*(my+1)*(mz+1)]; */ vf vv[8], vc[3], vu; mxy1 = 
mx1*my1; /* mxv = MXV; */ /* myv = MYV; */ mxv = mx+1; myv = my+1; mxyv = mxv*myv; nxyv = nxv*nyv; /* set boundary values */ edgelx = 0.0f; edgely = 0.0f; edgelz = 0.0f; edgerx = (float) nx; edgery = (float) ny; edgerz = (float) nz; if (ipbc==2) { edgelx = 1.0f; edgely = 1.0f; edgelz = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); edgerz = (float) (nz-1); } else if (ipbc==3) { edgelx = 1.0f; edgely = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); } v_mxv4 = _mm512_set1_epi32(4*mxv); v_mxyv4 = _mm512_set1_epi32(4*mxyv); v_qm = _mm512_set1_ps(qm); v_dt = _mm512_set1_ps(dt); v_one = _mm512_set1_ps(1.0f); v_zero = _mm512_setzero_ps(); v_edgelx = _mm512_set1_ps(edgelx); v_edgely = _mm512_set1_ps(edgely); v_edgelz = _mm512_set1_ps(edgelz); v_edgerx = _mm512_set1_ps(edgerx); v_edgery = _mm512_set1_ps(edgery); v_edgerz = _mm512_set1_ps(edgerz); v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,1.,1.,1., 1.); v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ii,nm,lm,x,y, \ z,vx,vy,vz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,v_noff,v_moff,v_loff, \ v_nn,v_mm,v_ll,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx,v_amy,v_amz, \ v_dx1,v_dx,v_dy,v_dz,v_at,v_as,cp,msk,kk,scu,vv,vc,vu) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm512_set1_epi32(noff); v_moff = _mm512_set1_epi32(moff); v_loff = _mm512_set1_epi32(loff); npp = kpic[l]; npoff = idimp*nppmx*l; /* zero out local accumulator */ /* for (j = 0; j < 4*mxyv*(mz+1); j++) { */ /* scu[j] = 0.0f; */ /* } */ memset((void*)scu,0,4*mxyv*(mz+1)*sizeof(float)); nps = 16*(npp/16); /* loop over particles in tile in blocks of 16 */ for (j = 0; j < nps; j+=16) { /* find interpolation weights */ /* x = 
ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ /* z = ppart[j+2*nppmx+npoff]; */ v_x = _mm512_load_ps(&ppart[j+npoff]); v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]); v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]); /* nn = x; */ /* mm = y; */ /* ll = z; */ v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z, _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE); /* dxp = qm*(x - (float) nn); */ /* dyp = y - (float) mm; */ /* dzp = z - (float) ll; */ v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp)); v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dyp = _mm512_sub_ps(v_y,v_dyp); v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dzp = _mm512_sub_ps(v_z,v_dzp); /* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */ v_nn = _mm512_sub_epi32(v_nn,v_noff); v_mm = _mm512_sub_epi32(v_mm,v_moff); v_ll = _mm512_sub_epi32(v_ll,v_loff); v_it = _mm512_mullo_epi32(v_mxyv4,v_ll); v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm)); v_nn = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it); /* amx = qm - dxp; */ /* amy = 1.0f - dyp; */ /* amz = 1.0f - dzp; */ v_amx = _mm512_sub_ps(v_qm,v_dxp); v_amy = _mm512_sub_ps(v_one,v_dyp); v_amz = _mm512_sub_ps(v_one,v_dzp); /* dx1 = dxp*dyp; */ /* dyp = amx*dyp; */ /* amx = amx*amy; */ /* amy = dxp*amy; */ v_dx1 = _mm512_mul_ps(v_dxp,v_dyp); v_dyp = _mm512_mul_ps(v_amx,v_dyp); v_amx = _mm512_mul_ps(v_amx,v_amy); v_amy = _mm512_mul_ps(v_dxp,v_amy); /* a = amx*amz; */ /* b = amy*amz; */ /* c = dyp*amz; */ /* d = dx1*amz; */ vv[0].v16 = _mm512_mul_ps(v_amx,v_amz); vv[1].v16 = _mm512_mul_ps(v_amy,v_amz); vv[2].v16 = _mm512_mul_ps(v_dyp,v_amz); vv[3].v16 = _mm512_mul_ps(v_dx1,v_amz); /* e = amx*dzp; */ /* f = 
amy*dzp; */ /* g = dyp*dzp; */ /* h = dx1*dzp; */ vv[4].v16 = _mm512_mul_ps(v_amx,v_dzp); vv[5].v16 = _mm512_mul_ps(v_amy,v_dzp); vv[6].v16 = _mm512_mul_ps(v_dyp,v_dzp); vv[7].v16 = _mm512_mul_ps(v_dx1,v_dzp); _mm512_store_epi32(kk,v_nn); /* deposit current */ /* vx = ppart[j+3*nppmx+npoff]; */ /* vy = ppart[j+4*nppmx+npoff]; */ /* vz = ppart[j+5*nppmx+npoff]; */ vc[0].v16 = _mm512_load_ps(&ppart[j+3*nppmx+npoff]); vc[1].v16 = _mm512_load_ps(&ppart[j+4*nppmx+npoff]); vc[2].v16 = _mm512_load_ps(&ppart[j+5*nppmx+npoff]); /* deposit charge for one particle at a time */ for (i = 0; i < 16; i++) { nn = kk[i]; vu.v16 = _mm512_setzero_ps(); vu.v[0] = vc[0].v[i]; vu.v[1] = vc[1].v[i]; vu.v[2] = vc[2].v[i]; v_at = _mm512_permute4f128_ps(vu.v16,0); /* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */ /* dx = amx*amz; */ /* dy = amy*amz; */ /* scu[nn] += vx*dx; */ /* scu[nn+1] += vy*dx; */ /* scu[nn+2] += vz*dx; */ /* dx = dyp*amz; */ /* scu[nn+4] += vx*dy; */ /* scu[nn+1+4] += vy*dy; */ /* scu[nn+2+4] += vz*dy; */ vu.v[0] = vv[0].v[i]; vu.v[4] = vv[1].v[i]; v_as = (__m512)_mm512_shuffle_epi32 ((__m512i)vu.v16,0); cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255), &scu[nn]); cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255), &scu[nn+16]); cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),v_as,cp); _mm512_mask_packstorelo_ps(&scu[nn],_mm512_int2mask(255), cp); _mm512_mask_packstorehi_ps(&scu[nn+16],_mm512_int2mask(255), cp); mm = nn + 4*mxv; /* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */ /* dy = dx1*amz; */ /* scu[mm] += vx*dx; */ /* scu[mm+1] += vy*dx; */ /* scu[mm+2] += vz*dx; */ /* dx = amx*dzp; */ /* scu[mm+4] += vx*dy; */ /* scu[mm+1+4] += vy*dy; */ /* scu[mm+2+4] += vz*dy; */ /* dx = amx*dzp; */ /* scu[mm+4] += vx*dy; */ /* scu[mm+1+4] += vy*dy; */ /* scu[mm+2+4] += vz*dy; */ vu.v[0] = vv[2].v[i]; vu.v[4] = vv[3].v[i]; v_as = (__m512)_mm512_shuffle_epi32 ((__m512i)vu.v16,0); cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255), 
&scu[mm]); cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255), &scu[mm+16]); cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),v_as,cp); _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255), cp); _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255), cp); nn += 4*mxyv; /* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */ /* dy = amy*dzp; */ /* scu[nn] += vx*dx; */ /* scu[nn+1] += vy*dx; */ /* scu[nn+2] += vz*dx; */ /* dx = dyp*dzp; */ /* scu[nn+4] += vx*dy; */ /* scu[nn+1+4] += vy*dy; */ /* scu[nn+2+4] += vz*dy; */ vu.v[0] = vv[4].v[i]; vu.v[4] = vv[5].v[i]; v_as = (__m512)_mm512_shuffle_epi32 ((__m512i)vu.v16,0); cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255), &scu[nn]); cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255), &scu[nn+16]); cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),v_as,cp); _mm512_mask_packstorelo_ps(&scu[nn],_mm512_int2mask(255), cp); _mm512_mask_packstorehi_ps(&scu[nn+16],_mm512_int2mask(255), cp); mm = nn + 4*mxv; /* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */ /* dy = dx1*dzp; */ /* scu[mm] += vx*dx; */ /* scu[mm+1] += vy*dx; */ /* scu[mm+2] += vz*dx; */ /* scu[mm+4] += vx*dy; */ /* scu[mm+1+4] += vy*dy; */ /* scu[mm+2+4] += vz*dy; */ vu.v[0] = vv[6].v[i]; vu.v[4] = vv[7].v[i]; v_as = (__m512)_mm512_shuffle_epi32 ((__m512i)vu.v16,0); cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255), &scu[mm]); cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255), &scu[mm+16]); cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),v_as,cp); _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255), cp); _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255), cp); } /* advance position half a time-step */ /* dx = x + vx*dt; */ /* dy = y + vy*dt; */ /* dz = z + vz*dt; */ v_dx = _mm512_fmadd_ps(vc[0].v16,v_dt,v_x); v_dy = _mm512_fmadd_ps(vc[1].v16,v_dt,v_y); v_dz = _mm512_fmadd_ps(vc[2].v16,v_dt,v_z); /* reflecting boundary conditions */ if (ipbc==2) { /* if ((dx < edgelx) || (dx >= edgerx)) 
{ */ /* dx = x; */ /* ppart[j+3*nppmx+npoff] = -vx; */ /* } */ msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx, _MM_CMPINT_GE)); v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x); vc[0].v16 = _mm512_mask_sub_ps(vc[0].v16,msk,v_zero,vc[0].v16); /* write output if test result is true for any particle */ if (msk) _mm512_store_ps(&ppart[j+3*nppmx+npoff],vc[0].v16); /* if ((dy < edgely) || (dy >= edgery)) { */ /* dy = y; */ /* ppart[j+4*nppmx+npoff] = -vy; */ /* } */ msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery, _MM_CMPINT_GE)); v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y); vc[1].v16 = _mm512_mask_sub_ps(vc[1].v16,msk,v_zero,vc[1].v16); /* write output if test result is true for any particle */ if (msk) _mm512_store_ps(&ppart[j+4*nppmx+npoff],vc[1].v16); /* if ((dz < edgelz) || (dz >= edgerz)) { */ /* dz = z; */ /* ppart[j+5*nppmx+npoff] = -vz; */ /* } */ msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dz,v_edgerz, _MM_CMPINT_GE)); v_dz = _mm512_mask_blend_ps(msk,v_dz,v_z); vc[2].v16 = _mm512_mask_sub_ps(vc[2].v16,msk,v_zero,vc[2].v16); /* write output if test result is true for any particle */ if (msk) _mm512_store_ps(&ppart[j+5*nppmx+npoff],vc[2].v16); } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* ppart[j+3*nppmx+npoff] = -vx; */ /* } */ msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx, _MM_CMPINT_GE)); v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x); vc[0].v16 = _mm512_mask_sub_ps(vc[0].v16,msk,v_zero,vc[0].v16); /* write output if test result is true for any particle */ if (msk) _mm512_store_ps(&ppart[j+3*nppmx+npoff],vc[0].v16); /* if ((dy < edgely) || (dy >= edgery)) { */ /* dy = y; */ /* ppart[j+4*nppmx+npoff] = -vy; */ /* } */ msk = 
_mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery, _MM_CMPINT_GE)); v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y); vc[1].v16 = _mm512_mask_sub_ps(vc[1].v16,msk,v_zero,vc[1].v16); /* write output if test result is true for any particle */ if (msk) _mm512_store_ps(&ppart[j+4*nppmx+npoff],vc[1].v16); } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ /* ppart[j+2*nppmx+npoff] = dz; */ _mm512_store_ps(&ppart[j+npoff],v_dx); _mm512_store_ps(&ppart[j+nppmx+npoff],v_dy); _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz); } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; z = ppart[j+2*nppmx+npoff]; nn = x; mm = y; ll = z; dxp = qm*(x - (float) nn); dyp = y - (float) mm; dzp = z - (float) ll; nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); amx = qm - dxp; amy = 1.0f - dyp; dx1 = dxp*dyp; dyp = amx*dyp; amx = amx*amy; amz = 1.0f - dzp; amy = dxp*amy; /* deposit current within tile to local accumulator */ dx = amx*amz; dy = amy*amz; vx = ppart[j+3*nppmx+npoff]; vy = ppart[j+4*nppmx+npoff]; vz = ppart[j+5*nppmx+npoff]; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = dyp*amz; scu[nn+4] += vx*dy; scu[nn+1+4] += vy*dy; scu[nn+2+4] += vz*dy; dy = dx1*amz; mm = nn + 4*mxv; scu[mm] += vx*dx; scu[mm+1] += vy*dx; scu[mm+2] += vz*dx; dx = amx*dzp; scu[mm+4] += vx*dy; scu[mm+1+4] += vy*dy; scu[mm+2+4] += vz*dy; dy = amy*dzp; nn += 4*mxyv; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = dyp*dzp; scu[nn+4] += vx*dy; scu[nn+1+4] += vy*dy; scu[nn+2+4] += vz*dy; dy = dx1*dzp; mm = nn + 4*mxv; scu[mm] += vx*dx; scu[mm+1] += vy*dx; scu[mm+2] += vz*dx; scu[mm+4] += vx*dy; scu[mm+1+4] += vy*dy; scu[mm+2+4] += vz*dy; /* advance position half a time-step */ dx = x + vx*dt; dy = y + vy*dt; dz = z + vz*dt; /* reflecting boundary conditions */ if (ipbc==2) { if ((dx < edgelx) || (dx >= 
edgerx)) { dx = x; ppart[j+3*nppmx+npoff] = -vx; } if ((dy < edgely) || (dy >= edgery)) { dy = y; ppart[j+4*nppmx+npoff] = -vy; } if ((dz < edgelz) || (dz >= edgerz)) { dz = z; ppart[j+5*nppmx+npoff] = -vz; } } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; ppart[j+3*nppmx+npoff] = -vx; } if ((dy < edgely) || (dy >= edgery)) { dy = y; ppart[j+4*nppmx+npoff] = -vy; } } /* set new position */ ppart[j+npoff] = dx; ppart[j+nppmx+npoff] = dy; ppart[j+2*nppmx+npoff] = dz; } /* deposit current to interior points in global array */ nn = nxv - noff; nn = mx < nn ? mx : nn; mm = nyv - moff; mm = my < mm ? my : mm; ll = nzv - loff; ll = mz < ll ? mz : ll; nps = 4*(nn/4); for (k = 1; k < ll; k++) { for (j = 1; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 1; i < nn; i++) { */ /* cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */ /* += scu[4*(i+mxv*j+mxyv*k)]; */ /* cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */ /* += scu[1+4*(i+mxv*j+mxyv*k)]; */ /* cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] */ /* += scu[2+4*(i+mxv*j+mxyv*k)]; */ /* } */ for (i = 0; i < nps; i+=4) { m = 4*(i + mxv*j + mxyv*k); v_as = _mm512_loadunpacklo_ps(v_as,&scu[m]); v_as = _mm512_loadunpackhi_ps(v_as,&scu[m+16]); m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff)); v_at = _mm512_loadunpacklo_ps(v_at,&cu[m]); v_at = _mm512_loadunpackhi_ps(v_at,&cu[m+16]); /* skip add for first elements for i = 0 */ if (i==0) v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as); else v_at = _mm512_add_ps(v_at,v_as); _mm512_packstorelo_ps(&cu[m],v_at); _mm512_packstorehi_ps(&cu[m+16],v_at); } /* loop over remaining elements */ m = 1 > nps ? 
1 : nps; for (i = m; i < nn; i++) { cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[4*(i+mxv*j+mxyv*k)]; cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+4*(i+mxv*j+mxyv*k)]; cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+4*(i+mxv*j+mxyv*k)]; } } } /* deposit current to edge points in global array */ lm = nzv - loff; lm = mz+1 < lm ? mz+1 : lm; for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[4*(i+mxv*j)]; #pragma omp atomic cu[1+4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[1+4*(i+mxv*j)]; #pragma omp atomic cu[2+4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[2+4*(i+mxv*j)]; if (lm > mz) { #pragma omp atomic cu[4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[4*(i+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+4*(i+mxv*j+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+4*(i+mxv*j+mxyv*(lm-1))]; } } } nm = nxv - noff; nm = mx+1 < nm ? mx+1 : nm; mm = nyv - moff; mm = my+1 < mm ? 
my+1 : mm; for (k = 0; k < ll; k++) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[4*(i+mxyv*k)]; #pragma omp atomic cu[1+4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[1+4*(i+mxyv*k)]; #pragma omp atomic cu[2+4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[2+4*(i+mxyv*k)]; if (mm > my) { #pragma omp atomic cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[4*(i+mxv*(mm-1)+mxyv*k)]; #pragma omp atomic cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[1+4*(i+mxv*(mm-1)+mxyv*k)]; #pragma omp atomic cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))] += scu[2+4*(i+mxv*(mm-1)+mxyv*k)]; } } for (j = 0; j < mm; j++) { #pragma omp atomic cu[4*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[4*(mxv*j+mxyv*k)]; #pragma omp atomic cu[1+4*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+4*(mxv*j+mxyv*k)]; #pragma omp atomic cu[2+4*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+4*(mxv*j+mxyv*k)]; if (nm > mx) { #pragma omp atomic cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[4*(nm-1+mxv*j+mxyv*k)]; #pragma omp atomic cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[1+4*(nm-1+mxv*j+mxyv*k)]; #pragma omp atomic cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))] += scu[2+4*(nm-1+mxv*j+mxyv*k)]; } } } if (lm > mz) { for (i = 1; i < nn; i++) { #pragma omp atomic cu[4*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[4*(i+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[1+4*(i+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))] += scu[2+4*(i+mxyv*(lm-1))]; if (mm > my) { #pragma omp atomic cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[4*(i+mxv*(mm-1)+mxyv*(lm-1))]; #pragma omp atomic cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[1+4*(i+mxv*(mm-1)+mxyv*(lm-1))]; #pragma omp atomic cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))] += scu[2+4*(i+mxv*(mm-1)+mxyv*(lm-1))]; } } for (j = 0; j < mm; j++) { #pragma omp atomic cu[4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += 
scu[4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
            cu[1+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
            cu[2+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+4*(mxv*j+mxyv*(lm-1))];
/* right x edge of the z = lm-1 plane, if this tile extends that far */
            if (nm > mx) {
#pragma omp atomic
               cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
               cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[1+4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
               cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))] += scu[2+4*(nm-1+mxv*j+mxyv*(lm-1))];
            }
         }
      }
   }
   return;
#undef MXV
#undef MYV
#undef MZV
}

/*--------------------------------------------------------------------*/
void cknc2grjppost3lt(float ppart[], float cu[], int kpic[], float qm,
                      float dt, float ci, int nppmx, int idimp, int nx,
                      int ny, int nz, int mx, int my, int mz, int nxv,
                      int nyv, int nzv, int mx1, int my1, int mxyz1,
                      int ipbc) {
/* for 3d code, this subroutine calculates particle current density
   using first-order linear interpolation for relativistic particles
   in addition, particle positions are advanced a half time-step
   OpenMP/vector version using guard cells
   data deposited in tiles
   particles stored segmented array
   79 flops/particle, 1 divide, 1 sqrt, 30 loads, 27 stores
   input: all, output: ppart, cu
   current density is approximated by values at the nearest grid points
   cu(i,n,m,l)=qci*(1.-dx)*(1.-dy)*(1.-dz)
   cu(i,n+1,m,l)=qci*dx*(1.-dy)*(1.-dz)
   cu(i,n,m+1,l)=qci*(1.-dx)*dy*(1.-dz)
   cu(i,n+1,m+1,l)=qci*dx*dy*(1.-dz)
   cu(i,n,m,l+1)=qci*(1.-dx)*(1.-dy)*dz
   cu(i,n+1,m,l+1)=qci*dx*(1.-dy)*dz
   cu(i,n,m+1,l+1)=qci*(1.-dx)*dy*dz
   cu(i,n+1,m+1,l+1)=qci*dx*dy*dz
   where n,m,l = leftmost grid points and dx = x-n, dy = y-m, dz = z-l
   and qci = qm*pi*gami, where i = x,y,z
   where gami = 1./sqrt(1.+sum(pi**2)*ci*ci)
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = position z of particle n in tile m
   ppart[m][3][n] = momentum vx of particle n in tile m
   ppart[m][4][n] = momentum vy of particle n in tile m
   ppart[m][5][n] = momentum vz of particle n in tile m
   cu[l][k][j][i] = ith component of current density at grid point j,k,l
   kpic = number of particles per tile
   qm = charge on particle, in units of e
   dt = time interval between successive calculations
   ci = reciprocal of velocity of light
   nppmx = maximum number of particles in tile
   idimp = size of phase space = 6
   nx/ny/nz = system length in x/y/z direction
   mx/my/mz = number of grids in sorting cell in x/y/z
   nxv = second dimension of current array, must be >= nx+1
   nyv = third dimension of current array, must be >= ny+1
   nzv = fourth dimension of current array, must be >= nz+1
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   mxyz1 = mx1*my1*mz1,
   where mz1 = (system length in z direction - 1)/mz + 1
   ipbc = particle boundary condition = (0,1,2,3) =
   (none,3d periodic,3d reflecting,mixed 2d reflecting/1d periodic)
   requires KNC, part needs to be 64 byte aligned
   nppmx needs to be a multiple of 16
   cu needs to have 4 components, although one is not used
local data                                                            */
#define MXV             17
#define MYV             17
#define MZV             17
   int mxy1, noff, moff, loff, npoff, npp, nps;
   int i, j, k, l, m, nn, mm, ll, ii, nm, lm, mxv, myv, mxyv, nxyv;
   float ci2, edgelx, edgely, edgelz, edgerx, edgery, edgerz;
   float dxp, dyp, dzp, amx, amy, amz, dx1, dx, dy, dz, vx, vy, vz;
   float p2, gami;
   float x, y, z, ux, uy, uz;
   __m512i v_noff, v_moff, v_loff, v_mxv4, v_mxyv4;
   __m512i v_nn, v_mm, v_ll, v_it;
   __m512 v_qm, v_ci2, v_dt, v_one, v_zero;
   __m512 v_x, v_y, v_z, v_dxp, v_dyp, v_dzp, v_amx, v_amy, v_amz;
   __m512 v_dx1, v_gami, v_at, v_as, v_dx, v_dy, v_dz, v_ux, v_uy, v_uz;
   __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz;
   __m512 cp;
   __mmask16 msk, v_m;
/* kk holds the 16 per-particle base indices into scu for one vector block */
   __attribute__((aligned(64))) unsigned int kk[16];
/* union giving lane-wise (float) access to a 512-bit vector register */
   typedef union vfloat {float v[16]; __m512 v16;} vf;
/* per-tile local current accumulator, sized for the maximum tile (MXV etc.) */
   __attribute__((aligned(64))) float scu[4*MXV*MYV*MZV];
/* __attribute__((aligned(64))) float scu[4*(mx+1)*(my+1)*(mz+1)]; */
/* vv = 8 interpolation weights, vc = 3 velocity components, per 16 particles */
   vf vv[8], vc[3], vu;
   mxy1 = mx1*my1;
/* mxv = MXV; */
/* myv = MYV; */
/* tile dimensions include one guard cell in each direction */
   mxv = mx+1;
   myv = my+1;
   mxyv = mxv*myv;
   nxyv = nxv*nyv;
   ci2 = ci*ci;
/* set boundary values */
   edgelx = 0.0f;
   edgely = 0.0f;
   edgelz = 0.0f;
   edgerx = (float) nx;
   edgery = (float) ny;
   edgerz = (float) nz;
   if (ipbc==2) {
      edgelx = 1.0f;
      edgely = 1.0f;
      edgelz = 1.0f;
      edgerx = (float) (nx-1);
      edgery = (float) (ny-1);
      edgerz = (float) (nz-1);
   }
   else if (ipbc==3) {
      edgelx = 1.0f;
      edgely = 1.0f;
      edgerx = (float) (nx-1);
      edgery = (float) (ny-1);
   }
/* broadcast scalar constants into vector registers */
   v_mxv4 = _mm512_set1_epi32(4*mxv);
   v_mxyv4 = _mm512_set1_epi32(4*mxyv);
   v_qm = _mm512_set1_ps(qm);
   v_ci2 = _mm512_set1_ps(ci2);
   v_dt = _mm512_set1_ps(dt);
   v_one = _mm512_set1_ps(1.0f);
   v_zero = _mm512_setzero_ps();
   v_edgelx = _mm512_set1_ps(edgelx);
   v_edgely = _mm512_set1_ps(edgely);
   v_edgelz = _mm512_set1_ps(edgelz);
   v_edgerx = _mm512_set1_ps(edgerx);
   v_edgery = _mm512_set1_ps(edgery);
   v_edgerz = _mm512_set1_ps(edgerz);
/* v_at has 1.0f in lanes 0-3 (set_ps lists lane 15 first), 0.0f elsewhere, */
/* so v_m below is set only in lanes 4-15: it is used in the deposit loop   */
/* to skip the 4 components of the i = 0 guard point, which the            */
/* edge-point pass handles separately                                       */
   v_at = _mm512_set_ps(0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,0.,1.,1.,1.,
                        1.);
   v_m = _mm512_cmp_ps_mask(v_at,v_one,_MM_CMPINT_LT);
/* error if local array is too small */
/* if ((mx >= MXV) || (my >= MYV) || (mz >= MZV)) */
/*    return;                                     */
/* loop over tiles */
#pragma omp parallel for \
private(i,j,k,l,m,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ii,nm,lm,x,y,z, \
vx,vy,vz,ux,uy,uz,dxp,dyp,dzp,amx,amy,amz,dx1,dx,dy,dz,p2,gami,v_noff, \
v_moff,v_loff,v_nn,v_mm,v_ll,v_it,v_x,v_y,v_z,v_dxp,v_dyp,v_dzp,v_amx, \
v_amy,v_amz,v_dx1,v_dx,v_dy,v_dz,v_ux,v_uy,v_uz,v_gami,v_at,v_as,cp, \
msk,kk,scu,vv,vc,vu)
   for (l = 0; l < mxyz1; l++) {
/* decode 1d tile index l into grid offsets noff, moff, loff of this tile */
      loff = l/mxy1;
      k = l - mxy1*loff;
      loff = mz*loff;
      noff = k/mx1;
      moff = my*noff;
      noff = mx*(k - mx1*noff);
      v_noff = _mm512_set1_epi32(noff);
      v_moff = _mm512_set1_epi32(moff);
      v_loff = _mm512_set1_epi32(loff);
      npp = kpic[l];
      npoff = idimp*nppmx*l;
/* zero out local accumulator */
/* for (j = 0; j < 4*mxyv*(mz+1); j++) { */
/*    scu[j] = 0.0f;                     */
/* } */
      memset((void*)scu,0,4*mxyv*(mz+1)*sizeof(float));
/* nps = number of particles processed by the 16-wide vector loop */
      nps = 16*(npp/16);
/* loop over particles in tile in blocks of 16 */
      for (j = 0; j < nps; j+=16) {
/* find interpolation weights */
/* x = ppart[j+npoff];         */
/* y = ppart[j+nppmx+npoff];   */
/* z = ppart[j+2*nppmx+npoff]; */
         v_x = _mm512_load_ps(&ppart[j+npoff]);
         v_y = _mm512_load_ps(&ppart[j+nppmx+npoff]);
         v_z = _mm512_load_ps(&ppart[j+2*nppmx+npoff]);
/* nn = x; */
/* mm = y; */
/* ll = z; */
         v_nn = _mm512_cvtfxpnt_round_adjustps_epi32(v_x,
                _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
         v_mm = _mm512_cvtfxpnt_round_adjustps_epi32(v_y,
                _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
         v_ll = _mm512_cvtfxpnt_round_adjustps_epi32(v_z,
                _MM_ROUND_MODE_DOWN,_MM_EXPADJ_NONE);
/* dxp = qm*(x - (float) nn); */
/* dyp = y - (float) mm;      */
/* dzp = z - (float) ll;      */
         v_dxp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_nn,
                 _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
         v_dxp = _mm512_mul_ps(v_qm,_mm512_sub_ps(v_x,v_dxp));
         v_dyp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_mm,
                 _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
         v_dyp = _mm512_sub_ps(v_y,v_dyp);
         v_dzp = _mm512_cvtfxpnt_round_adjustepi32_ps(v_ll,
                 _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
         v_dzp = _mm512_sub_ps(v_z,v_dzp);
/* find inverse gamma */
/* ux = ppart[j+3*nppmx+npoff]; */
/* uy = ppart[j+4*nppmx+npoff]; */
/* uz = ppart[j+5*nppmx+npoff]; */
         v_ux = _mm512_load_ps(&ppart[j+3*nppmx+npoff]);
         v_uy = _mm512_load_ps(&ppart[j+4*nppmx+npoff]);
         v_uz = _mm512_load_ps(&ppart[j+5*nppmx+npoff]);
/* p2 = ux*ux + uy*uy + uz*uz; */
         v_at = _mm512_fmadd_ps(v_uy,v_uy,_mm512_mul_ps(v_ux,v_ux));
         v_at = _mm512_fmadd_ps(v_uz,v_uz,v_at);
/* gami = 1.0f/sqrtf(1.0f + p2*ci2); */
/* approximate calculation */
/* v_gami = _mm512_rsqrt23_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* full accuracy calculation */
         v_gami = _mm512_sqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one));
         v_gami = _mm512_div_ps(v_one,v_gami);
/* full accuracy calculation with SVML */
/* v_gami = _mm512_invsqrt_ps(_mm512_fmadd_ps(v_at,v_ci2,v_one)); */
/* calculate weights */
/* nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff)); */
         v_nn = _mm512_sub_epi32(v_nn,v_noff);
         v_mm = _mm512_sub_epi32(v_mm,v_moff);
         v_ll = _mm512_sub_epi32(v_ll,v_loff);
         v_it = _mm512_mullo_epi32(v_mxyv4,v_ll);
         v_it = _mm512_add_epi32(v_it,_mm512_mullo_epi32(v_mxv4,v_mm));
         v_nn = _mm512_add_epi32(_mm512_slli_epi32(v_nn,2),v_it);
/* amx = qm - dxp;   */
/* amy = 1.0f - dyp; */
/* amz = 1.0f - dzp; */
         v_amx = _mm512_sub_ps(v_qm,v_dxp);
         v_amy = _mm512_sub_ps(v_one,v_dyp);
         v_amz = _mm512_sub_ps(v_one,v_dzp);
/* dx1 = dxp*dyp; */
/* dyp = amx*dyp; */
/* amx = amx*amy; */
/* amy = dxp*amy; */
         v_dx1 = _mm512_mul_ps(v_dxp,v_dyp);
         v_dyp = _mm512_mul_ps(v_amx,v_dyp);
         v_amx = _mm512_mul_ps(v_amx,v_amy);
         v_amy = _mm512_mul_ps(v_dxp,v_amy);
/* the 8 trilinear weights for the 8 surrounding grid points */
/* a = amx*amz; */
/* b = amy*amz; */
/* c = dyp*amz; */
/* d = dx1*amz; */
         vv[0].v16 = _mm512_mul_ps(v_amx,v_amz);
         vv[1].v16 = _mm512_mul_ps(v_amy,v_amz);
         vv[2].v16 = _mm512_mul_ps(v_dyp,v_amz);
         vv[3].v16 = _mm512_mul_ps(v_dx1,v_amz);
/* e = amx*dzp; */
/* f = amy*dzp; */
/* g = dyp*dzp; */
/* h = dx1*dzp; */
         vv[4].v16 = _mm512_mul_ps(v_amx,v_dzp);
         vv[5].v16 = _mm512_mul_ps(v_amy,v_dzp);
         vv[6].v16 = _mm512_mul_ps(v_dyp,v_dzp);
         vv[7].v16 = _mm512_mul_ps(v_dx1,v_dzp);
         _mm512_store_epi32(kk,v_nn);
/* deposit current */
/* velocity = momentum times inverse gamma */
/* vx = ux*gami; */
/* vy = uy*gami; */
/* vz = uz*gami; */
         vc[0].v16 = _mm512_mul_ps(v_ux,v_gami);
         vc[1].v16 = _mm512_mul_ps(v_uy,v_gami);
         vc[2].v16 = _mm512_mul_ps(v_uz,v_gami);
/* NOTE(review): v_ll computed here appears unused below - confirm */
         v_ll = _mm512_add_epi32(v_nn,v_mxyv4);
/* deposit charge for one particle at a time */
/* each iteration updates two adjacent 4-component grid points at once:  */
/* int2mask(255) covers the 8 floats scu[nn:nn+7]; mask 119 = 0x77       */
/* updates components 0-2 of each 4-vector, leaving the unused 4th alone */
         for (i = 0; i < 16; i++) {
            nn = kk[i];
/* replicate this particle's (vx,vy,vz) across both halves of the vector */
            vu.v16 = _mm512_setzero_ps();
            vu.v[0] = vc[0].v[i];
            vu.v[1] = vc[1].v[i];
            vu.v[2] = vc[2].v[i];
            v_at = _mm512_permute4f128_ps(vu.v16,0);
/* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */
/* dx = amx*amz;          */
/* dy = amy*amz;          */
/* scu[nn] += vx*dx;      */
/* scu[nn+1] += vy*dx;    */
/* scu[nn+2] += vz*dx;    */
/* dx = dyp*amz;          */
/* scu[nn+4] += vx*dy;    */
/* scu[nn+1+4] += vy*dy;  */
/* scu[nn+2+4] += vz*dy;  */
            vu.v[0] = vv[0].v[i];
            vu.v[4] = vv[1].v[i];
            v_as = (__m512)_mm512_shuffle_epi32((__m512i)vu.v16,0);
            cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
                 &scu[nn]);
            cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
                 &scu[nn+16]);
            cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),v_as,cp);
            _mm512_mask_packstorelo_ps(&scu[nn],_mm512_int2mask(255),
               cp);
            _mm512_mask_packstorehi_ps(&scu[nn+16],_mm512_int2mask(255),
               cp);
            mm = nn + 4*mxv;
/* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */
/* dy = dx1*amz;          */
/* scu[mm] += vx*dx;      */
/* scu[mm+1] += vy*dx;    */
/* scu[mm+2] += vz*dx;    */
/* dx = amx*dzp;          */
/* scu[mm+4] += vx*dy;    */
/* scu[mm+1+4] += vy*dy;  */
/* scu[mm+2+4] += vz*dy;  */
            vu.v[0] = vv[2].v[i];
            vu.v[4] = vv[3].v[i];
            v_as = (__m512)_mm512_shuffle_epi32((__m512i)vu.v16,0);
            cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
                 &scu[mm]);
            cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
                 &scu[mm+16]);
            cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),v_as,cp);
            _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),
               cp);
            _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),
               cp);
            nn += 4*mxyv;
/* load scu[nn:nn+3] and scu[nn+4:nn+7] field components */
/* dy = amy*dzp;          */
/* scu[nn] += vx*dx;      */
/* scu[nn+1] += vy*dx;    */
/* scu[nn+2] += vz*dx;    */
/* dx = dyp*dzp;          */
/* scu[nn+4] += vx*dy;    */
/* scu[nn+1+4] += vy*dy;  */
/* scu[nn+2+4] += vz*dy;  */
            vu.v[0] = vv[4].v[i];
            vu.v[4] = vv[5].v[i];
            v_as = (__m512)_mm512_shuffle_epi32((__m512i)vu.v16,0);
            cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
                 &scu[nn]);
            cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
                 &scu[nn+16]);
            cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),v_as,cp);
            _mm512_mask_packstorelo_ps(&scu[nn],_mm512_int2mask(255),
               cp);
            _mm512_mask_packstorehi_ps(&scu[nn+16],_mm512_int2mask(255),
               cp);
            mm = nn + 4*mxv;
/* load scu[mm:mm+3] and scu[mm+4:mm+7] field components */
/* dy = dx1*dzp;          */
/* scu[mm] += vx*dx;      */
/* scu[mm+1] += vy*dx;    */
/* scu[mm+2] += vz*dx;    */
/* scu[mm+4] += vx*dy;    */
/* scu[mm+1+4] += vy*dy;  */
/* scu[mm+2+4] += vz*dy;  */
            vu.v[0] = vv[6].v[i];
            vu.v[4] = vv[7].v[i];
            v_as = (__m512)_mm512_shuffle_epi32((__m512i)vu.v16,0);
            cp = _mm512_mask_loadunpacklo_ps(cp,_mm512_int2mask(255),
                 &scu[mm]);
            cp = _mm512_mask_loadunpackhi_ps(cp,_mm512_int2mask(255),
                 &scu[mm+16]);
            cp = _mm512_mask_fmadd_ps(v_at,_mm512_int2mask(119),v_as,cp);
            _mm512_mask_packstorelo_ps(&scu[mm],_mm512_int2mask(255),
               cp);
            _mm512_mask_packstorehi_ps(&scu[mm+16],_mm512_int2mask(255),
               cp);
         }
/* advance position half a time-step */
/* dx = x + vx*dt; */
/* dy = y + vy*dt; */
/* dz = z + vz*dt; */
         v_dx = _mm512_fmadd_ps(vc[0].v16,v_dt,v_x);
         v_dy = _mm512_fmadd_ps(vc[1].v16,v_dt,v_y);
         v_dz = _mm512_fmadd_ps(vc[2].v16,v_dt,v_z);
/* reflecting boundary conditions */
         if (ipbc==2) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/*    dx = x;                             */
/*    ppart[j+3*nppmx+npoff] = -ux;       */
/* }                                      */
            msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
            msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
                  _MM_CMPINT_GE));
            v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
            v_ux = _mm512_mask_sub_ps(v_ux,msk,v_zero,v_ux);
/* write output if test result is true for any particle */
            if (msk)
               _mm512_store_ps(&ppart[j+3*nppmx+npoff],v_ux);
/* if ((dy < edgely) || (dy >= edgery)) { */
/*    dy = y;                             */
/*    ppart[j+4*nppmx+npoff] = -uy;       */
/* }                                      */
            msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
            msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
                  _MM_CMPINT_GE));
            v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
            v_uy = _mm512_mask_sub_ps(v_uy,msk,v_zero,v_uy);
/* write output if test result is true for any particle */
            if (msk)
               _mm512_store_ps(&ppart[j+4*nppmx+npoff],v_uy);
/* if ((dz < edgelz) || (dz >= edgerz)) { */
/*    dz = z;                             */
/*    ppart[j+5*nppmx+npoff] = -uz;       */
/* }                                      */
            msk = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT);
            msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dz,v_edgerz,
                  _MM_CMPINT_GE));
            v_dz = _mm512_mask_blend_ps(msk,v_dz,v_z);
            v_uz = _mm512_mask_sub_ps(v_uz,msk,v_zero,v_uz);
/* write output if test result is true for any particle */
            if (msk)
               _mm512_store_ps(&ppart[j+5*nppmx+npoff],v_uz);
         }
/* mixed reflecting/periodic boundary conditions */
         else if (ipbc==3) {
/* if ((dx < edgelx) || (dx >= edgerx)) { */
/*    dx = x;                             */
/*    ppart[j+3*nppmx+npoff] = -ux;       */
/* }                                      */
            msk = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT);
            msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dx,v_edgerx,
                  _MM_CMPINT_GE));
            v_dx = _mm512_mask_blend_ps(msk,v_dx,v_x);
            v_ux = _mm512_mask_sub_ps(v_ux,msk,v_zero,v_ux);
/* write output if test result is true for any particle */
            if (msk)
               _mm512_store_ps(&ppart[j+3*nppmx+npoff],v_ux);
/* if ((dy < edgely) || (dy >= edgery)) { */
/*    dy = y;                             */
/*    ppart[j+4*nppmx+npoff] = -uy;       */
/* }                                      */
            msk = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT);
            msk = _mm512_kor(msk,_mm512_cmp_ps_mask(v_dy,v_edgery,
                  _MM_CMPINT_GE));
            v_dy = _mm512_mask_blend_ps(msk,v_dy,v_y);
            v_uy = _mm512_mask_sub_ps(v_uy,msk,v_zero,v_uy);
/* write output if test result is true for any particle */
            if (msk)
               _mm512_store_ps(&ppart[j+4*nppmx+npoff],v_uy);
         }
/* set new position */
/* ppart[j+npoff] = dx;         */
/* ppart[j+nppmx+npoff] = dy;   */
/* ppart[j+2*nppmx+npoff] = dz; */
         _mm512_store_ps(&ppart[j+npoff],v_dx);
         _mm512_store_ps(&ppart[j+nppmx+npoff],v_dy);
         _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_dz);
      }
/* loop over remaining particles (scalar tail of the 16-wide loop) */
      for (j = nps; j < npp; j++) {
/* find interpolation weights */
         x = ppart[j+npoff];
         y = ppart[j+nppmx+npoff];
         z = ppart[j+2*nppmx+npoff];
         nn = x;
         mm = y;
         ll = z;
         dxp = qm*(x - (float) nn);
         dyp = y - (float) mm;
         dzp = z - (float) ll;
/* find inverse gamma */
         ux = ppart[j+3*nppmx+npoff];
         uy = ppart[j+4*nppmx+npoff];
         uz = ppart[j+5*nppmx+npoff];
         p2 = ux*ux + uy*uy + uz*uz;
         gami = 1.0f/sqrtf(1.0f + p2*ci2);
/* calculate weights */
         nn = 4*(nn - noff + mxv*(mm - moff) + mxyv*(ll - loff));
         amx = qm - dxp;
         amy = 1.0f - dyp;
         dx1 = dxp*dyp;
         dyp = amx*dyp;
         amx = amx*amy;
         amz = 1.0f - dzp;
         amy = dxp*amy;
/* deposit current within tile to local accumulator */
         dx = amx*amz;
         dy = amy*amz;
         vx = ux*gami;
         vy = uy*gami;
         vz = uz*gami;
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         dx = dyp*amz;
         scu[nn+4] += vx*dy;
         scu[nn+1+4] += vy*dy;
         scu[nn+2+4] += vz*dy;
         dy = dx1*amz;
         mm = nn + 4*mxv;
         scu[mm] += vx*dx;
         scu[mm+1] += vy*dx;
         scu[mm+2] += vz*dx;
         dx = amx*dzp;
         scu[mm+4] += vx*dy;
         scu[mm+1+4] += vy*dy;
         scu[mm+2+4] += vz*dy;
         dy = amy*dzp;
         nn += 4*mxyv;
         scu[nn] += vx*dx;
         scu[nn+1] += vy*dx;
         scu[nn+2] += vz*dx;
         dx = dyp*dzp;
         scu[nn+4] += vx*dy;
         scu[nn+1+4] += vy*dy;
         scu[nn+2+4] += vz*dy;
         dy = dx1*dzp;
         mm = nn + 4*mxv;
         scu[mm] += vx*dx;
         scu[mm+1] += vy*dx;
         scu[mm+2] += vz*dx;
         scu[mm+4] += vx*dy;
         scu[mm+1+4] += vy*dy;
         scu[mm+2+4] += vz*dy;
/* advance position half a time-step */
         dx = x + vx*dt;
         dy = y + vy*dt;
         dz = z + vz*dt;
/* reflecting boundary conditions */
         if (ipbc==2) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = x;
               ppart[j+3*nppmx+npoff] = -ux;
            }
            if ((dy < edgely) || (dy >= edgery)) {
               dy = y;
               ppart[j+4*nppmx+npoff] = -uy;
            }
            if ((dz < edgelz) || (dz >= edgerz)) {
               dz = z;
               ppart[j+5*nppmx+npoff] = -uz;
            }
         }
/* mixed reflecting/periodic boundary conditions */
         else if (ipbc==3) {
            if ((dx < edgelx) || (dx >= edgerx)) {
               dx = x;
               ppart[j+3*nppmx+npoff] = -ux;
            }
            if ((dy < edgely) || (dy >= edgery)) {
               dy = y;
               ppart[j+4*nppmx+npoff] = -uy;
            }
         }
/* set new position */
         ppart[j+npoff] = dx;
         ppart[j+nppmx+npoff] = dy;
         ppart[j+2*nppmx+npoff] = dz;
      }
/* deposit current to interior points in global array */
/* interior points belong to this tile only, so no atomics are needed */
      nn = nxv - noff;
      nn = mx < nn ? mx : nn;
      mm = nyv - moff;
      mm = my < mm ? my : mm;
      ll = nzv - loff;
      ll = mz < ll ? mz : ll;
      nps = 4*(nn/4);
      for (k = 1; k < ll; k++) {
         for (j = 1; j < mm; j++) {
/* vector loop over elements in blocks of 4 */
/* for (i = 1; i < nn; i++) {                      */
/*    cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]    */
/*    += scu[4*(i+mxv*j+mxyv*k)];                  */
/*    cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]  */
/*    += scu[1+4*(i+mxv*j+mxyv*k)];                */
/*    cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]  */
/*    += scu[2+4*(i+mxv*j+mxyv*k)];                */
/* }                                               */
            for (i = 0; i < nps; i+=4) {
               m = 4*(i + mxv*j + mxyv*k);
               v_as = _mm512_loadunpacklo_ps(v_as,&scu[m]);
               v_as = _mm512_loadunpackhi_ps(v_as,&scu[m+16]);
               m = 4*(i + noff + nxv*(j + moff) + nxyv*(k + loff));
               v_at = _mm512_loadunpacklo_ps(v_at,&cu[m]);
               v_at = _mm512_loadunpackhi_ps(v_at,&cu[m+16]);
/* skip add for first elements for i = 0 */
/* (v_m keeps lanes 0-3, the i = 0 grid point, unmodified) */
               if (i==0)
                  v_at = _mm512_mask_add_ps(v_at,v_m,v_at,v_as);
               else
                  v_at = _mm512_add_ps(v_at,v_as);
               _mm512_packstorelo_ps(&cu[m],v_at);
               _mm512_packstorehi_ps(&cu[m+16],v_at);
            }
/* loop over remaining elements */
            m = 1 > nps ? 1 : nps;
            for (i = m; i < nn; i++) {
               cu[4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
               += scu[4*(i+mxv*j+mxyv*k)];
               cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
               += scu[1+4*(i+mxv*j+mxyv*k)];
               cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(k+loff))]
               += scu[2+4*(i+mxv*j+mxyv*k)];
            }
         }
      }
/* deposit current to edge points in global array */
/* edge/guard points overlap neighboring tiles handled by other threads, */
/* so these updates must be atomic                                       */
      lm = nzv - loff;
      lm = mz+1 < lm ? mz+1 : lm;
      for (j = 1; j < mm; j++) {
         for (i = 1; i < nn; i++) {
#pragma omp atomic
            cu[4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[4*(i+mxv*j)];
#pragma omp atomic
            cu[1+4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[1+4*(i+mxv*j)];
#pragma omp atomic
            cu[2+4*(i+noff+nxv*(j+moff)+nxyv*loff)] += scu[2+4*(i+mxv*j)];
            if (lm > mz) {
#pragma omp atomic
               cu[4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
               += scu[4*(i+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
               cu[1+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
               += scu[1+4*(i+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
               cu[2+4*(i+noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
               += scu[2+4*(i+mxv*j+mxyv*(lm-1))];
            }
         }
      }
      nm = nxv - noff;
      nm = mx+1 < nm ? mx+1 : nm;
      mm = nyv - moff;
      mm = my+1 < mm ? my+1 : mm;
      for (k = 0; k < ll; k++) {
         for (i = 1; i < nn; i++) {
#pragma omp atomic
            cu[4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[4*(i+mxyv*k)];
#pragma omp atomic
            cu[1+4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[1+4*(i+mxyv*k)];
#pragma omp atomic
            cu[2+4*(i+noff+nxv*moff+nxyv*(k+loff))] += scu[2+4*(i+mxyv*k)];
            if (mm > my) {
#pragma omp atomic
               cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
               += scu[4*(i+mxv*(mm-1)+mxyv*k)];
#pragma omp atomic
               cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
               += scu[1+4*(i+mxv*(mm-1)+mxyv*k)];
#pragma omp atomic
               cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(k+loff))]
               += scu[2+4*(i+mxv*(mm-1)+mxyv*k)];
            }
         }
         for (j = 0; j < mm; j++) {
#pragma omp atomic
            cu[4*(noff+nxv*(j+moff)+nxyv*(k+loff))] += scu[4*(mxv*j+mxyv*k)];
#pragma omp atomic
            cu[1+4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
            += scu[1+4*(mxv*j+mxyv*k)];
#pragma omp atomic
            cu[2+4*(noff+nxv*(j+moff)+nxyv*(k+loff))]
            += scu[2+4*(mxv*j+mxyv*k)];
            if (nm > mx) {
#pragma omp atomic
               cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
               += scu[4*(nm-1+mxv*j+mxyv*k)];
#pragma omp atomic
               cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
               += scu[1+4*(nm-1+mxv*j+mxyv*k)];
#pragma omp atomic
               cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(k+loff))]
               += scu[2+4*(nm-1+mxv*j+mxyv*k)];
            }
         }
      }
      if (lm > mz) {
         for (i = 1; i < nn; i++) {
#pragma omp atomic
            cu[4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
            += scu[4*(i+mxyv*(lm-1))];
#pragma omp atomic
            cu[1+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
            += scu[1+4*(i+mxyv*(lm-1))];
#pragma omp atomic
            cu[2+4*(i+noff+nxv*moff+nxyv*(lm+loff-1))]
            += scu[2+4*(i+mxyv*(lm-1))];
            if (mm > my) {
#pragma omp atomic
               cu[4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
               += scu[4*(i+mxv*(mm-1)+mxyv*(lm-1))];
#pragma omp atomic
               cu[1+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
               += scu[1+4*(i+mxv*(mm-1)+mxyv*(lm-1))];
#pragma omp atomic
               cu[2+4*(i+noff+nxv*(mm+moff-1)+nxyv*(lm+loff-1))]
               += scu[2+4*(i+mxv*(mm-1)+mxyv*(lm-1))];
            }
         }
         for (j = 0; j < mm; j++) {
#pragma omp atomic
            cu[4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
            += scu[4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
            cu[1+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
            += scu[1+4*(mxv*j+mxyv*(lm-1))];
#pragma omp atomic
            cu[2+4*(noff+nxv*(j+moff)+nxyv*(lm+loff-1))]
            += scu[2+4*(mxv*j+mxyv*(lm-1))];
            if (nm > mx) {
#pragma omp atomic
               cu[4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
               += scu[4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
               cu[1+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
               += scu[1+4*(nm-1+mxv*j+mxyv*(lm-1))];
#pragma omp atomic
               cu[2+4*(nm+noff-1+nxv*(j+moff)+nxyv*(lm+loff-1))]
               += scu[2+4*(nm-1+mxv*j+mxyv*(lm-1))];
            }
         }
      }
   }
   return;
#undef MXV
#undef MYV
#undef MZV
}

/*--------------------------------------------------------------------*/
void ckncpporder3lt(float ppart[], float ppbuff[], int kpic[],
                    int ncl[], int ihole[], int idimp, int nppmx,
                    int nx, int ny, int nz, int mx, int my, int mz,
                    int mx1, int my1, int mz1, int npbmx, int ntmax,
                    int *irc) {
/* this subroutine sorts particles by x,y,z grid in tiles of mx, my, mz
   linear interpolation, with periodic boundary conditions
   tiles are assumed to be arranged in 3D linear memory
   algorithm has 3 steps.  first, one finds particles leaving tile and
   stores their number in each direction, location, and destination in
   ncl and ihole.  second, a prefix scan of ncl is performed and
   departing particles are buffered in ppbuff in direction order.
   finally, we copy the incoming particles from other tiles into ppart.
input: all except ppbuff, ncl, ihole, irc output: ppart, ppbuff, kpic, ncl, ihole, irc ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = position z of particle n in tile m ppbuff[m][i][n] = i co-ordinate of particle n in tile m kpic[m] = number of particles in tile m ncl[m][i] = number of particles going to destination i, tile m ihole[m][:][0] = location of hole in array left by departing particle ihole[m][:][1] = direction destination of particle leaving hole all for tile m ihole[m][0][0] = ih, number of holes left (error, if negative) idimp = size of phase space = 6 nppmx = maximum number of particles in tile nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mz1 = (system length in z direction - 1)/mz + 1 npbmx = size of buffer array ppbuff ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 requires KNC, ppart, ppbuff need to be 64 byte aligned nppmx, npbmx need to be a multiple of 16 local data */ int mxy1, mxyz1, noff, moff, loff, npoff, npp, nps, nboff, ncoff; int i, j, k, l, ii, kx, ky, kz, ih, nh, ist, nn, mm, ll; int ip, j1, j2, kxl, kxr, kk, kl, kr, lk, lr; float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dx, dy, dz; int ks[26]; __m512i v_ist, v_it, v_0, v_1, v_3, v_9; __m512i v_m1, v_m2, v_m3, v_npp, v_mm, v_is, v_it0, v_ioff; __m512 v_anx, v_any, v_anz; __m512 v_dx, v_dy, v_dz, v_x; __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz; __m512 v_zero; __mmask16 msk1, msk2; __attribute__((aligned(64))) unsigned int ls[32], lm[32]; mxy1 = mx1*my1; mxyz1 = mxy1*mz1; anx = (float) nx; any = (float) ny; anz = (float) nz; v_0 = _mm512_set1_epi32(0); v_1 = _mm512_set1_epi32(1); v_3 = _mm512_set1_epi32(3); v_9 = _mm512_set1_epi32(9); v_anx = 
_mm512_set1_ps(anx); v_any = _mm512_set1_ps(any); v_anz = _mm512_set1_ps(anz); v_zero = _mm512_setzero_ps(); /* find and count particles leaving tiles and determine destination */ /* update ppart, ihole, ncl */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,ii,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ih,nh,ist,dx, \ dy,dz,edgelx,edgely,edgelz,edgerx,edgery,edgerz,v_it,v_ist,v_edgelx, \ v_edgely,v_edgelz,v_edgerx,v_edgery,v_edgerz,v_dx,v_dy,v_dz,v_x,msk1, \ msk2,ls) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); npp = kpic[l]; npoff = idimp*nppmx*l; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? my : mm; ll = nz - loff; ll = mz < ll ? mz : ll; ih = 0; nh = 0; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; edgelz = loff; edgerz = loff + ll; noff = (ntmax+1)*l; v_edgelx = _mm512_set1_ps(edgelx); v_edgely = _mm512_set1_ps(edgely); v_edgelz = _mm512_set1_ps(edgelz); v_edgerx = _mm512_set1_ps(edgerx); v_edgery = _mm512_set1_ps(edgery); v_edgerz = _mm512_set1_ps(edgerz); /* clear counters */ /* for (j = 0; j < 26; j++) { */ /* ncl[j+26*l] = 0; */ /* } */ memset((void*)&ncl[26*l],0,26*sizeof(int)); nps = 16*(npp/16); /* loop over particles in tile in blocks of 16 */ for (j = 0; j < nps; j+=16) { /* dx = ppart[j+npoff]; */ /* dy = ppart[j+nppmx+npoff]; */ /* dz = ppart[j+2*nppmx+npoff]; */ v_dx = _mm512_load_ps(&ppart[j+npoff]); v_dy = _mm512_load_ps(&ppart[j+nppmx+npoff]); v_dz = _mm512_load_ps(&ppart[j+2*nppmx+npoff]); /* find particles going out of bounds */ /* ist = 0; */ v_ist = _mm512_setzero_epi32(); /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* ist = direction particle is going */ /* if (dx >= edgerx) { */ /* if (dx >= anx) */ /* ppart[j+npoff] = dx - anx; */ /* ist = 2; */ /* } 
*/ msk1 = _mm512_cmp_ps_mask(v_dx,v_edgerx,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dx; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_1,v_1); v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it); msk1 = _mm512_cmp_ps_mask(v_dx,v_anx,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dx,v_anx); ii = _mm512_mask2int(msk1); if (ii != 0) _mm512_store_ps(&ppart[j+npoff],v_x); } /* if (dx < edgelx) { */ /* if (dx < 0.0) { */ /* dx += anx; */ /* if (dx < anx) */ /* ist = 1; */ /* else */ /* dx = 0.0; */ /* ppart[j+npoff] = dx; */ /* } */ /* else { */ /* ist = 1; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_1); msk2 = _mm512_cmp_ps_mask(v_dx,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dx,v_anx); msk1 = _mm512_cmp_ps_mask(v_x,v_anx,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_ist = _mm512_add_epi32(v_ist,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) _mm512_store_ps(&ppart[j+npoff],v_x); } } /* if (dy >= edgery) { */ /* if (dy >= any) */ /* ppart[j+nppmx+npoff] = dy - any; */ /* ist += 6; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dy,v_edgery,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dy; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_3,v_3); v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it); msk1 = _mm512_cmp_ps_mask(v_dy,v_any,_MM_CMPINT_GE); v_x = 
_mm512_mask_sub_ps(v_x,msk1,v_dy,v_any); ii = _mm512_mask2int(msk1); if (ii != 0) _mm512_store_ps(&ppart[j+nppmx+npoff],v_x); } /* if (dy < edgely) { */ /* if (dy < 0.0) { */ /* dy += any; */ /* if (dy < any) */ /* ist += 3; */ /* else */ /* dy = 0.0; */ /* ppart[j+nppmx+npoff] = dy; */ /* } */ /* else { */ /* ist += 3; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_3); msk2 = _mm512_cmp_ps_mask(v_dy,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dy,v_any); msk1 = _mm512_cmp_ps_mask(v_x,v_any,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_ist = _mm512_add_epi32(v_ist,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) _mm512_store_ps(&ppart[j+nppmx+npoff],v_x); } } /* if (dz >= edgerz) { */ /* if (dz >= anz) */ /* ppart[j+2*nppmx+npoff] = dz - anz; */ /* ist += 18; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dz; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_9,v_9); v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it); msk1 = _mm512_cmp_ps_mask(v_dz,v_anz,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dz,v_anz); ii = _mm512_mask2int(msk1); if (ii != 0) _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_x); } /* if (dz < edgelz) { */ /* if (dz < 0.0) { */ /* dz += anz; */ /* if (dz < anz) */ /* ist += 9; */ /* else */ /* dz = 0.0; */ /* ppart[j+2*nppmx+npoff] = dz; */ /* } */ /* else { */ /* ist += 9; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_9); msk2 = 
_mm512_cmp_ps_mask(v_dz,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dz,v_anz); msk1 = _mm512_cmp_ps_mask(v_x,v_anz,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_ist = _mm512_add_epi32(v_ist,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_x); } } /* increment counters */ /* if (ist > 0) { */ /* ncl[ist+26*l-1] += 1; */ /* ih += 1; */ /* if (ih <= ntmax) { */ /* ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; */ /* ihole[1+2*(ih+(ntmax+1)*l)] = ist; */ /* } */ /* else { */ /* nh = 1; */ /* } */ /* } */ _mm512_store_epi32(ls,v_ist); for (i = 0; i < 16; i++) { ist = ls[i]; if (ist > 0) { ncl[ist+26*l-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+noff)] = j + i + 1; ihole[1+2*(ih+noff)] = ist; } else { nh = 1; } } } } /* loop over remaining particles in tile */ for (j = nps; j < npp; j++) { dx = ppart[j+npoff]; dy = ppart[j+nppmx+npoff]; dz = ppart[j+2*nppmx+npoff]; /* find particles going out of bounds */ ist = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* ist = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) ppart[j+npoff] = dx - anx; ist = 2; } else if (dx < edgelx) { if (dx < 0.0) { dx += anx; if (dx < anx) ist = 1; else dx = 0.0; ppart[j+npoff] = dx; } else { ist = 1; } } if (dy >= edgery) { if (dy >= any) ppart[j+nppmx+npoff] = dy - any; ist += 6; } else if (dy < edgely) { if (dy < 0.0) { dy += any; if (dy < any) ist += 3; else dy = 0.0; ppart[j+nppmx+npoff] = dy; } else { ist += 3; } } if (dz >= edgerz) { if (dz >= anz) ppart[j+2*nppmx+npoff] = dz - anz; ist += 18; } else if (dz < edgelz) { if (dz < 0.0) { dz += anz; if (dz < anz) ist += 9; else dz = 0.0; ppart[j+2*nppmx+npoff] = dz; } else { ist += 9; } } if (ist > 0) { ncl[ist+26*l-1] += 1; ih += 1; if (ih <= 
ntmax) { ihole[2*(ih+noff)] = j + 1; ihole[1+2*(ih+noff)] = ist; } else { nh = 1; } } } /* set error and end of file flag */ if (nh > 0) { *irc = ih; ih = -ih; } ihole[2*noff] = ih; } /* ihole overflow */ if (*irc > 0) return; /* buffer particles that are leaving tile: update ppbuff, ncl */ /* loop over tiles */ msk1 = _mm512_int2mask(1023); v_m1 = _mm512_set_epi32(11,11,11,11,11,10,9,8,3,3,3,3,3,2,1,0); v_m2 = _mm512_set_epi32(7,7,7,7,7,7,7,7,7,6,5,4,3,2,1,0); #pragma omp parallel for \ private(i,j,l,npoff,nboff,noff,nps,mm,ii,ll,j1,ist,nh,ip,v_it,v_is, \ v_it0,v_ioff,ls,lm) for (l = 0; l < mxyz1; l++) { npoff = idimp*nppmx*l; nboff = idimp*npbmx*l; noff = (ntmax+1)*l; /* find address offset for ordered ppbuff array */ /* isum = 0; */ /* for (j = 0; j < 26; j++) { */ /* ist = ncl[j+26*l]; */ /* ncl[j+26*l] = isum; */ /* isum += ist; */ /* } */ /* perform exclusive prefix scan */ /* load 26 data elements into 32 length vector with zero padding */ mm = 26*l; v_it = _mm512_loadunpacklo_epi32(v_0,&ncl[mm]); v_it = _mm512_loadunpackhi_epi32(v_it,&ncl[mm+16]); _mm512_store_epi32(ls,v_it); v_is = _mm512_mask_loadunpacklo_epi32(v_0,msk1,&ncl[mm+16]); v_is = _mm512_mask_loadunpackhi_epi32(v_is,msk1,&ncl[mm+32]); _mm512_store_epi32(&ls[16],v_is); v_ioff = _mm512_setzero_epi32(); /* vector loop over elements in blocks of 16 */ for (j = 0; j < 32; j+=16) { /* load data */ v_it0 = _mm512_load_epi32(&ls[j]); /* first pass */ v_is = _mm512_shuffle_epi32(v_it0,177); v_it = _mm512_mask_add_epi32(v_it0,_mm512_int2mask(43690), v_it0,v_is); /* second pass */ v_is = _mm512_shuffle_epi32(v_it,80); v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(52428),v_it, v_is); /* third pass */ v_is = _mm512_permutevar_epi32(v_m1,v_it); v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(61680),v_it, v_is); /* fourth pass */ v_is = _mm512_permutevar_epi32(v_m2,v_it); v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(65280),v_it, v_is); /* add offset */ v_it = _mm512_add_epi32(v_it,v_ioff); /* next 
offset */ if (j==0) { v_ioff = _mm512_shuffle_epi32(v_it,255); v_ioff = _mm512_permute4f128_epi32(v_ioff,255); } /* subtract for exclusive scan */ v_it = _mm512_sub_epi32(v_it,v_it0); /* write data */ _mm512_store_epi32(&ls[j],v_it); } nh = ihole[2*noff]; nps = 16*(nh/16); /* nps = (nh >> 4) << 4; */ ip = 0; /* loop over particles leaving tile in groups of 16 */ for (j = 0; j < nps; j+=16) { /* j1 = ihole[2*(j+1+(ntmax+1)*l)] - 1; */ /* ist = ihole[1+2*(j+1+(ntmax+1)*l)]; */ mm = 2*(j+1+noff); v_it = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]); v_it = _mm512_loadunpackhi_epi32(v_it,&ihole[mm+16]); _mm512_store_epi32(lm,v_it); mm += 16; v_is = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]); v_is = _mm512_loadunpackhi_epi32(v_is,&ihole[mm+16]); _mm512_store_epi32(&lm[16],v_is); /* buffer particles that are leaving tile, in direction order */ for (ll = 0; ll < 16; ll++) { j1 = lm[2*ll] - 1; ist = lm[1+2*ll]; ii = ls[ist-1]; if (ii < npbmx) { for (i = 0; i < idimp; i++) { ppbuff[ii+npbmx*i+nboff] = ppart[j1+nppmx*i+npoff]; } } else { ip = 1; } ls[ist-1] = ii + 1; } } /* loop over remaining particles leaving tile */ for (j = nps; j < nh; j++) { /* buffer particles that are leaving tile, in direction order */ j1 = ihole[2*(j+1+noff)] - 1; ist = ihole[1+2*(j+1+noff)]; ii = ls[ist-1]; if (ii < npbmx) { for (i = 0; i < idimp; i++) { ppbuff[ii+npbmx*i+nboff] = ppart[j1+nppmx*i+npoff]; } } else { ip = 1; } ls[ist-1] = ii + 1; } /* store 26 data elements into ncl */ mm = 26*l; v_it = _mm512_load_epi32(ls); v_is = _mm512_load_epi32(&ls[16]); _mm512_packstorelo_epi32(&ncl[mm],v_it); _mm512_packstorehi_epi32(&ncl[mm+16],v_it); _mm512_mask_packstorelo_epi32(&ncl[mm+16],msk1,v_is); _mm512_mask_packstorehi_epi32(&ncl[mm+32],msk1,v_is); /* set error */ if (ip > 0) *irc = ncl[25+26*l]; } /* ppbuff overflow */ if (*irc > 0) return; /* copy incoming particles from buffer into ppart: update ppart, kpic */ /* loop over tiles */ v_ioff = _mm512_set_epi32(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0); 
v_m1 = _mm512_set1_epi32(nppmx); #pragma omp parallel for \ private(i,j,k,l,ii,kk,npp,nps,npoff,noff,nboff,kx,ky,kz,kl,kr,kxl,kxr, \ lk,ll,lr,ih,nh,nn,mm,ncoff,ist,j1,j2,ip,v_m2,v_m3,v_it,v_is,v_it0,v_mm, \ v_npp,v_x,msk1,ks,ls) for (l = 0; l < mxyz1; l++) { npp = kpic[l]; npoff = idimp*nppmx*l; noff = (ntmax+1)*l; v_m2 = _mm512_set1_epi32(noff+1); v_m3 = _mm512_set1_epi32(npoff); kz = l/mxy1; k = l - mxy1*kz; /* loop over tiles in z, assume periodic boundary conditions */ lk = kz*mxy1; /* find tile behind */ ll = kz - 1; if (ll < 0) ll += mz1; ll = ll*mxy1; /* find tile in front */ lr = kz + 1; if (lr >= mz1) lr -= mz1; lr = lr*mxy1; ky = k/mx1; /* loop over tiles in y, assume periodic boundary conditions */ kk = ky*mx1; /* find tile above */ kl = ky - 1; if (kl < 0) kl += my1; kl = kl*mx1; /* find tile below */ kr = ky + 1; if (kr >= my1) kr -= my1; kr = kr*mx1; /* loop over tiles in x, assume periodic boundary conditions */ kx = k - ky*mx1; kxl = kx - 1 ; if (kxl < 0) kxl += mx1; kxr = kx + 1; if (kxr >= mx1) kxr -= mx1; /* find tile number for different directions */ ks[0] = kxr + kk + lk; ks[1] = kxl + kk + lk; ks[2] = kx + kr + lk; ks[3] = kxr + kr + lk; ks[4] = kxl + kr + lk; ks[5] = kx + kl + lk; ks[6] = kxr + kl + lk; ks[7] = kxl + kl + lk; ks[8] = kx + kk + lr; ks[9] = kxr + kk + lr; ks[10] = kxl + kk + lr; ks[11] = kx + kr + lr; ks[12] = kxr + kr + lr; ks[13] = kxl + kr + lr; ks[14] = kx + kl + lr; ks[15] = kxr + kl + lr; ks[16] = kxl + kl + lr; ks[17] = kx + kk + ll; ks[18] = kxr + kk + ll; ks[19] = kxl + kk + ll; ks[20] = kx + kr + ll; ks[21] = kxr + kr + ll; ks[22] = kxl + kr + ll; ks[23] = kx + kl + ll; ks[24] = kxr + kl + ll; ks[25] = kxl + kl + ll; /* loop over directions */ nh = ihole[2*noff]; ncoff = 0; ih = 0; ist = 0; j1 = 0; v_it0 = _mm512_set1_epi32(nh); v_is = _mm512_add_epi32(v_m2,v_it0); v_it0 = _mm512_sub_epi32(v_ioff,v_it0); v_npp = _mm512_set1_epi32(npp); for (ii = 0; ii < 26; ii++) { nboff = idimp*npbmx*ks[ii]; if (ii > 0) ncoff = 
ncl[ii-1+26*ks[ii]]; /* ip = number of particles coming from direction ii */ ip = ncl[ii+26*ks[ii]] - ncoff; /* nps = 16*(ip/16); */ nps = (ip >> 4) << 4; /* loop over particles in this direction in groups of 16 */ for (j = 0; j < nps; j+=16) { /* insert incoming particles into holes */ /* ih += 1; */ /* if (ih <= nh) { */ /* j1 = ihole[2*(ih+noff)] - 1; */ /* } */ /* place overflow at end of array */ /* else { */ /* j1 = npp; */ /* npp += 1; */ /* } */ v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_it0); msk1 = _mm512_cmp_epi32_mask(v_mm,v_0,_MM_CMPINT_LT); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_is); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_mm); v_mm = _mm512_mask_add_epi32(v_mm,_mm512_knot(msk1),v_mm, v_npp); v_it = _mm512_mask_i32gather_epi32(v_mm,msk1,v_mm, (int *)ihole,4); v_it = _mm512_mask_sub_epi32(v_it,msk1,v_it,v_1); ih += 16; nn = ih - nh; if (nn > 0) { nn = nn < 16 ? nn : 16; npp += nn; } msk1 = _mm512_cmp_epi32_mask(v_it,v_m1,_MM_CMPINT_LT); ll = _mm512_mask2int(_mm512_knot(msk1)); v_it = _mm512_add_epi32(v_it,v_m3); for (i = 0; i < idimp; i++) { /* if (j1 < nppmx) */ /* ppart[j1+nppmx*i+npoff] */ /* = ppbuff[j+ncoff+npbmx*i+nboff]; */ mm = j + ncoff + npbmx*i + nboff; v_x = _mm512_loadunpacklo_ps(v_x,&ppbuff[mm]); v_x = _mm512_loadunpackhi_ps(v_x,&ppbuff[mm+16]); if (ll==0) { _mm512_i32scatter_ps((float *)ppart,v_it,v_x,4); } else { _mm512_mask_i32scatter_ps((float *)ppart,msk1,v_it, v_x,4); } v_it = _mm512_add_epi32(v_it,v_m1); } if (ll != 0) { ist = 1; } } /* loop over remaining particles in this direction */ for (j = nps; j < ip; j++) { ih += 1; /* insert incoming particles into holes */ if (ih <= nh) { j1 = ihole[2*(ih+(ntmax+1)*l)] - 1; } /* place overflow at end of array */ else { j1 = npp; npp += 1; } if (j1 < nppmx) { for (i = 0; i < idimp; i++) { ppart[j1+nppmx*i+npoff] = ppbuff[j+ncoff+npbmx*i+nboff]; } } else { ist = 1; } } } /* set error */ if (ist > 0) *irc = j1+1; /* fill up remaining holes in particle array with particles 
from bottom */ /* holes with locations great than npp-ip do not need to be filled */ if (ih < nh) { ip = nh - ih; ii = nh; nn = ihole[2*(ii+noff)] - 1; v_it0 = _mm512_set1_epi32(nn); ih += 1; j2 = ihole[2*(ih+noff)] - 1; v_m2 = _mm512_sub_epi32(v_m2,v_1); /* move particles from end into remaining holes */ /* holes are processed in increasing order */ /* nps = 16*(ip/16); */ nps = (ip >> 4) << 4; /* loop over particles in groups of 16 */ for (j = 0; j < nps; j+=16) { /* j2 = ihole[2*(ih+noff)] - 1; */ v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_ioff); v_mm = _mm512_add_epi32(v_mm,v_m2); v_mm = _mm512_add_epi32(v_mm,v_mm); v_is = _mm512_i32gather_epi32(v_mm,(int *)ihole,4); v_is = _mm512_sub_epi32(v_is,v_1); /* j1 = npp - j - 1; */ /* if (j1==nn) { */ /* ii -= 1; */ /* nn = ihole[2*(ii+(ntmax+1)*l)] - 1; */ /* } */ kk = 0; for (ll = 0; ll < 16; ll++) { j1 = npp - j - ll - 1; if (j1==nn) { ii -= 1; nn = ihole[2*(ii+(ntmax+1)*l)] - 1; } else { ls[kk] = j1; kk += 1; } } v_it = _mm512_load_epi32(ls); v_it0 = _mm512_set1_epi32(kk); msk1 = _mm512_cmp_epi32_mask(v_ioff,v_it0,_MM_CMPINT_LT); v_is = _mm512_add_epi32(v_is,v_m3); v_it = _mm512_add_epi32(v_it,v_m3); for (i = 0; i < idimp; i++) { /* ppart[j2+nppmx*i+npoff] */ /* = ppart[j1+nppmx*i+npoff]; */ if (kk==16) { v_x = _mm512_i32gather_ps(v_it,(float *)ppart,4); _mm512_i32scatter_ps((float *)ppart,v_is,v_x,4); } else { v_x = _mm512_mask_i32gather_ps(v_zero,msk1,v_it, (float *)ppart,4); _mm512_mask_i32scatter_ps((float *)ppart,msk1,v_is, v_x,4); } v_is = _mm512_add_epi32(v_is,v_m1); v_it = _mm512_add_epi32(v_it,v_m1); } ih += kk; /* holes with locations great than npp-ip do not need to be filled */ } /* loop over remaining particles */ if (nps < ip) { nn = ihole[2*(ii+noff)] - 1; j2 = ihole[2*(ih+noff)] - 1; } for (j = nps; j < ip; j++) { j1 = npp - j - 1; if (j1==nn) { ii -= 1; nn = ihole[2*(ii+noff)] - 1; } else { for (i = 0; i < idimp; i++) { ppart[j2+nppmx*i+npoff] = ppart[j1+nppmx*i+npoff]; } ih += 1; j2 = 
ihole[2*(ih+(ntmax+1)*l)] - 1;
            }
         }
         npp -= ip;
      }
      kpic[l] = npp;
   }
   return;
}

/*--------------------------------------------------------------------*/
void ckncpporderf3lt(float ppart[], float ppbuff[], int kpic[],
                     int ncl[], int ihole[], int idimp, int nppmx,
                     int mx1, int my1, int mz1, int npbmx, int ntmax,
                     int *irc) {
/* this subroutine sorts particles by x,y,z grid in tiles of mx, my, mz
   linear interpolation, with periodic boundary conditions
   tiles are assumed to be arranged in 3D linear memory
   the algorithm has 2 steps.  first, a prefix scan of ncl is performed
   and departing particles are buffered in ppbuff in direction order.
   then we copy the incoming particles from other tiles into ppart.
   it assumes that the number, location, and destination of particles
   leaving a tile have been previously stored in ncl and ihole by the
   ckncgppushf3lt subroutine.
   input: all except ppbuff, irc
   output: ppart, ppbuff, kpic, ncl, irc
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = position z of particle n in tile m
   ppbuff[m][i][n] = i co-ordinate of particle n in tile m
   kpic[m] = number of particles in tile m
   ncl[m][i] = number of particles going to destination i, tile m
   ihole[m][:][0] = location of hole in array left by departing particle
   ihole[m][:][1] = direction destination of particle leaving hole
   all for tile m
   ihole[m][0][0] = ih, number of holes left (error, if negative)
   idimp = size of phase space = 6
   nppmx = maximum number of particles in tile
   mx1 = (system length in x direction - 1)/mx + 1
   my1 = (system length in y direction - 1)/my + 1
   mz1 = (system length in z direction - 1)/mz + 1
   npbmx = size of buffer array ppbuff
   ntmax = size of hole array for particles leaving tiles
   irc = maximum overflow, returned only if error occurs, when irc > 0
   requires KNC, ppart, ppbuff need to be 64 byte aligned
   nppmx, npbmx need to be a multiple of 16
   NOTE(review): uses first-generation Xeon Phi (IMCI) intrinsics such as
   _mm512_loadunpacklo/packstore; these have no direct AVX-512
   equivalents, so this routine is KNC-only as the header states.
local data */
   int mxy1, mxyz1, noff, npp, npoff, nps, nboff, ncoff;
   int i, j, k, l, ii, kx, ky, kz, ih, nh, ist, nn, mm, ll;
   int ip, j1, j2, kxl, kxr, kk, kl, kr, lk, lr;
   int ks[26];
   __m512i v_it, v_0, v_1;
   __m512i v_m1, v_m2, v_m3, v_npp, v_mm, v_is, v_it0, v_ioff;
   __m512 v_x, v_zero;
   __mmask16 msk1;
/* ls, lm = 64-byte aligned scratch arrays for vector load/store of */
/* the 26-element ncl counters and the packed ihole (location,dest)  */
/* pairs, zero-padded to 32 ints                                     */
   __attribute__((aligned(64))) unsigned int ls[32], lm[32];
   mxy1 = mx1*my1;
   mxyz1 = mxy1*mz1;
   v_0 = _mm512_set1_epi32(0);
   v_1 = _mm512_set1_epi32(1);
   v_zero = _mm512_setzero_ps();
/* buffer particles that are leaving tile: update ppbuff, ncl */
/* loop over tiles */
/* msk1 selects the low 10 lanes: 26 counters = 16 + 10 elements */
   msk1 = _mm512_int2mask(1023);
/* v_m1, v_m2 = lane-permutation tables for the third and fourth */
/* passes of the vectorized (Hillis-Steele style) prefix scan    */
   v_m1 = _mm512_set_epi32(11,11,11,11,11,10,9,8,3,3,3,3,3,2,1,0);
   v_m2 = _mm512_set_epi32(7,7,7,7,7,7,7,7,7,6,5,4,3,2,1,0);
#pragma omp parallel for \
private(i,j,l,npoff,nboff,noff,nps,mm,ii,ll,j1,ist,nh,ip,v_it,v_is, \
v_it0,v_ioff,ls,lm)
   for (l = 0; l < mxyz1; l++) {
      npoff = idimp*nppmx*l;
      nboff = idimp*npbmx*l;
      noff = (ntmax+1)*l;
/* find address offset for ordered ppbuff array */
/* isum = 0; */
/* for (j = 0; j < 26; j++) { */
/*    ist = ncl[j+26*l]; */
/*    ncl[j+26*l] = isum; */
/*    isum += ist; */
/* } */
/* perform exclusive prefix scan */
/* load 26 data elements into 32 length vector with zero padding */
      mm = 26*l;
      v_it = _mm512_loadunpacklo_epi32(v_0,&ncl[mm]);
      v_it = _mm512_loadunpackhi_epi32(v_it,&ncl[mm+16]);
      _mm512_store_epi32(ls,v_it);
      v_is = _mm512_mask_loadunpacklo_epi32(v_0,msk1,&ncl[mm+16]);
      v_is = _mm512_mask_loadunpackhi_epi32(v_is,msk1,&ncl[mm+32]);
      _mm512_store_epi32(&ls[16],v_is);
      v_ioff = _mm512_setzero_epi32();
/* vector loop over elements in blocks of 16 */
/* four masked shuffle/permute passes build an inclusive scan of    */
/* each 16-lane block; the running block offset is carried in       */
/* v_ioff and the original values are subtracted at the end to make */
/* the scan exclusive                                               */
      for (j = 0; j < 32; j+=16) {
/* load data */
         v_it0 = _mm512_load_epi32(&ls[j]);
/* first pass */
         v_is = _mm512_shuffle_epi32(v_it0,177);
         v_it = _mm512_mask_add_epi32(v_it0,_mm512_int2mask(43690),
                v_it0,v_is);
/* second pass */
         v_is = _mm512_shuffle_epi32(v_it,80);
         v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(52428),v_it,
                v_is);
/* third pass */
         v_is = _mm512_permutevar_epi32(v_m1,v_it);
         v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(61680),v_it,
                v_is);
/* fourth pass */
         v_is = _mm512_permutevar_epi32(v_m2,v_it);
         v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(65280),v_it,
                v_is);
/* add offset */
         v_it = _mm512_add_epi32(v_it,v_ioff);
/* next offset: broadcast the last lane's inclusive sum to all lanes */
         if (j==0) {
            v_ioff = _mm512_shuffle_epi32(v_it,255);
            v_ioff = _mm512_permute4f128_epi32(v_ioff,255);
         }
/* subtract for exclusive scan */
         v_it = _mm512_sub_epi32(v_it,v_it0);
/* write data */
         _mm512_store_epi32(&ls[j],v_it);
      }
      nh = ihole[2*noff];
      nps = 16*(nh/16);
/* nps = (nh >> 4) << 4; */
      ip = 0;
/* loop over particles leaving tile in groups of 16 */
      for (j = 0; j < nps; j+=16) {
/* j1 = ihole[2*(j+1+(ntmax+1)*l)] - 1; */
/* ist = ihole[1+2*(j+1+(ntmax+1)*l)]; */
         mm = 2*(j+1+noff);
         v_it = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]);
         v_it = _mm512_loadunpackhi_epi32(v_it,&ihole[mm+16]);
         _mm512_store_epi32(lm,v_it);
         mm += 16;
         v_is = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]);
         v_is = _mm512_loadunpackhi_epi32(v_is,&ihole[mm+16]);
         _mm512_store_epi32(&lm[16],v_is);
/* buffer particles that are leaving tile, in direction order */
         for (ll = 0; ll < 16; ll++) {
            j1 = lm[2*ll] - 1;
            ist = lm[1+2*ll];
            ii = ls[ist-1];
            if (ii < npbmx) {
               for (i = 0; i < idimp; i++) {
                  ppbuff[ii+npbmx*i+nboff]
                  = ppart[j1+nppmx*i+npoff];
               }
            }
            else {
/* ppbuff overflow: flag it but keep counting so the caller can */
/* report the required buffer size via irc                      */
               ip = 1;
            }
            ls[ist-1] = ii + 1;
         }
      }
/* loop over remaining particles leaving tile */
      for (j = nps; j < nh; j++) {
/* buffer particles that are leaving tile, in direction order */
         j1 = ihole[2*(j+1+noff)] - 1;
         ist = ihole[1+2*(j+1+noff)];
         ii = ls[ist-1];
         if (ii < npbmx) {
            for (i = 0; i < idimp; i++) {
               ppbuff[ii+npbmx*i+nboff]
               = ppart[j1+nppmx*i+npoff];
            }
         }
         else {
            ip = 1;
         }
         ls[ist-1] = ii + 1;
      }
/* store 26 data elements into ncl */
      mm = 26*l;
      v_it = _mm512_load_epi32(ls);
      v_is = _mm512_load_epi32(&ls[16]);
      _mm512_packstorelo_epi32(&ncl[mm],v_it);
      _mm512_packstorehi_epi32(&ncl[mm+16],v_it);
      _mm512_mask_packstorelo_epi32(&ncl[mm+16],msk1,v_is);
      _mm512_mask_packstorehi_epi32(&ncl[mm+32],msk1,v_is);
/* set error */
      if (ip > 0)
         *irc = ncl[25+26*l];
   }
/* ppbuff overflow */
   if (*irc > 0)
      return;
/* copy incoming particles from buffer into ppart: update ppart, kpic */
/* loop over tiles */
   v_ioff = _mm512_set_epi32(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0);
   v_m1 = _mm512_set1_epi32(nppmx);
#pragma omp parallel for \
private(i,j,k,l,ii,kk,npp,nps,npoff,noff,nboff,kx,ky,kz,kl,kr,kxl,kxr, \
lk,ll,lr,ih,nh,nn,mm,ncoff,ist,j1,j2,ip,v_m2,v_m3,v_it,v_is,v_it0,v_mm, \
v_npp,v_x,msk1,ks,ls)
   for (l = 0; l < mxyz1; l++) {
      npp = kpic[l];
      npoff = idimp*nppmx*l;
      noff = (ntmax+1)*l;
      v_m2 = _mm512_set1_epi32(noff+1);
      v_m3 = _mm512_set1_epi32(npoff);
      kz = l/mxy1;
      k = l - mxy1*kz;
/* loop over tiles in z, assume periodic boundary conditions */
      lk = kz*mxy1;
/* find tile behind */
      ll = kz - 1;
      if (ll < 0)
         ll += mz1;
      ll = ll*mxy1;
/* find tile in front */
      lr = kz + 1;
      if (lr >= mz1)
         lr -= mz1;
      lr = lr*mxy1;
      ky = k/mx1;
/* loop over tiles in y, assume periodic boundary conditions */
      kk = ky*mx1;
/* find tile above */
      kl = ky - 1;
      if (kl < 0)
         kl += my1;
      kl = kl*mx1;
/* find tile below */
      kr = ky + 1;
      if (kr >= my1)
         kr -= my1;
      kr = kr*mx1;
/* loop over tiles in x, assume periodic boundary conditions */
      kx = k - ky*mx1;
      kxl = kx - 1 ;
      if (kxl < 0)
         kxl += mx1;
      kxr = kx + 1;
      if (kxr >= mx1)
         kxr -= mx1;
/* find tile number for different directions */
      ks[0] = kxr + kk + lk;
      ks[1] = kxl + kk + lk;
      ks[2] = kx + kr + lk;
      ks[3] = kxr + kr + lk;
      ks[4] = kxl + kr + lk;
      ks[5] = kx + kl + lk;
      ks[6] = kxr + kl + lk;
      ks[7] = kxl + kl + lk;
      ks[8] = kx + kk + lr;
      ks[9] = kxr + kk + lr;
      ks[10] = kxl + kk + lr;
      ks[11] = kx + kr + lr;
      ks[12] = kxr + kr + lr;
      ks[13] = kxl + kr + lr;
      ks[14] = kx + kl + lr;
      ks[15] = kxr + kl + lr;
      ks[16] = kxl + kl + lr;
      ks[17] = kx + kk + ll;
      ks[18] = kxr + kk + ll;
      ks[19] = kxl + kk + ll;
      ks[20] = kx + kr + ll;
      ks[21] = kxr + kr + ll;
      ks[22] = kxl + kr + ll;
      ks[23] = kx + kl + ll;
      ks[24] = kxr + kl + ll;
      ks[25] = kxl + kl + ll;
/* loop over directions */
      nh = ihole[2*noff];
      ncoff = 0;
      ih = 0;
      ist = 0;
      j1 = 0;
      v_it0 = _mm512_set1_epi32(nh);
      v_is = _mm512_add_epi32(v_m2,v_it0);
      v_it0 = _mm512_sub_epi32(v_ioff,v_it0);
      v_npp = _mm512_set1_epi32(npp);
      for (ii = 0; ii < 26; ii++) {
         nboff = idimp*npbmx*ks[ii];
         if (ii > 0)
            ncoff = ncl[ii-1+26*ks[ii]];
/* ip = number of particles coming from direction ii */
         ip = ncl[ii+26*ks[ii]] - ncoff;
/* nps = 16*(ip/16); */
         nps = (ip >> 4) << 4;
/* loop over particles in this direction in groups of 16 */
         for (j = 0; j < nps; j+=16) {
/* insert incoming particles into holes */
/* ih += 1; */
/* if (ih <= nh) { */
/*    j1 = ihole[2*(ih+noff)] - 1; */
/* } */
/* place overflow at end of array */
/* else { */
/*    j1 = npp; */
/*    npp += 1; */
/* } */
/* msk1 marks lanes that still have a hole to fill; the remaining */
/* lanes are appended at the end of the tile's particle list      */
            v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_it0);
            msk1 = _mm512_cmp_epi32_mask(v_mm,v_0,_MM_CMPINT_LT);
            v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_is);
            v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_mm);
            v_mm = _mm512_mask_add_epi32(v_mm,_mm512_knot(msk1),v_mm,
                   v_npp);
            v_it = _mm512_mask_i32gather_epi32(v_mm,msk1,v_mm,
                   (int *)ihole,4);
            v_it = _mm512_mask_sub_epi32(v_it,msk1,v_it,v_1);
            ih += 16;
            nn = ih - nh;
            if (nn > 0) {
               nn = nn < 16 ? nn : 16;
               npp += nn;
            }
            msk1 = _mm512_cmp_epi32_mask(v_it,v_m1,_MM_CMPINT_LT);
            ll = _mm512_mask2int(_mm512_knot(msk1));
            v_it = _mm512_add_epi32(v_it,v_m3);
            for (i = 0; i < idimp; i++) {
/* if (j1 < nppmx) */
/*    ppart[j1+nppmx*i+npoff] */
/*    = ppbuff[j+ncoff+npbmx*i+nboff]; */
               mm = j + ncoff + npbmx*i + nboff;
               v_x = _mm512_loadunpacklo_ps(v_x,&ppbuff[mm]);
               v_x = _mm512_loadunpackhi_ps(v_x,&ppbuff[mm+16]);
               if (ll==0) {
                  _mm512_i32scatter_ps((float *)ppart,v_it,v_x,4);
               }
               else {
                  _mm512_mask_i32scatter_ps((float *)ppart,msk1,v_it,
                  v_x,4);
               }
               v_it = _mm512_add_epi32(v_it,v_m1);
            }
            if (ll != 0) {
               ist = 1;
            }
         }
/* loop over remaining particles in this direction */
         for (j = nps; j < ip; j++) {
            ih += 1;
/* insert incoming particles into holes */
            if (ih <= nh) {
               j1 = ihole[2*(ih+(ntmax+1)*l)] - 1;
            }
/* place overflow at end of array */
            else {
               j1 = npp;
               npp += 1;
            }
            if (j1 < nppmx) {
               for (i = 0; i < idimp; i++) {
                  ppart[j1+nppmx*i+npoff]
                  = ppbuff[j+ncoff+npbmx*i+nboff];
               }
            }
            else {
               ist = 1;
            }
         }
      }
/* set error */
      if (ist > 0)
         *irc = j1+1;
/* fill up remaining holes in particle array with particles from bottom */
/* holes with locations greater than npp-ip do not need to be filled */
      if (ih < nh) {
         ip = nh - ih;
         ii = nh;
         nn = ihole[2*(ii+noff)] - 1;
         v_it0 = _mm512_set1_epi32(nn);
         ih += 1;
         j2 = ihole[2*(ih+noff)] - 1;
         v_m2 = _mm512_sub_epi32(v_m2,v_1);
/* move particles from end into remaining holes */
/* holes are processed in increasing order */
/* nps = 16*(ip/16); */
         nps = (ip >> 4) << 4;
/* loop over particles in groups of 16 */
         for (j = 0; j < nps; j+=16) {
/* j2 = ihole[2*(ih+noff)] - 1; */
            v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_ioff);
            v_mm = _mm512_add_epi32(v_mm,v_m2);
            v_mm = _mm512_add_epi32(v_mm,v_mm);
            v_is = _mm512_i32gather_epi32(v_mm,(int *)ihole,4);
            v_is = _mm512_sub_epi32(v_is,v_1);
/* j1 = npp - j - 1; */
/* if (j1==nn) { */
/*    ii -= 1; */
/*    nn = ihole[2*(ii+(ntmax+1)*l)] - 1; */
/* } */
/* compact source indices: skip entries at the bottom that are */
/* themselves holes, collecting the kk usable sources in ls    */
            kk = 0;
            for (ll = 0; ll < 16; ll++) {
               j1 = npp - j - ll - 1;
               if (j1==nn) {
                  ii -= 1;
                  nn = ihole[2*(ii+(ntmax+1)*l)] - 1;
               }
               else {
                  ls[kk] = j1;
                  kk += 1;
               }
            }
            v_it = _mm512_load_epi32(ls);
            v_it0 = _mm512_set1_epi32(kk);
            msk1 = _mm512_cmp_epi32_mask(v_ioff,v_it0,_MM_CMPINT_LT);
            v_is = _mm512_add_epi32(v_is,v_m3);
            v_it = _mm512_add_epi32(v_it,v_m3);
            for (i = 0; i < idimp; i++) {
/* ppart[j2+nppmx*i+npoff] */
/* = ppart[j1+nppmx*i+npoff]; */
               if (kk==16) {
                  v_x = _mm512_i32gather_ps(v_it,(float *)ppart,4);
                  _mm512_i32scatter_ps((float *)ppart,v_is,v_x,4);
               }
               else {
                  v_x = _mm512_mask_i32gather_ps(v_zero,msk1,v_it,
                        (float *)ppart,4);
                  _mm512_mask_i32scatter_ps((float *)ppart,msk1,v_is,
                  v_x,4);
               }
               v_is = _mm512_add_epi32(v_is,v_m1);
               v_it = _mm512_add_epi32(v_it,v_m1);
            }
            ih += kk;
/* holes with locations greater than npp-ip do not need to be filled */
         }
/* loop over remaining particles */
         if (nps < ip) {
            nn = ihole[2*(ii+noff)] - 1;
            j2 = ihole[2*(ih+noff)] - 1;
         }
         for (j = nps; j < ip; j++) {
            j1 = npp - j - 1;
            if (j1==nn) {
               ii -= 1;
               nn = ihole[2*(ii+noff)] - 1;
            }
            else {
               for (i = 0; i < idimp; i++) {
                  ppart[j2+nppmx*i+npoff]
                  = ppart[j1+nppmx*i+npoff];
               }
               ih += 1;
               j2 = ihole[2*(ih+(ntmax+1)*l)] - 1;
            }
         }
         npp -= ip;
      }
      kpic[l] = npp;
   }
   return;
}

/*--------------------------------------------------------------------*/
void ckncpp2order3lt(float ppart[], float ppbuff[], int kpic[],
                     int ncl[], int ihole[], int idimp, int nppmx,
                     int nx, int ny, int nz, int mx, int my, int mz,
                     int mx1, int my1, int mz1, int npbmx, int ntmax,
                     int *irc) {
/* this subroutine sorts particles by x,y,z grid in tiles of mx, my, mz
   linear interpolation, with periodic boundary conditions
   tiles are assumed to be arranged in 3D linear memory
   algorithm has 3 steps.  first, one finds particles leaving tile and
   stores their number in each directon, location, and destination in ncl
   and ihole.  second, a prefix scan of ncl is performed and departing
   particles are buffered in ppbuff in direction order. finally, we copy
   the incoming particles from other tiles into ppart. 
input: all except ppbuff, ncl, ihole, irc output: ppart, ppbuff, kpic, ncl, ihole, irc ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = position z of particle n in tile m ppbuff[m][i][n] = i co-ordinate of particle n in tile m kpic[m] = number of particles in tile m ncl[m][i] = number of particles going to destination i, tile m ihole[m][:][0] = location of hole in array left by departing particle ihole[m][:][1] = direction destination of particle leaving hole all for tile m ihole[m][0][0] = ih, number of holes left (error, if negative) idimp = size of phase space = 6 nppmx = maximum number of particles in tile nx/ny/nz = system length in x/y/z direction mx/my/mz = number of grids in sorting cell in x/y/z mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 mz1 = (system length in z direction - 1)/mz + 1 npbmx = size of buffer array ppbuff ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 requires KNC, ppart, ppbuff need to be 64 byte aligned nppmx, npbmx need to be a multiple of 16 local data */ int mxy1, mxyz1, noff, moff, loff, npoff, npp, nps, nboff, ncoff; int i, j, k, l, ii, kx, ky, kz, ih, nh, ist, nn, mm, ll; int ip, j1, j2, kxl, kxr, kk, kl, kr, lk, lr; float anx, any, anz, edgelx, edgely, edgelz, edgerx, edgery, edgerz; float dx, dy, dz; int ks[26]; __m512i v_ist, v_it, v_0, v_1, v_3, v_9; __m512i v_m1, v_m2, v_m3, v_m4, v_npp, v_mm, v_is, v_it0, v_ioff; __m512 v_anx, v_any, v_anz; __m512 v_dx, v_dy, v_dz, v_x; __m512 v_edgelx, v_edgely, v_edgelz, v_edgerx, v_edgery, v_edgerz; __m512 v_zero; __mmask16 msk1, msk2; __attribute__((aligned(64))) unsigned int ls[16], lm[32]; mxy1 = mx1*my1; mxyz1 = mxy1*mz1; anx = (float) nx; any = (float) ny; anz = (float) nz; v_0 = _mm512_set1_epi32(0); v_1 = _mm512_set1_epi32(1); v_3 = _mm512_set1_epi32(3); v_9 = _mm512_set1_epi32(9); v_anx 
= _mm512_set1_ps(anx); v_any = _mm512_set1_ps(any); v_anz = _mm512_set1_ps(anz); /* find and count particles leaving tiles and determine destination */ /* update ppart, ihole, ncl */ v_zero = _mm512_setzero_ps(); /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,l,ii,noff,moff,loff,npp,npoff,nps,nn,mm,ll,ih,nh,ist,dx, \ dy,dz,edgelx,edgely,edgelz,edgerx,edgery,edgerz,v_it,v_ist,v_edgelx, \ v_edgely,v_edgelz,v_edgerx,v_edgery,v_edgerz,v_dx,v_dy,v_dz,v_x,msk1, \ msk2,ls,lm) for (l = 0; l < mxyz1; l++) { loff = l/mxy1; k = l - mxy1*loff; loff = mz*loff; noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); npp = kpic[l]; npoff = idimp*nppmx*l; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? my : mm; ll = nz - loff; ll = mz < ll ? mz : ll; ih = 0; nh = 0; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; edgelz = loff; edgerz = loff + ll; noff = (ntmax+1)*l; v_edgelx = _mm512_set1_ps(edgelx); v_edgely = _mm512_set1_ps(edgely); v_edgelz = _mm512_set1_ps(edgelz); v_edgerx = _mm512_set1_ps(edgerx); v_edgery = _mm512_set1_ps(edgery); v_edgerz = _mm512_set1_ps(edgerz); /* clear counters */ /* for (j = 0; j < 26; j++) { */ /* ncl[j+26*l] = 0; */ /* } */ memset((void*)&ncl[26*l],0,26*sizeof(int)); nps = 16*(npp/16); /* loop over particles in tile in blocks of 16 */ for (j = 0; j < nps; j+=16) { /* dx = ppart[j+npoff]; */ /* dy = ppart[j+nppmx+npoff]; */ /* dz = ppart[j+2*nppmx+npoff]; */ v_dx = _mm512_load_ps(&ppart[j+npoff]); v_dy = _mm512_load_ps(&ppart[j+nppmx+npoff]); v_dz = _mm512_load_ps(&ppart[j+2*nppmx+npoff]); /* find particles going out of bounds */ /* ist = 0; */ v_ist = _mm512_setzero_epi32(); /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* ist = direction particle is going */ /* if (dx >= edgerx) { */ /* if (dx >= anx) */ /* ppart[j+npoff] = dx - anx; */ /* ist = 2; */ 
/* } */ msk1 = _mm512_cmp_ps_mask(v_dx,v_edgerx,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dx,v_edgelx,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dx; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_1,v_1); v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it); msk1 = _mm512_cmp_ps_mask(v_dx,v_anx,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dx,v_anx); ii = _mm512_mask2int(msk1); if (ii != 0) _mm512_store_ps(&ppart[j+npoff],v_x); } /* if (dx < edgelx) { */ /* if (dx < 0.0) { */ /* dx += anx; */ /* if (dx < anx) */ /* ist = 1; */ /* else */ /* dx = 0.0; */ /* ppart[j+npoff] = dx; */ /* } */ /* else { */ /* ist = 1; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_1); msk2 = _mm512_cmp_ps_mask(v_dx,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dx,v_anx); msk1 = _mm512_cmp_ps_mask(v_x,v_anx,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_ist = _mm512_add_epi32(v_ist,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) _mm512_store_ps(&ppart[j+npoff],v_x); } } /* if (dy >= edgery) { */ /* if (dy >= any) */ /* ppart[j+nppmx+npoff] = dy - any; */ /* ist += 6; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dy,v_edgery,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dy,v_edgely,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dy; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_3,v_3); v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it); msk1 = _mm512_cmp_ps_mask(v_dy,v_any,_MM_CMPINT_GE); v_x = 
_mm512_mask_sub_ps(v_x,msk1,v_dy,v_any); ii = _mm512_mask2int(msk1); if (ii != 0) _mm512_store_ps(&ppart[j+nppmx+npoff],v_x); } /* if (dy < edgely) { */ /* if (dy < 0.0) { */ /* dy += any; */ /* if (dy < any) */ /* ist += 3; */ /* else */ /* dy = 0.0; */ /* ppart[j+nppmx+npoff] = dy; */ /* } */ /* else { */ /* ist += 3; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_3); msk2 = _mm512_cmp_ps_mask(v_dy,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dy,v_any); msk1 = _mm512_cmp_ps_mask(v_x,v_any,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_ist = _mm512_add_epi32(v_ist,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) _mm512_store_ps(&ppart[j+nppmx+npoff],v_x); } } /* if (dz >= edgerz) { */ /* if (dz >= anz) */ /* ppart[j+2*nppmx+npoff] = dz - anz; */ /* ist += 18; */ /* } */ msk1 = _mm512_cmp_ps_mask(v_dz,v_edgerz,_MM_CMPINT_GE); msk2 = _mm512_cmp_ps_mask(v_dz,v_edgelz,_MM_CMPINT_LT); ii = _mm512_mask2int(_mm512_kor(msk1,msk2)); /* execute if either test result is true for any particle */ if (ii != 0) { ii = _mm512_mask2int(msk1); v_x = v_dz; /* write output if test result is true for any particle */ if (ii != 0) { v_it = _mm512_add_epi32(v_9,v_9); v_ist = _mm512_mask_add_epi32(v_ist,msk1,v_ist,v_it); msk1 = _mm512_cmp_ps_mask(v_dz,v_anz,_MM_CMPINT_GE); v_x = _mm512_mask_sub_ps(v_x,msk1,v_dz,v_anz); ii = _mm512_mask2int(msk1); if (ii != 0) _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_x); } /* if (dz < edgelz) { */ /* if (dz < 0.0) { */ /* dz += anz; */ /* if (dz < anz) */ /* ist += 9; */ /* else */ /* dz = 0.0; */ /* ppart[j+2*nppmx+npoff] = dz; */ /* } */ /* else { */ /* ist += 9; */ /* } */ /* } */ /* write output if test result is true for any particle */ ii = _mm512_mask2int(msk2); if (ii != 0) { v_it = _mm512_mask_mov_epi32(v_0,msk2,v_9); msk2 = 
_mm512_cmp_ps_mask(v_dz,v_zero,_MM_CMPINT_LT); v_x = _mm512_mask_add_ps(v_x,msk2,v_dz,v_anz); msk1 = _mm512_cmp_ps_mask(v_x,v_anz,_MM_CMPINT_GE); msk1 = _mm512_kand(msk1,msk2); v_x = _mm512_mask_mov_ps(v_x,msk1,v_zero); v_it = _mm512_mask_mov_epi32(v_it,msk1,v_0); v_ist = _mm512_add_epi32(v_ist,v_it); ii = _mm512_mask2int(msk2); if (ii != 0) _mm512_store_ps(&ppart[j+2*nppmx+npoff],v_x); } } /* increment counters */ /* if (ist > 0) { */ /* ncl[ist+26*l-1] += 1; */ /* ih += 1; */ /* if (ih <= ntmax) { */ /* ihole[2*(ih+(ntmax+1)*l)] = j + i + 1; */ /* ihole[1+2*(ih+(ntmax+1)*l)] = ist; */ /* } */ /* else { */ /* nh = 1; */ /* } */ /* } */ _mm512_store_epi32(ls,v_ist); /* remove zero ist values and left shift data */ ll = 0; memset((void*)lm,0,32*sizeof(int)); for (i = 0; i < 16; i++) { ist = ls[i]; if (ist > 0) { lm[2*ll] = j + i + 1; lm[1+2*ll] = ist; ncl[ist+26*l-1] += 1; ll += 1; } } if (ll > 0) { if ((ih+ll) > ntmax) { nh = 1; } else { v_it = _mm512_load_epi32(lm); mm = 2*(ih+1+noff); _mm512_packstorelo_epi32(&ihole[mm],v_it); _mm512_packstorehi_epi32(&ihole[mm+16],v_it); if (ll > 8) { v_it = _mm512_load_epi32(&lm[16]); mm += 16; _mm512_packstorelo_epi32(&ihole[mm],v_it); _mm512_packstorehi_epi32(&ihole[mm+16],v_it); } } ih += ll; } } /* loop over remaining particles in tile */ for (j = nps; j < npp; j++) { dx = ppart[j+npoff]; dy = ppart[j+nppmx+npoff]; dz = ppart[j+2*nppmx+npoff]; /* find particles going out of bounds */ ist = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* ist = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) ppart[j+npoff] = dx - anx; ist = 2; } else if (dx < edgelx) { if (dx < 0.0) { dx += anx; if (dx < anx) ist = 1; else dx = 0.0; ppart[j+npoff] = dx; } else { ist = 1; } } if (dy >= edgery) { if (dy >= any) ppart[j+nppmx+npoff] = dy - any; ist += 6; } else if (dy < edgely) { if (dy < 
0.0) { dy += any; if (dy < any) ist += 3; else dy = 0.0; ppart[j+nppmx+npoff] = dy; } else { ist += 3; } } if (dz >= edgerz) { if (dz >= anz) ppart[j+2*nppmx+npoff] = dz - anz; ist += 18; } else if (dz < edgelz) { if (dz < 0.0) { dz += anz; if (dz < anz) ist += 9; else dz = 0.0; ppart[j+2*nppmx+npoff] = dz; } else { ist += 9; } } if (ist > 0) { ncl[ist+26*l-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+noff)] = j + 1; ihole[1+2*(ih+noff)] = ist; } else { nh = 1; } } } /* set error and end of file flag */ if (nh > 0) { *irc = ih; ih = -ih; } ihole[2*noff] = ih; } /* ihole overflow */ if (*irc > 0) return; /* buffer particles that are leaving tile: update ppbuff, ncl */ /* loop over tiles */ msk1 = _mm512_int2mask(1023); v_m1 = _mm512_set1_epi32(nppmx); v_ioff = _mm512_set_epi32(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0); #pragma omp parallel for \ private(i,j,l,npoff,nboff,noff,nps,mm,ii,j1,ist,nh,ip,v_it,v_is,v_it0, \ v_mm,v_m2,v_m3,v_m4,lm) for (l = 0; l < mxyz1; l++) { npoff = idimp*nppmx*l; nboff = idimp*npbmx*l; noff = (ntmax+1)*l; v_m2 = _mm512_set_epi32(11,11,11,11,11,10,9,8,3,3,3,3,3,2,1,0); v_m3 = _mm512_set_epi32(7,7,7,7,7,7,7,7,7,6,5,4,3,2,1,0); /* find address offset for ordered ppbuff array */ /* isum = 0; */ /* for (j = 0; j < 26; j++) { */ /* ist = ncl[j+26*l]; */ /* ncl[j+26*l] = isum; */ /* isum += ist; */ /* } */ /* perform exclusive prefix scan */ /* load 26 data elements into 32 length vector with zero padding */ mm = 26*l; v_it = _mm512_loadunpacklo_epi32(v_0,&ncl[mm]); v_it = _mm512_loadunpackhi_epi32(v_it,&ncl[mm+16]); _mm512_store_epi32(lm,v_it); v_is = _mm512_mask_loadunpacklo_epi32(v_0,msk1,&ncl[mm+16]); v_is = _mm512_mask_loadunpackhi_epi32(v_is,msk1,&ncl[mm+32]); _mm512_store_epi32(&lm[16],v_is); v_mm = _mm512_setzero_epi32(); /* vector loop over elements in blocks of 16 */ for (j = 0; j < 32; j+=16) { /* load data */ v_it0 = _mm512_load_epi32(&lm[j]); /* first pass */ v_is = _mm512_shuffle_epi32(v_it0,177); v_it = 
_mm512_mask_add_epi32(v_it0,_mm512_int2mask(43690), v_it0,v_is); /* second pass */ v_is = _mm512_shuffle_epi32(v_it,80); v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(52428),v_it, v_is); /* third pass */ v_is = _mm512_permutevar_epi32(v_m2,v_it); v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(61680),v_it, v_is); /* fourth pass */ v_is = _mm512_permutevar_epi32(v_m3,v_it); v_it = _mm512_mask_add_epi32(v_it,_mm512_int2mask(65280),v_it, v_is); /* add offset */ v_it = _mm512_add_epi32(v_it,v_mm); /* next offset */ if (j==0) { v_mm = _mm512_shuffle_epi32(v_it,255); v_mm = _mm512_permute4f128_epi32(v_mm,255); } /* subtract for exclusive scan */ v_it = _mm512_sub_epi32(v_it,v_it0); /* write data */ _mm512_store_epi32(&lm[j],v_it); } /* store 26 data elements into ncl */ v_it = _mm512_load_epi32(lm); v_is = _mm512_load_epi32(&lm[16]); _mm512_packstorelo_epi32(&ncl[mm],v_it); _mm512_packstorehi_epi32(&ncl[mm+16],v_it); _mm512_mask_packstorelo_epi32(&ncl[mm+16],msk1,v_is); _mm512_mask_packstorehi_epi32(&ncl[mm+32],msk1,v_is); nh = ihole[2*noff]; nps = 16*(nh/16); /* nps = (nh >> 4) << 4; */ ip = 0; v_m2 = _mm512_set1_epi32(noff+1); v_m3 = _mm512_set1_epi32(npoff); v_m4 = _mm512_set1_epi32(nboff); v_it0 = _mm512_set1_epi32(npbmx); /* loop over particles leaving tile in groups of 16 */ for (j = 0; j < nps; j+=16) { /* buffer particles that are leaving tile, in direction order */ /* j1 = ihole[2*(j+1+(ntmax+1)*l)] - 1; */ /* ist = ihole[1+2*(j+1+(ntmax+1)*l)]; */ v_mm = _mm512_add_epi32(_mm512_set1_epi32(j),v_ioff); v_mm = _mm512_add_epi32(v_mm,v_m2); v_mm = _mm512_add_epi32(v_mm,v_mm); v_it = _mm512_i32gather_epi32(v_mm,(int *)ihole,4); v_it = _mm512_sub_epi32(v_it,v_1); v_mm = _mm512_add_epi32(v_mm,v_1); v_is = _mm512_i32gather_epi32(v_mm,(int *)ihole,4); _mm512_store_epi32(lm,v_is); for (ll = 0; ll < 16; ll++) { ist = lm[ll]; ii = ncl[ist+26*l-1]; if (ii < npbmx) { lm[ll] = ii; } else { ip = 1; } ncl[ist+26*l-1] = ii + 1; } v_is = _mm512_load_epi32(lm); v_it = 
_mm512_add_epi32(v_it,v_m3); v_is = _mm512_add_epi32(v_is,v_m4); if (ip==0) { for (i = 0; i < idimp; i++) { /* ppbuff[ii+npbmx*i+nboff] */ /* = ppart[j1+nppmx*i+npoff]; */ v_x = _mm512_i32gather_ps(v_it,(float *)ppart,4); _mm512_i32scatter_ps((float *)ppbuff,v_is,v_x,4); v_it = _mm512_add_epi32(v_it,v_m1); v_is = _mm512_add_epi32(v_is,v_it0); } } /* mm = 2*(j+1+noff); v_it = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]); v_it = _mm512_loadunpackhi_epi32(v_it,&ihole[mm+16]); _mm512_store_epi32(lm,v_it); mm += 16; v_is = _mm512_loadunpacklo_epi32(v_0,&ihole[mm]); v_is = _mm512_loadunpackhi_epi32(v_is,&ihole[mm+16]); _mm512_store_epi32(&lm[16],v_is); for (ll = 0; ll < 16; ll++) { j1 = lm[2*ll] - 1; ist = lm[1+2*ll]; ii = ncl[ist+26*l-1]; if (ii < npbmx) { for (i = 0; i < idimp; i++) { ppbuff[ii+npbmx*i+nboff] = ppart[j1+nppmx*i+npoff]; } } else { ip = 1; } ncl[ist+26*l-1] = ii + 1; } */ } /* loop over remaining particles leaving tile */ for (j = nps; j < nh; j++) { /* buffer particles that are leaving tile, in direction order */ j1 = ihole[2*(j+1+noff)] - 1; ist = ihole[1+2*(j+1+noff)]; ii = ncl[ist+26*l-1]; if (ii < npbmx) { for (i = 0; i < idimp; i++) { ppbuff[ii+npbmx*i+nboff] = ppart[j1+nppmx*i+npoff]; } } else { ip = 1; } ncl[ist+26*l-1] = ii + 1; } /* set error */ if (ip > 0) *irc = ncl[25+26*l]; } /* ppbuff overflow */ if (*irc > 0) return; /* copy incoming particles from buffer into ppart: update ppart, kpic */ /* loop over tiles */ v_ioff = _mm512_set_epi32(15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0); v_m1 = _mm512_set1_epi32(nppmx); #pragma omp parallel for \ private(i,j,k,l,ii,kk,npp,nps,npoff,noff,nboff,kx,ky,kz,kl,kr,kxl,kxr, \ lk,ll,lr,ih,nh,nn,mm,ncoff,ist,j1,j2,ip,v_m2,v_m3,v_it,v_is,v_it0,v_mm, \ v_npp,v_x,msk1,ks,ls) for (l = 0; l < mxyz1; l++) { npp = kpic[l]; npoff = idimp*nppmx*l; noff = (ntmax+1)*l; v_m2 = _mm512_set1_epi32(noff+1); v_m3 = _mm512_set1_epi32(npoff); kz = l/mxy1; k = l - mxy1*kz; /* loop over tiles in z, assume periodic boundary conditions 
*/ lk = kz*mxy1; /* find tile behind */ ll = kz - 1; if (ll < 0) ll += mz1; ll = ll*mxy1; /* find tile in front */ lr = kz + 1; if (lr >= mz1) lr -= mz1; lr = lr*mxy1; ky = k/mx1; /* loop over tiles in y, assume periodic boundary conditions */ kk = ky*mx1; /* find tile above */ kl = ky - 1; if (kl < 0) kl += my1; kl = kl*mx1; /* find tile below */ kr = ky + 1; if (kr >= my1) kr -= my1; kr = kr*mx1; /* loop over tiles in x, assume periodic boundary conditions */ kx = k - ky*mx1; kxl = kx - 1 ; if (kxl < 0) kxl += mx1; kxr = kx + 1; if (kxr >= mx1) kxr -= mx1; /* find tile number for different directions */ ks[0] = kxr + kk + lk; ks[1] = kxl + kk + lk; ks[2] = kx + kr + lk; ks[3] = kxr + kr + lk; ks[4] = kxl + kr + lk; ks[5] = kx + kl + lk; ks[6] = kxr + kl + lk; ks[7] = kxl + kl + lk; ks[8] = kx + kk + lr; ks[9] = kxr + kk + lr; ks[10] = kxl + kk + lr; ks[11] = kx + kr + lr; ks[12] = kxr + kr + lr; ks[13] = kxl + kr + lr; ks[14] = kx + kl + lr; ks[15] = kxr + kl + lr; ks[16] = kxl + kl + lr; ks[17] = kx + kk + ll; ks[18] = kxr + kk + ll; ks[19] = kxl + kk + ll; ks[20] = kx + kr + ll; ks[21] = kxr + kr + ll; ks[22] = kxl + kr + ll; ks[23] = kx + kl + ll; ks[24] = kxr + kl + ll; ks[25] = kxl + kl + ll; /* loop over directions */ nh = ihole[2*noff]; ncoff = 0; ih = 0; ist = 0; j1 = 0; v_it0 = _mm512_set1_epi32(nh); v_is = _mm512_add_epi32(v_m2,v_it0); v_it0 = _mm512_sub_epi32(v_ioff,v_it0); v_npp = _mm512_set1_epi32(npp); for (ii = 0; ii < 26; ii++) { nboff = idimp*npbmx*ks[ii]; if (ii > 0) ncoff = ncl[ii-1+26*ks[ii]]; /* ip = number of particles coming from direction ii */ ip = ncl[ii+26*ks[ii]] - ncoff; /* nps = 16*(ip/16); */ nps = (ip >> 4) << 4; /* loop over particles in this direction in groups of 16 */ for (j = 0; j < nps; j+=16) { /* insert incoming particles into holes */ /* ih += 1; */ /* if (ih <= nh) { */ /* j1 = ihole[2*(ih+noff)] - 1; */ /* } */ /* place overflow at end of array */ /* else { */ /* j1 = npp; */ /* npp += 1; */ /* } */ v_mm = 
_mm512_add_epi32(_mm512_set1_epi32(ih),v_it0); msk1 = _mm512_cmp_epi32_mask(v_mm,v_0,_MM_CMPINT_LT); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_is); v_mm = _mm512_mask_add_epi32(v_mm,msk1,v_mm,v_mm); v_mm = _mm512_mask_add_epi32(v_mm,_mm512_knot(msk1),v_mm, v_npp); v_it = _mm512_mask_i32gather_epi32(v_mm,msk1,v_mm, (int *)ihole,4); v_it = _mm512_mask_sub_epi32(v_it,msk1,v_it,v_1); ih += 16; nn = ih - nh; if (nn > 0) { nn = nn < 16 ? nn : 16; npp += nn; } msk1 = _mm512_cmp_epi32_mask(v_it,v_m1,_MM_CMPINT_LT); ll = _mm512_mask2int(_mm512_knot(msk1)); v_it = _mm512_add_epi32(v_it,v_m3); for (i = 0; i < idimp; i++) { /* if (j1 < nppmx) */ /* ppart[j1+nppmx*i+npoff] */ /* = ppbuff[j+ncoff+npbmx*i+nboff]; */ mm = j + ncoff + npbmx*i + nboff; v_x = _mm512_loadunpacklo_ps(v_x,&ppbuff[mm]); v_x = _mm512_loadunpackhi_ps(v_x,&ppbuff[mm+16]); if (ll==0) { _mm512_i32scatter_ps((float *)ppart,v_it,v_x,4); } else { _mm512_mask_i32scatter_ps((float *)ppart,msk1,v_it, v_x,4); } v_it = _mm512_add_epi32(v_it,v_m1); } if (ll != 0) { ist = 1; } } /* loop over remaining particles in this direction */ for (j = nps; j < ip; j++) { ih += 1; /* insert incoming particles into holes */ if (ih <= nh) { j1 = ihole[2*(ih+(ntmax+1)*l)] - 1; } /* place overflow at end of array */ else { j1 = npp; npp += 1; } if (j1 < nppmx) { for (i = 0; i < idimp; i++) { ppart[j1+nppmx*i+npoff] = ppbuff[j+ncoff+npbmx*i+nboff]; } } else { ist = 1; } } } /* set error */ if (ist > 0) *irc = j1+1; /* fill up remaining holes in particle array with particles from bottom */ /* holes with locations great than npp-ip do not need to be filled */ if (ih < nh) { ip = nh - ih; ii = nh; nn = ihole[2*(ii+noff)] - 1; v_it0 = _mm512_set1_epi32(nn); ih += 1; j2 = ihole[2*(ih+noff)] - 1; v_m2 = _mm512_sub_epi32(v_m2,v_1); /* move particles from end into remaining holes */ /* holes are processed in increasing order */ /* nps = 16*(ip/16); */ nps = (ip >> 4) << 4; /* loop over particles in groups of 16 */ for (j = 0; j < nps; 
j+=16) {
/* j2 = ihole[2*(ih+noff)] - 1; */
/* gather destination indices (hole locations) for the next 16 holes */
         v_mm = _mm512_add_epi32(_mm512_set1_epi32(ih),v_ioff);
         v_mm = _mm512_add_epi32(v_mm,v_m2);
         v_mm = _mm512_add_epi32(v_mm,v_mm);
         v_is = _mm512_i32gather_epi32(v_mm,(int *)ihole,4);
         v_is = _mm512_sub_epi32(v_is,v_1);
/* j1 = npp - j - 1; */
/* if (j1==nn) { */
/*    ii -= 1; */
/*    nn = ihole[2*(ii+(ntmax+1)*l)] - 1; */
/* } */
/* collect source indices from the end of the particle array, */
/* skipping entries that are themselves holes; survivors are  */
/* packed into ls and counted in kk                           */
         kk = 0;
         for (ll = 0; ll < 16; ll++) {
            j1 = npp - j - ll - 1;
            if (j1==nn) {
               ii -= 1;
               nn = ihole[2*(ii+(ntmax+1)*l)] - 1;
            }
            else {
               ls[kk] = j1;
               kk += 1;
            }
         }
         v_it = _mm512_load_epi32(ls);
         v_it0 = _mm512_set1_epi32(kk);
/* msk1 enables only the kk valid lanes */
         msk1 = _mm512_cmp_epi32_mask(v_ioff,v_it0,_MM_CMPINT_LT);
         v_is = _mm512_add_epi32(v_is,v_m3);
         v_it = _mm512_add_epi32(v_it,v_m3);
/* move each component of the source particles into the holes */
         for (i = 0; i < idimp; i++) {
/* ppart[j2+nppmx*i+npoff] */
/*    = ppart[j1+nppmx*i+npoff]; */
            if (kk==16) {
               v_x = _mm512_i32gather_ps(v_it,(float *)ppart,4);
               _mm512_i32scatter_ps((float *)ppart,v_is,v_x,4);
            }
            else {
               v_x = _mm512_mask_i32gather_ps(v_zero,msk1,v_it,
                     (float *)ppart,4);
               _mm512_mask_i32scatter_ps((float *)ppart,msk1,v_is,
                v_x,4);
            }
            v_is = _mm512_add_epi32(v_is,v_m1);
            v_it = _mm512_add_epi32(v_it,v_m1);
         }
         ih += kk;
/* holes with locations greater than npp-ip do not need to be filled */
      }
/* loop over remaining particles */
      if (nps < ip) {
         nn = ihole[2*(ii+noff)] - 1;
         j2 = ihole[2*(ih+noff)] - 1;
      }
      for (j = nps; j < ip; j++) {
         j1 = npp - j - 1;
/* skip a source location that is itself a hole */
         if (j1==nn) {
            ii -= 1;
            nn = ihole[2*(ii+noff)] - 1;
         }
/* copy particle j1 from the end of the array into hole j2 */
         else {
            for (i = 0; i < idimp; i++) {
               ppart[j2+nppmx*i+npoff] = ppart[j1+nppmx*i+npoff];
            }
            ih += 1;
            j2 = ihole[2*(ih+(ntmax+1)*l)] - 1;
         }
      }
      npp -= ip;
   }
/* update number of particles in this tile */
   kpic[l] = npp;
}
   return;
}

/*--------------------------------------------------------------------*/
void cknccguard3l(float fxyz[], int nx, int ny, int nz, int nxe,
                  int nye, int nze) {
/* replicate extended periodic vector field fxyz
   copies the field at the low edge of each dimension into the guard
   cells at the high edge (periodic boundary replication)
   linear interpolation
   nx/ny/nz = system length in x/y/z direction
   nxe = first dimension of field arrays, must be >= nx+1
   nye = second dimension of field arrays, must be >= ny+1
   nze = third dimension of field arrays, must be >= nz+1
   requires KNC, fxyz needs to be 64 byte aligned
   nxe needs to be a multiple of 4
local data                                                 */
   int j, k, l, nxs, nxyen, ll;
   nxs = 4*(nx/4);
   nxyen = 4*nxe*nye;
/* copy edges of extended field */
#pragma omp parallel
   {
#pragma omp for nowait \
private(j,k,l,ll)
      for (l = 0; l < nz; l++) {
         ll = nxyen*l;
/* replicate x edge: copy column j=0 into guard column j=nx */
         for (k = 0; k < ny; k++) {
            fxyz[4*nx+4*nxe*k+ll] = fxyz[4*nxe*k+ll];
            fxyz[1+4*nx+4*nxe*k+ll] = fxyz[1+4*nxe*k+ll];
            fxyz[2+4*nx+4*nxe*k+ll] = fxyz[2+4*nxe*k+ll];
         }
/* vector loop over elements in blocks of 4 */
/* mask 30583 = 0x7777 writes only components 0-2 of each 4-float */
/* group, leaving the 4th (padding) component untouched           */
         for (j = 0; j < nxs; j+=4) {
            _mm512_mask_store_ps(&fxyz[4*j+4*nxe*ny+ll],
            _mm512_int2mask(30583),_mm512_load_ps(&fxyz[4*j+ll]));
         }
/* loop over remaining elements */
         for (j = nxs; j < nx; j++) {
            fxyz[4*j+4*nxe*ny+ll] = fxyz[4*j+ll];
            fxyz[1+4*j+4*nxe*ny+ll] = fxyz[1+4*j+ll];
            fxyz[2+4*j+4*nxe*ny+ll] = fxyz[2+4*j+ll];
         }
         fxyz[4*nx+4*nxe*ny+ll] = fxyz[ll];
         fxyz[1+4*nx+4*nxe*ny+ll] = fxyz[1+ll];
         fxyz[2+4*nx+4*nxe*ny+ll] = fxyz[2+ll];
      }
/* replicate z edge: copy plane l=0 into guard plane l=nz */
#pragma omp for \
private(j,k)
      for (k = 0; k < ny; k++) {
/* vector loop over elements in blocks of 4 */
         for (j = 0; j < nxs; j+=4) {
            _mm512_mask_store_ps(&fxyz[4*j+4*nxe*k+nxyen*nz],
            _mm512_int2mask(30583),_mm512_load_ps(&fxyz[4*j+4*nxe*k]));
         }
/* loop over remaining elements */
         for (j = nxs; j < nx; j++) {
            fxyz[4*j+4*nxe*k+nxyen*nz] = fxyz[4*j+4*nxe*k];
            fxyz[1+4*j+4*nxe*k+nxyen*nz] = fxyz[1+4*j+4*nxe*k];
            fxyz[2+4*j+4*nxe*k+nxyen*nz] = fxyz[2+4*j+4*nxe*k];
         }
         fxyz[4*nx+4*nxe*k+nxyen*nz] = fxyz[4*nxe*k];
         fxyz[1+4*nx+4*nxe*k+nxyen*nz] = fxyz[1+4*nxe*k];
         fxyz[2+4*nx+4*nxe*k+nxyen*nz] = fxyz[2+4*nxe*k];
      }
   }
/* replicate the remaining y/z corner row (serial) */
/* vector loop over elements in blocks of 4 */
   for (j = 0; j < nxs; j+=4) {
      _mm512_mask_store_ps(&fxyz[4*j+4*nxe*ny+nxyen*nz],
      _mm512_int2mask(30583),_mm512_load_ps(&fxyz[4*j]));
   }
/* loop over remaining elements */
   for (j = nxs; j < nx; j++) {
      fxyz[4*j+4*nxe*ny+nxyen*nz] = fxyz[4*j];
      fxyz[1+4*j+4*nxe*ny+nxyen*nz] = fxyz[1+4*j];
      fxyz[2+4*j+4*nxe*ny+nxyen*nz] = fxyz[2+4*j];
   }
fxyz[4*nx+4*nxe*ny+nxyen*nz] = fxyz[0];
   fxyz[1+4*nx+4*nxe*ny+nxyen*nz] = fxyz[1];
   fxyz[2+4*nx+4*nxe*ny+nxyen*nz] = fxyz[2];
   return;
}

/*--------------------------------------------------------------------*/
void ckncacguard3l(float cu[], int nx, int ny, int nz, int nxe,
                   int nye, int nze) {
/* accumulate extended periodic field cu
   adds guard-cell contributions at the high edge of each dimension
   back into the interior cells at the low edge, then zeroes the
   guard cells (periodic deposition)
   linear interpolation
   nx/ny/nz = system length in x/y/z direction
   nxe = first dimension of field arrays, must be >= nx+1
   nye = second dimension of field arrays, must be >= ny+1
   nze = third dimension of field arrays, must be >= nz+1
   requires KNC, cu needs to be 64 byte aligned
   nxe needs to be a multiple of 4
local data                                                 */
   int j, k, l, nxs, nxyen, ll;
   __m512 v_cu, v_zero;
   nxs = 4*(nx/4);
   nxyen = 4*nxe*nye;
   v_zero = _mm512_set1_ps(0.0f);
/* accumulate edges of extended field */
#pragma omp parallel
   {
#pragma omp for \
private(j,k,l,ll,v_cu)
      for (l = 0; l < nz; l++) {
         ll = nxyen*l;
/* fold x guard column j=nx into column j=0 */
         for (k = 0; k < ny; k++) {
            cu[4*nxe*k+ll] += cu[4*nx+4*nxe*k+ll];
            cu[1+4*nxe*k+ll] += cu[1+4*nx+4*nxe*k+ll];
            cu[2+4*nxe*k+ll] += cu[2+4*nx+4*nxe*k+ll];
            cu[4*nx+4*nxe*k+ll] = 0.0;
            cu[1+4*nx+4*nxe*k+ll] = 0.0;
            cu[2+4*nx+4*nxe*k+ll] = 0.0;
         }
/* fold y guard row k=ny into row k=0 */
/* vector loop over elements in blocks of 4 */
         for (j = 0; j < nxs; j+=4) {
            v_cu = _mm512_load_ps(&cu[4*j+4*nxe*ny+ll]);
            v_cu = _mm512_add_ps(_mm512_load_ps(&cu[4*j+ll]),v_cu);
            _mm512_store_ps(&cu[4*j+ll],v_cu);
            _mm512_store_ps(&cu[4*j+4*nxe*ny+ll],v_zero);
         }
/* loop over remaining elements */
         for (j = nxs; j < nx; j++) {
            cu[4*j+ll] += cu[4*j+4*nxe*ny+ll];
            cu[1+4*j+ll] += cu[1+4*j+4*nxe*ny+ll];
            cu[2+4*j+ll] += cu[2+4*j+4*nxe*ny+ll];
            cu[4*j+4*nxe*ny+ll] = 0.0;
            cu[1+4*j+4*nxe*ny+ll] = 0.0;
            cu[2+4*j+4*nxe*ny+ll] = 0.0;
         }
         cu[ll] += cu[4*nx+4*nxe*ny+ll];
         cu[1+ll] += cu[1+4*nx+4*nxe*ny+ll];
         cu[2+ll] += cu[2+4*nx+4*nxe*ny+ll];
         cu[4*nx+4*nxe*ny+ll] = 0.0;
         cu[1+4*nx+4*nxe*ny+ll] = 0.0;
         cu[2+4*nx+4*nxe*ny+ll] = 0.0;
      }
/* fold z guard plane l=nz into plane l=0 */
#pragma omp for \
private(j,k,v_cu)
      for (k = 0; k < ny; k++) {
/* vector loop over elements in blocks of 4 */
         for (j = 0; j < nxs; j+=4) {
            v_cu = _mm512_load_ps(&cu[4*j+4*nxe*k+nxyen*nz]);
            v_cu = _mm512_add_ps(_mm512_load_ps(&cu[4*j+4*nxe*k]),v_cu);
            _mm512_store_ps(&cu[4*j+4*nxe*k],v_cu);
            _mm512_store_ps(&cu[4*j+4*nxe*k+nxyen*nz],v_zero);
         }
/* loop over remaining elements */
         for (j = nxs; j < nx; j++) {
            cu[4*j+4*nxe*k] += cu[4*j+4*nxe*k+nxyen*nz];
            cu[1+4*j+4*nxe*k] += cu[1+4*j+4*nxe*k+nxyen*nz];
            cu[2+4*j+4*nxe*k] += cu[2+4*j+4*nxe*k+nxyen*nz];
            cu[4*j+4*nxe*k+nxyen*nz] = 0.0;
            cu[1+4*j+4*nxe*k+nxyen*nz] = 0.0;
            cu[2+4*j+4*nxe*k+nxyen*nz] = 0.0;
         }
         cu[4*nxe*k] += cu[4*nx+4*nxe*k+nxyen*nz];
         cu[1+4*nxe*k] += cu[1+4*nx+4*nxe*k+nxyen*nz];
         cu[2+4*nxe*k] += cu[2+4*nx+4*nxe*k+nxyen*nz];
         cu[4*nx+4*nxe*k+nxyen*nz] = 0.0;
         cu[1+4*nx+4*nxe*k+nxyen*nz] = 0.0;
         cu[2+4*nx+4*nxe*k+nxyen*nz] = 0.0;
      }
   }
/* fold the remaining y/z corner row (serial) */
/* vector loop over elements in blocks of 4 */
   for (j = 0; j < nxs; j+=4) {
      v_cu = _mm512_load_ps(&cu[4*j+4*nxe*ny+nxyen*nz]);
      v_cu = _mm512_add_ps(_mm512_load_ps(&cu[4*j]),v_cu);
      _mm512_store_ps(&cu[4*j],v_cu);
      _mm512_store_ps(&cu[4*j+4*nxe*ny+nxyen*nz],v_zero);
   }
/* loop over remaining elements */
   for (j = nxs; j < nx; j++) {
      cu[4*j] += cu[4*j+4*nxe*ny+nxyen*nz];
      cu[1+4*j] += cu[1+4*j+4*nxe*ny+nxyen*nz];
      cu[2+4*j] += cu[2+4*j+4*nxe*ny+nxyen*nz];
      cu[4*j+4*nxe*ny+nxyen*nz] = 0.0;
      cu[1+4*j+4*nxe*ny+nxyen*nz] = 0.0;
      cu[2+4*j+4*nxe*ny+nxyen*nz] = 0.0;
   }
   cu[0] += cu[4*nx+4*nxe*ny+nxyen*nz];
   cu[1] += cu[1+4*nx+4*nxe*ny+nxyen*nz];
   cu[2] += cu[2+4*nx+4*nxe*ny+nxyen*nz];
   cu[4*nx+4*nxe*ny+nxyen*nz] = 0.0;
   cu[1+4*nx+4*nxe*ny+nxyen*nz] = 0.0;
   cu[2+4*nx+4*nxe*ny+nxyen*nz] = 0.0;
   return;
}

/*--------------------------------------------------------------------*/
void ckncaguard3l(float q[], int nx, int ny, int nz, int nxe,
                  int nye, int nze) {
/* accumulate extended periodic scalar field q
   adds guard-cell contributions at the high edge of each dimension
   back into the interior cells at the low edge, then zeroes the
   guard cells (periodic deposition)
   linear interpolation
   nx/ny/nz = system length in x/y/z direction
   nxe = first dimension of field arrays, must be >= nx+1
   nye = second dimension of field arrays, must be >= ny+1
   nze = third dimension of field arrays, must be >= nz+1
   requires KNC,
q needs to be 64 byte aligned
   nxe needs to be a multiple of 16
local data                                                 */
   int j, k, l, nxs, nxye, ll;
   __m512 v_q;
   nxs = 16*(nx/16);
   nxye = nxe*nye;
/* accumulate edges of extended field */
#pragma omp parallel
   {
#pragma omp for \
private(j,k,l,ll,v_q)
      for (l = 0; l < nz; l++) {
         ll = nxye*l;
/* fold x guard column j=nx into column j=0 */
         for (k = 0; k < ny; k++) {
            q[nxe*k+ll] += q[nx+nxe*k+ll];
            q[nx+nxe*k+ll] = 0.0;
         }
/* fold y guard row k=ny into row k=0 */
/* vector loop over elements in blocks of 16 */
         for (j = 0; j < nxs; j+=16) {
            v_q = _mm512_load_ps(&q[j+nxe*ny+ll]);
            v_q = _mm512_add_ps(_mm512_load_ps(&q[j+ll]),v_q);
            _mm512_store_ps(&q[j+ll],v_q);
            _mm512_store_ps(&q[j+nxe*ny+ll],_mm512_setzero_ps());
         }
/* loop over remaining elements */
         for (j = nxs; j < nx; j++) {
            q[j+ll] += q[j+nxe*ny+ll];
            q[j+nxe*ny+ll] = 0.0;
         }
         q[ll] += q[nx+nxe*ny+ll];
         q[nx+nxe*ny+ll] = 0.0;
      }
/* fold z guard plane l=nz into plane l=0 */
#pragma omp for \
private(j,k,v_q)
      for (k = 0; k < ny; k++) {
/* vector loop over elements in blocks of 16 */
         for (j = 0; j < nxs; j+=16) {
            v_q = _mm512_load_ps(&q[j+nxe*k+nxye*nz]);
            v_q = _mm512_add_ps(_mm512_load_ps(&q[j+nxe*k]),v_q);
            _mm512_store_ps(&q[j+nxe*k],v_q);
            _mm512_store_ps(&q[j+nxe*k+nxye*nz],_mm512_setzero_ps());
         }
/* loop over remaining elements */
         for (j = nxs; j < nx; j++) {
            q[j+nxe*k] += q[j+nxe*k+nxye*nz];
            q[j+nxe*k+nxye*nz] = 0.0;
         }
         q[nxe*k] += q[nx+nxe*k+nxye*nz];
         q[nx+nxe*k+nxye*nz] = 0.0;
      }
   }
/* fold the remaining y/z corner row (serial) */
/* vector loop over elements in blocks of 16 */
   for (j = 0; j < nxs; j+=16) {
      v_q = _mm512_load_ps(&q[j+nxe*ny+nxye*nz]);
      v_q = _mm512_add_ps(_mm512_load_ps(&q[j]),v_q);
      _mm512_store_ps(&q[j],v_q);
      _mm512_store_ps(&q[j+nxe*ny+nxye*nz],_mm512_setzero_ps());
   }
/* loop over remaining elements */
   for (j = nxs; j < nx; j++) {
      q[j] += q[j+nxe*ny+nxye*nz];
      q[j+nxe*ny+nxye*nz] = 0.0;
   }
   q[0] += q[nx+nxe*ny+nxye*nz];
   q[nx+nxe*ny+nxye*nz] = 0.0;
   return;
}

/*--------------------------------------------------------------------*/
void ckncmpois33(float complex q[], float complex fxyz[], int isign,
                 float complex ffc[], float ax, float ay, float az,
                 float affp, float *we, int nx, int ny,
int nz, int nxvh, int nyv, int nzv, int nxhd, int nyhd, int nzhd) { /* this subroutine solves 3d poisson's equation in fourier space for force/charge (or convolution of electric field over particle shape) with periodic boundary conditions. for isign = 0, output: ffc input: isign,ax,ay,az,affp,nx,ny,nz,nxvh,nyv,nzv,nxhd,nyhd,nzhd for isign = -1, output: fxyz, we input: q,ffc,isign,nx,ny,nz,nxvh,nyv,nzv,nxhd,nyhd,nzhd approximate flop count is: 59*nxc*nyc*nzc + 26*(nxc*nyc + nxc*nzc + nyc*nzc) where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1 if isign = 0, form factor array is prepared if isign is not equal to 0, force/charge is calculated equation used is: fx[kz][ky][kx] = -sqrt(-1)*kx*g[kz][ky][kx]*s[kz][ky][kx], fy[kz][ky][kx] = -sqrt(-1)*ky*g[kz][ky][kx]*s[kz][ky][kx], fz[kz][ky][kx] = -sqrt(-1)*kz*g[kz][ky][kx]*s[kz][ky][kx], where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, and j,k,l = fourier mode numbers, g[kz][ky][kx] = (affp/(kx**2+ky**2+kz**2))*s[kz][ky][kx], s[kz][ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2), except for fx(kx=pi) = fy(kx=pi) = fz(kx=pi) = 0, fx(ky=pi) = fy(ky=pi) = fx(ky=pi) = 0, fx(kz=pi) = fy(kz=pi) = fz(kz=pi) = 0, fx(kx=0,ky=0,kz=0) = fy(kx=0,ky=0,kz=0) = fz(kx=0,ky=0,kz=0) = 0. 
q[l][k][j] = complex charge density for fourier mode (j,k,l) fxyz[l][k][j][0] = x component of complex force/charge fxyz[l][k][j][1] = y component of complex force/charge fxyz[l][k][j][2] = z component of complex force/charge all for fourier mode (j,k,l) cimag(ffc[l][k][j]) = finite-size particle shape factor s for fourier mode (j,k,l) creal(ffc[l][k][j]) = potential green's function g for fourier mode (j,k,l) ax/ay/az = half-width of particle in x/y/z direction affp = normalization constant = nx*ny*nz/np, where np=number of particles electric field energy is also calculated, using we = nx*ny*nz*sum((affp/(kx**2+ky**2+kz**2))* |q[kz][ky][kx]*s[kz][ky][kx]|**2) nx/ny/nz = system length in x/y/z direction nxvh = first dimension of field arrays, must be >= nxh nyv = second dimension of field arrays, must be >= ny nzv = third dimension of field arrays, must be >= nz nxhd = first dimension of form factor array, must be >= nxh nyhd = second dimension of form factor array, must be >= nyh nzhd = third dimension of form factor array, must be >= nzh requires KNC, q, fxy, ffc need to be 64 byte aligned nxhd, nxvh need to be a multiple of 8 fxyz needs to have 4 components local data */ int nxh, nyh, nzh, nxhs, itn, j, k, l, k1, l1, kk, kj, ll, lj; int nxyhd, nxvyh; float dnx, dny, dnz, dkx, dky, dkz, at1, at2, at3, at4, at5, at6; float complex zero, zt1, zt2; double wp, sum1, sum2; __m512i v_j, v_it, v_perm; __m512 v_dnx, v_dny, v_dnz, v_dky, v_dkz, v_at1, v_at2, v_at3, v_at4; __m512 v_zero, v_zt1, v_zt2, v_zt3, v_zt4; __m512 a, b, c, d, e, f, g, h; __m512d v_wp, v_d; __attribute__((aligned(64))) double dd[8]; nxh = nx/2; nyh = 1 > ny/2 ? 1 : ny/2; nzh = 1 > nz/2 ? 1 : nz/2; nxhs = 8*(nxh/8); itn = 1 > nxhs ? 
1 : nxhs; nxyhd = nxhd*nyhd; nxvyh = nxvh*nyv; dnx = 6.28318530717959/(float) nx; dny = 6.28318530717959/(float) ny; dnz = 6.28318530717959/(float) nz; zero = 0.0 + 0.0*_Complex_I; v_j = _mm512_set_epi32(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0); v_dnx = _mm512_set1_ps(dnx); v_dny = _mm512_set1_ps(dny); v_dnz = _mm512_set1_ps(dnz); v_zero = _mm512_setzero_ps(); v_perm = _mm512_set_epi32(15,14,11,10,7,6,3,2,13,12,9,8,5,4,1,0); if (isign != 0) goto L40; /* prepare form factor array */ for (l = 0; l < nzh; l++) { dkz = dnz*(float) l; ll = nxyhd*l; at1 = dkz*dkz; at2 = pow((dkz*az),2); for (k = 0; k < nyh; k++) { dky = dny*(float) k; kk = nxhd*k; at3 = dky*dky + at1; at4 = pow((dky*ay),2) + at2; for (j = 0; j < nxh; j++) { dkx = dnx*(float) j; at5 = dkx*dkx + at3; at6 = exp(-0.5*(pow((dkx*ax),2) + at4)); if (at5==0.0) { ffc[j+kk+ll] = affp + 1.0*_Complex_I; } else { ffc[j+kk+ll] = (affp*at6/at5) + at6*_Complex_I; } } } } return; /* calculate force/charge and sum field energy */ L40: sum1 = 0.0; /* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */ #pragma omp parallel { #pragma omp for nowait \ private(j,k,l,k1,l1,ll,lj,kk,kj,dky,dkz,at1,at2,at3,at4,zt1,zt2,wp, \ v_it,v_dky,v_dkz,v_at1,v_at2,v_at3,v_at4,v_zt1,v_zt2,v_zt3,v_zt4,a,b, \ c,d,e,f,g,h,v_d,v_wp,dd) \ reduction(+:sum1) for (l = 1; l < nzh; l++) { dkz = dnz*(float) l; v_dkz = _mm512_cvtfxpnt_round_adjustepi32_ps( _mm512_set1_epi32(l),_MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkz = _mm512_mul_ps(v_dnz,v_dkz); ll = nxyhd*l; lj = nxvyh*l; l1 = nxvyh*nz - lj; wp = 0.0; v_wp = _mm512_setzero_pd(); for (k = 1; k < nyh; k++) { dky = dny*(float) k; v_it = _mm512_set1_epi32(k); v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dky = _mm512_mul_ps(v_dny,v_dky); kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; /* vector loop over elements in blocks of 8 */ for (j = 0; j < nxhs; j+=8) { /* at1 = crealf(ffc[j+kk+ll])*cimagf(ffc[j+kk+ll]); */ v_at1 = _mm512_load_ps((float 
*)&ffc[j+kk+ll]); v_at2 = (__m512)_mm512_shuffle_epi32((__m512i)v_at1,177); v_at1 = _mm512_mul_ps(v_at1,v_at2); /* at2 = at1*dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_at2 = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_at2 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_dnx,v_at2)); /* at3 = dky*at1; */ v_at3 = _mm512_mul_ps(v_dky,v_at1); /* at4 = dkz*at1; */ v_at4 = _mm512_mul_ps(v_dkz,v_at1); /* zt1 = cimagf(q[j+kj+lj]) - crealf(q[j+kj+lj])*_Complex_I; */ v_zt1 = _mm512_load_ps((float *)&q[j+kj+lj]); v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845), v_zero,v_zt1); v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177); /* zt2 = cimagf(q[j+k1+lj]) - crealf(q[j+k1+lj])*_Complex_I; */ v_zt2 = _mm512_load_ps((float *)&q[j+k1+lj]); v_zt2 = _mm512_mask_sub_ps(v_zt2,_mm512_int2mask(21845), v_zero,v_zt2); v_zt2 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt2,177); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3), v_zero); v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(3), v_zero); } /* fxyz[4*(j+kj+lj)] = at2*zt1; */ /* fxyz[1+4*(j+kj+lj)] = at3*zt1; */ /* fxyz[2+4*(j+kj+lj)] = at4*zt1; */ a = _mm512_mul_ps(v_at2,v_zt1); b = _mm512_mul_ps(v_at3,v_zt1); c = _mm512_mul_ps(v_at4,v_zt1); /* perform 4x16 transpose for fxyz field components */ e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280), c,78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255), a,78); g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280), v_zero,78); h = _mm512_mask_permute4f128_ps(v_zero, _mm512_int2mask(255),b,78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680), g,177); b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855), e,177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680), h,177); d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855), f,177); a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c 
= (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float *)&fxyz[4*(j+kj+lj)],a); _mm512_store_ps((float *)&fxyz[8+4*(j+kj+lj)],b); _mm512_store_ps((float *)&fxyz[16+4*(j+kj+lj)],c); _mm512_store_ps((float *)&fxyz[24+4*(j+kj+lj)],d); /* fxyz[4*(j+k1+lj)] = at2*zt2; */ /* fxyz[1+4*(j+k1+lj)] = -at3*zt2; */ /* fxyz[2+4*(j+k1+lj)] = at4*zt2; */ a = _mm512_mul_ps(v_at2,v_zt2); b = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at3,v_zt2)); c = _mm512_mul_ps(v_at4,v_zt2); /* perform 4x16 transpose for fxyz field components */ e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280), c,78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255), a,78); g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280), v_zero,78); h = _mm512_mask_permute4f128_ps(v_zero, _mm512_int2mask(255),b,78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680), g,177); b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855), e,177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680), h,177); d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855), f,177); a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],a); _mm512_store_ps((float *)&fxyz[8+4*(j+k1+lj)],b); _mm512_store_ps((float *)&fxyz[16+4*(j+k1+lj)],c); _mm512_store_ps((float *)&fxyz[24+4*(j+k1+lj)],d); /* wp += at1*(q[j+kj+lj]*conjf(q[j+kj+lj]) */ /* + q[j+k1+lj]*conjf(q[j+k1+lj])); */ v_zt3 = _mm512_mul_ps(v_zt1,v_zt1); v_zt3 = _mm512_add_ps(v_zt3,_mm512_mul_ps(v_zt2,v_zt2)); v_zt3 = _mm512_mul_ps(v_at1,v_zt3); /* zt1 = cimagf(q[j+kj+l1]) - crealf(q[j+kj+l1])*_Complex_I; */ v_zt1 = _mm512_load_ps((float *)&q[j+kj+l1]); v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845), v_zero,v_zt1); v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177); 
/* zt2 = cimagf(q[j+k1+l1]) - crealf(q[j+k1+l1])*_Complex_I; */ v_zt2 = _mm512_load_ps((float *)&q[j+k1+l1]); v_zt2 = _mm512_mask_sub_ps(v_zt2,_mm512_int2mask(21845), v_zero,v_zt2); v_zt2 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt2,177); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3), v_zero); v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(3), v_zero); } /* fxyz[4*(j+kj+l1)] = at2*zt1; */ /* fxyz[1+4*(j+kj+l1)] = at3*zt1; */ /* fxyz[2+4*(j+kj+l1)] = -at4*zt1; */ a = _mm512_mul_ps(v_at2,v_zt1); b = _mm512_mul_ps(v_at3,v_zt1); c = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at4,v_zt1)); /* perform 4x16 transpose for fxyz field components */ e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280), c,78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255), a,78); g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280), v_zero,78); h = _mm512_mask_permute4f128_ps(v_zero, _mm512_int2mask(255),b,78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680), g,177); b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855), e,177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680), h,177); d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855), f,177); a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],a); _mm512_store_ps((float *)&fxyz[8+4*(j+kj+l1)],b); _mm512_store_ps((float *)&fxyz[16+4*(j+kj+l1)],c); _mm512_store_ps((float *)&fxyz[24+4*(j+kj+l1)],d); /* fxyz[4*(j+k1+l1)] = at2*zt2; */ /* fxyz[1+4*(j+k1+l1)] = -at3*zt2; */ /* fxyz[2+4*(j+k1+l1)] = -at4*zt2; */ a = _mm512_mul_ps(v_at2,v_zt2); b = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at3,v_zt2)); c = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at4,v_zt2)); /* perform 4x16 transpose for fxyz field components */ e = 
_mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280), c,78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255), a,78); g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280), v_zero,78); h = _mm512_mask_permute4f128_ps(v_zero, _mm512_int2mask(255),b,78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680), g,177); b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855), e,177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680), h,177); d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855), f,177); a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],a); _mm512_store_ps((float *)&fxyz[8+4*(j+k1+l1)],b); _mm512_store_ps((float *)&fxyz[16+4*(j+k1+l1)],c); _mm512_store_ps((float *)&fxyz[24+4*(j+k1+l1)],d); /* wp += at1*(q[j+kj+l1]*conjf(q[j+kj+l1]) */ /* + q[j+k1+l1]*conjf(q[j+k1+l1])); */ v_zt4 = _mm512_mul_ps(v_zt1,v_zt1); v_zt4 = _mm512_add_ps(v_zt4,_mm512_mul_ps(v_zt2,v_zt2)); v_zt3 = _mm512_add_ps(v_zt3,_mm512_mul_ps(v_at1,v_zt4)); /* convert to double precision before accumulating */ v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt3)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt3,78)); v_wp = _mm512_add_pd(v_wp,v_d); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = crealf(ffc[j+kk+ll])*cimagf(ffc[j+kk+ll]); at2 = at1*dnx*(float) j; at3 = dky*at1; at4 = dkz*at1; zt1 = cimagf(q[j+kj+lj]) - crealf(q[j+kj+lj])*_Complex_I; zt2 = cimagf(q[j+k1+lj]) - crealf(q[j+k1+lj])*_Complex_I; fxyz[4*(j+kj+lj)] = at2*zt1; fxyz[1+4*(j+kj+lj)] = at3*zt1; fxyz[2+4*(j+kj+lj)] = at4*zt1; fxyz[4*(j+k1+lj)] = at2*zt2; fxyz[1+4*(j+k1+lj)] = -at3*zt2; fxyz[2+4*(j+k1+lj)] = at4*zt2; zt1 = cimagf(q[j+kj+l1]) - crealf(q[j+kj+l1])*_Complex_I; zt2 = cimagf(q[j+k1+l1]) - crealf(q[j+k1+l1])*_Complex_I; fxyz[4*(j+kj+l1)] = at2*zt1; 
fxyz[1+4*(j+kj+l1)] = at3*zt1; fxyz[2+4*(j+kj+l1)] = -at4*zt1; fxyz[4*(j+k1+l1)] = at2*zt2; fxyz[1+4*(j+k1+l1)] = -at3*zt2; fxyz[2+4*(j+k1+l1)] = -at4*zt2; at1 = at1*(q[j+kj+lj]*conjf(q[j+kj+lj]) + q[j+k1+lj]*conjf(q[j+k1+lj]) + q[j+kj+l1]*conjf(q[j+kj+l1]) + q[j+k1+l1]*conjf(q[j+k1+l1])); wp += (double) at1; } } /* mode numbers kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; at1 = crealf(ffc[kk+ll])*cimagf(ffc[kk+ll]); at3 = at1*dny*(float) k; at4 = dkz*at1; zt1 = cimagf(q[kj+lj]) - crealf(q[kj+lj])*_Complex_I; zt2 = cimagf(q[kj+l1]) - crealf(q[kj+l1])*_Complex_I; fxyz[4*(kj+lj)] = zero; fxyz[1+4*(kj+lj)] = at3*zt1; fxyz[2+4*(kj+lj)] = at4*zt1; fxyz[4*(k1+lj)] = zero; fxyz[1+4*(k1+lj)] = zero; fxyz[2+4*(k1+lj)] = zero; fxyz[4*(kj+l1)] = zero; fxyz[1+4*(kj+l1)] = at3*zt2; fxyz[2+4*(kj+l1)] = -at4*zt2; fxyz[4*(k1+l1)] = zero; fxyz[1+4*(k1+l1)] = zero; fxyz[2+4*(k1+l1)] = zero; at1 = at1*(q[kj+lj]*conjf(q[kj+lj]) + q[kj+l1]*conjf(q[kj+l1])); wp += (double) at1; } /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; /* vector loop over elements in blocks of 8 */ for (j = 0; j < nxhs; j+=8) { /* at1 = crealf(ffc[j+ll])*cimagf(ffc[j+ll]); */ v_at1 = _mm512_load_ps((float *)&ffc[j+ll]); v_at2 = (__m512)_mm512_shuffle_epi32((__m512i)v_at1,177); v_at1 = _mm512_mul_ps(v_at1,v_at2); /* at2 = at1*dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_at2 = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_at2 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_dnx,v_at2)); /* at4 = dkz*at1; */ v_at4 = _mm512_mul_ps(v_dkz,v_at1); /* zt1 = cimagf(q[j+lj]) - crealf(q[j+lj])*_Complex_I; */ v_zt1 = _mm512_load_ps((float *)&q[j+lj]); v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845), v_zero,v_zt1); v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177); /* zt2 = cimagf(q[j+l1]) - crealf(q[j+l1])*_Complex_I; */ v_zt2 = _mm512_load_ps((float *)&q[j+l1]); v_zt2 = 
_mm512_mask_sub_ps(v_zt2,_mm512_int2mask(21845), v_zero,v_zt2); v_zt2 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt2,177); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3), v_zero); v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(3), v_zero); } /* fxyz[4*(j+lj)] = at2*zt1; */ /* fxyz[1+4*(j+lj)] = zero; */ /* fxyz[2+4*(j+lj)] = at4*zt1; */ a = _mm512_mul_ps(v_at2,v_zt1); b = v_zero; c = _mm512_mul_ps(v_at4,v_zt1); /* perform 4x16 transpose for fxyz field components */ e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),c, 78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),a, 78); g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280), v_zero,78); h = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(255), b,78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),g, 177); b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),e, 177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),h, 177); d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),f, 177); a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float *)&fxyz[4*(j+lj)],a); _mm512_store_ps((float *)&fxyz[8+4*(j+lj)],b); _mm512_store_ps((float *)&fxyz[16+4*(j+lj)],c); _mm512_store_ps((float *)&fxyz[24+4*(j+lj)],d); /* fxyz[4*(j+k1+lj)] = zero; */ /* fxyz[1+4*(j+k1+lj)] = zero; */ /* fxyz[2+4*(j+k1+lj)] = zero; */ _mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],v_zero); _mm512_store_ps((float *)&fxyz[8+4*(j+k1+lj)],v_zero); _mm512_store_ps((float *)&fxyz[16+4*(j+k1+lj)],v_zero); _mm512_store_ps((float *)&fxyz[24+4*(j+k1+lj)],v_zero); /* fxyz[4*(j+l1)] = at2*zt2; */ /* fxyz[1+4*(j+l1)] = zero; */ /* fxyz[2+4*(j+l1)] = -at4*zt2; */ a = _mm512_mul_ps(v_at2,v_zt2); b = v_zero; c = _mm512_sub_ps(v_zero,_mm512_mul_ps(v_at4,v_zt2)); /* perform 4x16 transpose 
for fxyz field components */ e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),c, 78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),a, 78); g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280), v_zero,78); h = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(255), b,78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),g, 177); b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),e, 177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),h, 177); d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),f, 177); a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float *)&fxyz[4*(j+l1)],a); _mm512_store_ps((float *)&fxyz[8+4*(j+l1)],b); _mm512_store_ps((float *)&fxyz[16+4*(j+l1)],c); _mm512_store_ps((float *)&fxyz[24+4*(j+l1)],d); /* fxyz[4*(j+k1+l1)] = zero; */ /* fxyz[1+4*(j+k1+l1)] = zero; */ /* fxyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zero); _mm512_store_ps((float *)&fxyz[8+4*(j+k1+l1)],v_zero); _mm512_store_ps((float *)&fxyz[16+4*(j+k1+l1)],v_zero); _mm512_store_ps((float *)&fxyz[24+4*(j+k1+l1)],v_zero); /* wp += at1*(q[j+lj]*conjf(q[j+lj]) */ /* + q[j+l1]*conjf(q[j+l1])); */ v_zt3 = _mm512_mul_ps(v_zt1,v_zt1); v_zt3 = _mm512_add_ps(v_zt3,_mm512_mul_ps(v_zt2,v_zt2)); v_zt3 = _mm512_mul_ps(v_at1,v_zt3); /* convert to double precision before accumulating */ v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt3)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt3,78)); v_wp = _mm512_add_pd(v_wp,v_d); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = crealf(ffc[j+ll])*cimagf(ffc[j+ll]); at2 = at1*dnx*(float) j; at4 = dkz*at1; zt1 = cimagf(q[j+lj]) - crealf(q[j+lj])*_Complex_I; zt2 = cimagf(q[j+l1]) - crealf(q[j+l1])*_Complex_I; fxyz[4*(j+lj)] = at2*zt1; fxyz[1+4*(j+lj)] = zero; 
fxyz[2+4*(j+lj)] = at4*zt1; fxyz[4*(j+k1+lj)] = zero; fxyz[1+4*(j+k1+lj)] = zero; fxyz[2+4*(j+k1+lj)] = zero; fxyz[4*(j+l1)] = at2*zt2; fxyz[1+4*(j+l1)] = zero; fxyz[2+4*(j+l1)] = -at4*zt2; fxyz[4*(j+k1+l1)] = zero; fxyz[1+4*(j+k1+l1)] = zero; fxyz[2+4*(j+k1+l1)] = zero; at1 = at1*(q[j+lj]*conjf(q[j+lj]) + q[j+l1]*conjf(q[j+l1])); wp += (double) at1; } /* mode numbers kx = 0, nx/2 */ at1 = crealf(ffc[ll])*cimagf(ffc[ll]); at4 = dkz*at1; zt1 = cimagf(q[lj]) - crealf(q[lj])*_Complex_I; fxyz[4*lj] = zero; fxyz[1+4*lj] = zero; fxyz[2+4*lj] = at4*zt1; fxyz[4*(k1+lj)] = zero; fxyz[1+4*(k1+lj)] = zero; fxyz[2+4*(k1+lj)] = zero; fxyz[4*l1] = zero; fxyz[1+4*l1] = zero; fxyz[2+4*l1] = zero; fxyz[4*(k1+l1)] = zero; fxyz[1+4*(k1+l1)] = zero; fxyz[2+4*(k1+l1)] = zero; at1 = at1*(q[lj]*conjf(q[lj])); wp += (double) at1; /* sum1 += wp; */ _mm512_store_pd(&dd[0],v_wp); for (j = 1; j < 8; j++) { dd[0] += dd[j]; } sum1 += (wp + dd[0]); } } /* mode numbers kz = 0, nz/2 */ l1 = nxvyh*nzh; sum2 = 0.0; #pragma omp parallel for \ private(j,k,k1,kk,kj,dky,at1,at2,at3,zt1,zt2,wp,v_it,v_dky,v_at1, \ v_at2,v_at3,v_zt1,v_zt2,v_zt3,a,b,c,d,e,f,g,h,v_d,v_wp,dd) \ reduction(+:sum2) for (k = 1; k < nyh; k++) { dky = dny*(float) k; v_it = _mm512_set1_epi32(k); v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dky = _mm512_mul_ps(v_dny,v_dky); kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; wp = 0.0; v_wp = _mm512_set1_pd(0.0); /* vector loop over elements in blocks of 8 */ for (j = 0; j < nxhs; j+=8) { /* at1 = crealf(ffc[j+kk])*cimagf(ffc[j+kk]); */ v_at1 = _mm512_load_ps((float *)&ffc[j+kk]); v_at2 = (__m512)_mm512_shuffle_epi32((__m512i)v_at1,177); v_at1 = _mm512_mul_ps(v_at1,v_at2); /* at2 = at1*dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_at2 = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_at2 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_dnx,v_at2)); /* at3 = dky*at1; */ v_at3 = 
_mm512_mul_ps(v_dky,v_at1); /* zt1 = cimagf(q[j+kj]) - crealf(q[j+kj])*_Complex_I; */ v_zt1 = _mm512_load_ps((float *)&q[j+kj]); v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845), v_zero,v_zt1); v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177); /* zt2 = cimagf(q[j+k1]) - crealf(q[j+k1])*_Complex_I; */ v_zt2 = _mm512_load_ps((float *)&q[j+k1]); v_zt2 = _mm512_mask_sub_ps(v_zt2,_mm512_int2mask(21845), v_zero,v_zt2); v_zt2 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt2,177); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3), v_zero); v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(3), v_zero); } /* fxyz[4*(j+kj)] = at2*zt1; */ /* fxyz[1+4*(j+kj)] = at3*zt1; */ /* fxyz[2+4*(j+kj)] = zero; */ a = _mm512_mul_ps(v_at2,v_zt1); b = _mm512_mul_ps(v_at3,v_zt1); c = v_zero; /* perform 4x16 transpose for fxyz field components */ e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),c,78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),a,78); g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280), v_zero,78); h = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(255),b, 78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),g, 177); b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),e,177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),h, 177); d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),f,177); a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float *)&fxyz[4*(j+kj)],a); _mm512_store_ps((float *)&fxyz[8+4*(j+kj)],b); _mm512_store_ps((float *)&fxyz[16+4*(j+kj)],c); _mm512_store_ps((float *)&fxyz[24+4*(j+kj)],d); /* fxyz[4*(j+k1)] = at2*zt2; */ /* fxyz[1+4*(j+k1)] = -at3*zt2; */ /* fxyz[2+4*(j+k1)] = zero; */ a = _mm512_mul_ps(v_at2,v_zt2); b = 
_mm512_sub_ps(v_zero,_mm512_mul_ps(v_at3,v_zt2)); c = v_zero;; /* perform 4x16 transpose for fxyz field components */ e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),c,78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),a,78); g = _mm512_mask_permute4f128_ps(b,_mm512_int2mask(65280), v_zero,78); h = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(255),b, 78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),g, 177); b = _mm512_mask_permute4f128_ps(g,_mm512_int2mask(3855),e,177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),h, 177); d = _mm512_mask_permute4f128_ps(h,_mm512_int2mask(3855),f,177); a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float *)&fxyz[4*(j+k1)],a); _mm512_store_ps((float *)&fxyz[8+4*(j+k1)],b); _mm512_store_ps((float *)&fxyz[16+4*(j+k1)],c); _mm512_store_ps((float *)&fxyz[24+4*(j+k1)],d); /* fxyz[4*(j+kj+l1)] = zero; */ /* fxyz[1+4*(j+kj+l1)] = zero; */ /* fxyz[2+4*(j+kj+l1)] = zero; */ _mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],v_zero); _mm512_store_ps((float *)&fxyz[8+4*(j+kj+l1)],v_zero); _mm512_store_ps((float *)&fxyz[16+4*(j+kj+l1)],v_zero); _mm512_store_ps((float *)&fxyz[24+4*(j+kj+l1)],v_zero); /* fxyz[4*(j+k1+l1)] = zero; */ /* fxyz[1+4*(j+k1+l1)] = zero; */ /* fxyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zero); _mm512_store_ps((float *)&fxyz[8+4*(j+k1+l1)],v_zero); _mm512_store_ps((float *)&fxyz[16+4*(j+k1+l1)],v_zero); _mm512_store_ps((float *)&fxyz[24+4*(j+k1+l1)],v_zero); /* at1 = at1*(q[j+kj]*conjf(q[j+kj]) + q[j+k1]*conjf(q[j+k1])); */ v_zt3 = _mm512_mul_ps(v_zt1,v_zt1); v_zt3 = _mm512_add_ps(v_zt3,_mm512_mul_ps(v_zt2,v_zt2)); v_zt3 = _mm512_mul_ps(v_at1,v_zt3); /* convert to double precision before accumulating */ v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt3)); 
v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt3,78)); v_wp = _mm512_add_pd(v_wp,v_d); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = crealf(ffc[j+kk])*cimagf(ffc[j+kk]); at2 = at1*dnx*(float) j; at3 = dky*at1; zt1 = cimagf(q[j+kj]) - crealf(q[j+kj])*_Complex_I; zt2 = cimagf(q[j+k1]) - crealf(q[j+k1])*_Complex_I; fxyz[4*(j+kj)] = at2*zt1; fxyz[1+4*(j+kj)] = at3*zt1; fxyz[2+4*(j+kj)] = zero; fxyz[4*(j+k1)] = at2*zt2; fxyz[1+4*(j+k1)] = -at3*zt2; fxyz[2+4*(j+k1)] = zero; fxyz[4*(j+kj+l1)] = zero; fxyz[1+4*(j+kj+l1)] = zero; fxyz[2+4*(j+kj+l1)] = zero; fxyz[4*(j+k1+l1)] = zero; fxyz[1+4*(j+k1+l1)] = zero; fxyz[2+4*(j+k1+l1)] = zero; at1 = at1*(q[j+kj]*conjf(q[j+kj]) + q[j+k1]*conjf(q[j+k1])); wp += (double) at1; } /* sum2 += wp; */ _mm512_store_pd(&dd[0],v_wp); for (j = 1; j < 8; j++) { dd[0] += dd[j]; } sum2 += (wp + dd[0]); } /* mode numbers kx = 0, nx/2 */ wp = 0.0; v_wp = _mm512_setzero_pd(); for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; at1 = crealf(ffc[kk])*cimagf(ffc[kk]); at3 = at1*dny*(float) k; zt1 = cimagf(q[kj]) - crealf(q[kj])*_Complex_I; fxyz[4*kj] = zero; fxyz[1+4*kj] = at3*zt1; fxyz[2+4*kj] = zero; fxyz[4*k1] = zero; fxyz[1+4*k1] = zero; fxyz[2+4*k1] = zero; fxyz[4*(kj+l1)] = zero; fxyz[1+4*(kj+l1)] = zero; fxyz[2+4*(kj+l1)] = zero; fxyz[4*(k1+l1)] = zero; fxyz[1+4*(k1+l1)] = zero; fxyz[2+4*(k1+l1)] = zero; at1 = at1*(q[kj]*conjf(q[kj])); wp += (double) at1; } /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; /* vector loop over elements in blocks of 8 */ for (j = 0; j < nxhs; j+=8) { /* at1 = crealf(ffc[j])*cimagf(ffc[j]); */ v_at1 = _mm512_load_ps((float *)&ffc[j]); v_at2 = (__m512)_mm512_shuffle_epi32((__m512i)v_at1,177); v_at1 = _mm512_mul_ps(v_at1,v_at2); /* at2 = at1*dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_at2 = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_at2 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_dnx,v_at2)); /* zt1 = 
cimagf(q[j]) - crealf(q[j])*_Complex_I; */ v_zt1 = _mm512_load_ps((float *)&q[j]); v_zt1 = _mm512_mask_sub_ps(v_zt1,_mm512_int2mask(21845),v_zero, v_zt1); v_zt1 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,177); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(3),v_zero); } /* fxyz[4*j] = at2*zt1; */ /* fxyz[1+4*j] = zero; */ /* fxyz[2+4*j] = zero; */ a = _mm512_mul_ps(v_at2,v_zt1); b = v_zero; c = v_zero; /* perform 4x16 transpose for fxyz field components */ e = _mm512_mask_permute4f128_ps(a,_mm512_int2mask(65280),c,78); f = _mm512_mask_permute4f128_ps(c,_mm512_int2mask(255),a,78); a = _mm512_mask_permute4f128_ps(e,_mm512_int2mask(61680),v_zero, 177); b = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(3855),e, 177); c = _mm512_mask_permute4f128_ps(f,_mm512_int2mask(61680),v_zero, 177); d = _mm512_mask_permute4f128_ps(v_zero,_mm512_int2mask(3855),f, 177); a = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)a); b = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)b); c = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)c); d = (__m512)_mm512_permutevar_epi32(v_perm,(__m512i)d); _mm512_store_ps((float *)&fxyz[4*j],a); _mm512_store_ps((float *)&fxyz[8+4*j],b); _mm512_store_ps((float *)&fxyz[16+4*j],c); _mm512_store_ps((float *)&fxyz[24+4*j],d); /* fxyz[4*(j+k1)] = zero; */ /* fxyz[1+4*(j+k1)] = zero; */ /* fxyz[2+4*(j+k1)] = zero; */ _mm512_store_ps((float *)&fxyz[4*(j+k1)],v_zero); _mm512_store_ps((float *)&fxyz[8+4*(j+k1)],v_zero); _mm512_store_ps((float *)&fxyz[16+4*(j+k1)],v_zero); _mm512_store_ps((float *)&fxyz[24+4*(j+k1)],v_zero); /* fxyz[4*(j+l1)] = zero; */ /* fxyz[1+4*(j+l1)] = zero; */ /* fxyz[2+4*(j+l1)] = zero; */ _mm512_store_ps((float *)&fxyz[4*(j+l1)],v_zero); _mm512_store_ps((float *)&fxyz[8+4*(j+l1)],v_zero); _mm512_store_ps((float *)&fxyz[16+4*(j+l1)],v_zero); _mm512_store_ps((float *)&fxyz[24+4*(j+l1)],v_zero); /* fxyz[4*(j+k1+l1)] = zero; */ /* fxyz[1+4*(j+k1+l1)] = zero; */ /* fxyz[2+4*(j+k1+l1)] = zero; 
*/
         _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zero);
         _mm512_store_ps((float *)&fxyz[8+4*(j+k1+l1)],v_zero);
         _mm512_store_ps((float *)&fxyz[16+4*(j+k1+l1)],v_zero);
         _mm512_store_ps((float *)&fxyz[24+4*(j+k1+l1)],v_zero);
/* wp += at1*(q[j]*conjf(q[j])); */
         v_zt3 = _mm512_mul_ps(v_at1,_mm512_mul_ps(v_zt1,v_zt1));
/* convert to double precision before accumulating */
         v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt3));
         v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt3,78));
         v_wp = _mm512_add_pd(v_wp,v_d);
      }
/* loop over remaining elements */
      for (j = itn; j < nxh; j++) {
         at1 = crealf(ffc[j])*cimagf(ffc[j]);
         at2 = at1*dnx*(float) j;
         zt1 = cimagf(q[j]) - crealf(q[j])*_Complex_I;
         fxyz[4*j] = at2*zt1;
         fxyz[1+4*j] = zero;
         fxyz[2+4*j] = zero;
         fxyz[4*(j+k1)] = zero;
         fxyz[1+4*(j+k1)] = zero;
         fxyz[2+4*(j+k1)] = zero;
         fxyz[4*(j+l1)] = zero;
         fxyz[1+4*(j+l1)] = zero;
         fxyz[2+4*(j+l1)] = zero;
         fxyz[4*(j+k1+l1)] = zero;
         fxyz[1+4*(j+k1+l1)] = zero;
         fxyz[2+4*(j+k1+l1)] = zero;
         at1 = at1*(q[j]*conjf(q[j]));
         wp += (double) at1;
      }
      fxyz[0] = zero;
      fxyz[1] = zero;
      fxyz[2] = zero;
      fxyz[4*k1] = zero;
      fxyz[1+4*k1] = zero;
      fxyz[2+4*k1] = zero;
      fxyz[4*l1] = zero;
      fxyz[1+4*l1] = zero;
      fxyz[2+4*l1] = zero;
      fxyz[4*(k1+l1)] = zero;
      fxyz[1+4*(k1+l1)] = zero;
      fxyz[2+4*(k1+l1)] = zero;
/* sum2 += wp; */
   _mm512_store_pd(&dd[0],v_wp);
   for (j = 1; j < 8; j++) {
      dd[0] += dd[j];
   }
   sum2 += (wp + dd[0]);
/* *we = wp*((float) nx)*((float) ny)*((float) nz); */
   *we = (sum1 + sum2)*((float) nx)*((float) ny)*((float) nz);
   return;
}
/*--------------------------------------------------------------------*/
void ckncmcuperp3(float complex cu[], int nx, int ny, int nz, int nxvh,
                  int nyv, int nzv) {
/* this subroutine calculates the transverse current in fourier space
   input: all, output: cu
   approximate flop count is:
   100*nxc*nyc*nzc + 36*(nxc*nyc + nxc*nzc + nyc*nzc)
   and (nx/2)*nyc*nzc divides
   where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1
   the transverse current is calculated using the equation:
   cux[kz][ky][kx] = cux[kz][ky][kx]
   - kx*(kx*cux[kz][ky][kx]+ky*cuy[kz][ky][kx]
   + kz*cuz[kz][ky][kx])/(kx*kx+ky*ky+kz*kz)
   cuy[kz][ky][kx] = cuy[kz][ky][kx]
   - ky*(kx*cux[kz][ky][kx]+ky*cuy[kz][ky][kx]
   + kz*cuz[kz][ky][kx])/(kx*kx+ky*ky+kz*kz)
   cuz[kz][ky][kx] = cuz[kz][ky][kx]
   - kz*(kx*cux[kz][ky][kx]+ky*cuy[kz][ky][kx]
   + kz*cuz[kz][ky][kx])/(kx*kx+ky*ky+kz*kz)
   where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, and
   j,k,l = fourier mode numbers, except for
   cux(kx=pi) = cuy(kx=pi) = cuz(kx=pi) = 0,
   cux(ky=pi) = cuy(ky=pi) = cuz(ky=pi) = 0,
   cux(kz=pi) = cuy(kz=pi) = cuz(kz=pi) = 0,
   cux(kx=0,ky=0,kz=0) = cuy(kx=0,ky=0,kz=0) = cuz(kx=0,ky=0,kz=0) = 0.
   cu[l][k][j][i] = complex current density for fourier mode (j,k,l)
   nx/ny/nz = system length in x/y/z direction
   nxvh = second dimension of field arrays, must be >= nxh
   nyv = third dimension of field arrays, must be >= ny
   nzv = fourth dimension of field arrays, must be >= nz
   requires KNC, cu need to be 64 byte aligned
   nxhd needs to be a multiple of 8
   (NOTE(review): this function takes no nxhd argument; the line above
   looks copied from a sibling routine -- confirm against caller)
   nxvh needs to be a multiple of 2
   cu needs to have 4 components
local data */
/* NOTE(review): vector layout -- cu stores 4 complex components per
   grid point (8 floats), and the j loops advance 2 points at a time,
   so one __m512 holds 2 grid points.  The lane-select masks used
   below are: 771=0x0303 -> x-component lanes of both points,
   3084=0x0C0C -> y lanes, 12336=0x3030 -> z lanes, 15420=0x3C3C ->
   y and z lanes together, 255=0x00FF -> all lanes of the first point
   (used to zero the kx = 0 mode when j==0) */
   int nxh, nyh, nzh, nxhs, itn, j, k, l, k1, l1, kj, lj, nxvyh;
   float dnx, dny, dnz, dkx, dky, dkz, dky2, dkz2, dkyz2, at1;
   float complex zero, zt1;
   __m512i v_j, v_it;
   __m512 v_dnx, v_dny, v_dnz, v_dkx, v_dky, v_dkz, v_dkz2, v_dkyz2;
   __m512 v_dk, v_at1, v_zt1, v_zt2, v_zero, v_one, v_at, v_as;
   nxh = nx/2;
   nyh = 1 > ny/2 ? 1 : ny/2;
   nzh = 1 > nz/2 ? 1 : nz/2;
/* nxhs = number of kx modes handled by the vector loops (blocks of 2) */
   nxhs = 2*(nxh/2);
/* itn = first index for the scalar clean-up loops */
   itn = 1 > nxhs ? 1 : nxhs;
   nxvyh = nxvh*nyv;
   dnx = 6.28318530717959/(float) nx;
   dny = 6.28318530717959/(float) ny;
   dnz = 6.28318530717959/(float) nz;
   zero = 0.0 + 0.0*_Complex_I;
/* v_j: per-point kx offsets (0 for point 0 lanes, 1 for point 1 lanes) */
   v_j = _mm512_set_epi32(1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0);
   v_dnx = _mm512_set1_ps(dnx);
   v_dny = _mm512_set1_ps(dny);
   v_dnz = _mm512_set1_ps(dnz);
   v_zero = _mm512_setzero_ps();
   v_one = _mm512_set1_ps(1.0f);
/* calculate transverse part of current */
/* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */
#pragma omp parallel
   {
#pragma omp for nowait \
private(j,k,l,k1,l1,lj,kj,dkx,dky,dkz,dkz2,dkyz2,at1,zt1,v_it,v_dk, \
v_dkx,v_dky,v_dkz,v_dkz2,v_dkyz2,v_at1,v_zt1,v_zt2,v_at,v_as)
      for (l = 1; l < nzh; l++) {
         dkz = dnz*(float) l;
         v_dkz = _mm512_cvtfxpnt_round_adjustepi32_ps(_mm512_set1_epi32(l),
                 _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
         v_dkz = _mm512_mul_ps(v_dnz,v_dkz);
         lj = nxvyh*l;
         l1 = nxvyh*nz - lj;
         dkz2 = dkz*dkz;
         v_dkz2 = _mm512_set1_ps(dkz2);
/* add kz to gradient operator */
         v_dk = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(12336),v_dkz);
         for (k = 1; k < nyh; k++) {
            dky = dny*(float) k;
            v_it = _mm512_set1_epi32(k);
            v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
                    _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
            v_dky = _mm512_mul_ps(v_dny,v_dky);
            kj = nxvh*k;
            k1 = nxvh*ny - kj;
            dkyz2 = dky*dky + dkz2;
            v_dkyz2 = _mm512_fmadd_ps(v_dky,v_dky,v_dkz2);
/* add ky to gradient operator */
            v_dk = _mm512_mask_mov_ps(v_dk,_mm512_int2mask(3084),v_dky);
/* vector loop over elements in blocks of 2 */
            for (j = 0; j < nxhs; j+=2) {
/* dkx = dnx*(float) j; */
               v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
               v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
                       _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
               v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* at1 = 1.0/(dkx*dkx + dkyz2); */
               v_at1 = _mm512_fmadd_ps(v_dkx,v_dkx,v_dkyz2);
               v_at1 = _mm512_div_ps(v_one,v_at1);
/* add kx to gradient operator */
               v_dk = _mm512_mask_mov_ps(v_dk,_mm512_int2mask(771),v_dkx);
/* zt1 = at1*(dkx*cu[4*(j+kj+lj)] + dky*cu[1+4*(j+kj+lj)] */
/*           + dkz*cu[2+4*(j+kj+lj)]); */
/* NOTE(review): horizontal add of k.cu -- shuffle 78 swaps the two
   64-bit (complex) halves within each 128-bit lane, permute 177 swaps
   adjacent 128-bit lanes; the sum kx*cux+ky*cuy+kz*cuz then appears
   replicated in every lane of each point -- verified by pattern only */
               v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj+lj)]);
               v_zt1 = _mm512_mul_ps(v_dk,v_zt2);
               v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
               v_zt1 = _mm512_add_ps(v_at,v_zt1);
               v_at = _mm512_permute4f128_ps(v_zt1,177);
               v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
/* zero out kx = 0 mode */
               if (j==0) {
                  v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
                          v_zero);
               }
/* cu[4*(j+kj+lj)] -= dkx*zt1; */
/* cu[1+4*(j+kj+lj)] -= dky*zt1; */
/* cu[2+4*(j+kj+lj)] -= dkz*zt1; */
               v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_dk,v_zt1));
               _mm512_store_ps((float *)&cu[4*(j+kj+lj)],v_zt2);
/* zt1 = at1*(dkx*cu[4*(j+k1+lj)] - dky*cu[1+4*(j+k1+lj)] */
/*           + dkz*cu[2+4*(j+k1+lj)]); */
/* v_as = gradient operator with the ky lanes negated (ky -> -ky) */
               v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1+lj)]);
               v_as = _mm512_mask_sub_ps(v_dk,_mm512_int2mask(3084),v_zero,
                      v_dk);
               v_zt1 = _mm512_mul_ps(v_as,v_zt2);
               v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
               v_zt1 = _mm512_add_ps(v_at,v_zt1);
               v_at = _mm512_permute4f128_ps(v_zt1,177);
               v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
/* zero out kx = 0 mode */
               if (j==0) {
                  v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
                          v_zero);
               }
/* cu[4*(j+k1+lj)] -= dkx*zt1; */
/* cu[1+4*(j+k1+lj)] += dky*zt1; */
/* cu[2+4*(j+k1+lj)] -= dkz*zt1; */
               v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_as,v_zt1));
               _mm512_store_ps((float *)&cu[4*(j+k1+lj)],v_zt2);
/* zt1 = at1*(dkx*cu[4*(j+kj+l1)] + dky*cu[1+4*(j+kj+l1)] */
/*           - dkz*cu[2+4*(j+kj+l1)]); */
/* v_as = gradient operator with the kz lanes negated (kz -> -kz) */
               v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj+l1)]);
               v_as = _mm512_mask_sub_ps(v_dk,_mm512_int2mask(12336),
                      v_zero,v_dk);
               v_zt1 = _mm512_mul_ps(v_as,v_zt2);
               v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
               v_zt1 = _mm512_add_ps(v_at,v_zt1);
               v_at = _mm512_permute4f128_ps(v_zt1,177);
               v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
/* zero out kx = 0 mode */
               if (j==0) {
                  v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
                          v_zero);
               }
/* cu[4*(j+kj+l1)] -= dkx*zt1; */
/* cu[1+4*(j+kj+l1)] -= dky*zt1; */
/* cu[2+4*(j+kj+l1)] += dkz*zt1; */
               v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_as,v_zt1));
               _mm512_store_ps((float *)&cu[4*(j+kj+l1)],v_zt2);
/* zt1 = at1*(dkx*cu[4*(j+k1+l1)] - dky*cu[1+4*(j+k1+l1)] */
/*           - dkz*cu[2+4*(j+k1+l1)]); */
/* v_as = gradient operator with both ky and kz lanes negated */
               v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1+l1)]);
               v_as = _mm512_mask_sub_ps(v_dk,_mm512_int2mask(15420),
                      v_zero,v_dk);
               v_zt1 = _mm512_mul_ps(v_as,v_zt2);
               v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
               v_zt1 = _mm512_add_ps(v_at,v_zt1);
               v_at = _mm512_permute4f128_ps(v_zt1,177);
               v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
/* zero out kx = 0 mode */
               if (j==0) {
                  v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
                          v_zero);
               }
/* cu[4*(j+k1+l1)] -= dkx*zt1; */
/* cu[1+4*(j+k1+l1)] += dky*zt1; */
/* cu[2+4*(j+k1+l1)] += dkz*zt1; */
               v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_as,v_zt1));
               _mm512_store_ps((float *)&cu[4*(j+k1+l1)],v_zt2);
            }
/* loop over remaining elements */
            for (j = itn; j < nxh; j++) {
               dkx = dnx*(float) j;
               at1 = 1.0/(dkx*dkx + dkyz2);
               zt1 = at1*(dkx*cu[4*(j+kj+lj)] + dky*cu[1+4*(j+kj+lj)]
                   + dkz*cu[2+4*(j+kj+lj)]);
               cu[4*(j+kj+lj)] -= dkx*zt1;
               cu[1+4*(j+kj+lj)] -= dky*zt1;
               cu[2+4*(j+kj+lj)] -= dkz*zt1;
               zt1 = at1*(dkx*cu[4*(j+k1+lj)] - dky*cu[1+4*(j+k1+lj)]
                   + dkz*cu[2+4*(j+k1+lj)]);
               cu[4*(j+k1+lj)] -= dkx*zt1;
               cu[1+4*(j+k1+lj)] += dky*zt1;
               cu[2+4*(j+k1+lj)] -= dkz*zt1;
               zt1 = at1*(dkx*cu[4*(j+kj+l1)] + dky*cu[1+4*(j+kj+l1)]
                   - dkz*cu[2+4*(j+kj+l1)]);
               cu[4*(j+kj+l1)] -= dkx*zt1;
               cu[1+4*(j+kj+l1)] -= dky*zt1;
               cu[2+4*(j+kj+l1)] += dkz*zt1;
               zt1 = at1*(dkx*cu[4*(j+k1+l1)] - dky*cu[1+4*(j+k1+l1)]
                   - dkz*cu[2+4*(j+k1+l1)]);
               cu[4*(j+k1+l1)] -= dkx*zt1;
               cu[1+4*(j+k1+l1)] += dky*zt1;
               cu[2+4*(j+k1+l1)] += dkz*zt1;
            }
         }
/* mode numbers kx = 0, nx/2 */
         for (k = 1; k < nyh; k++) {
            kj = nxvh*k;
            k1 = nxvh*ny - kj;
            dky = dny*(float) k;
            at1 = 1.0/(dky*dky + dkz2);
            zt1 = at1*(dky*cu[1+4*(kj+lj)] + dkz*cu[2+4*(kj+lj)]);
            cu[1+4*(kj+lj)] -= dky*zt1;
            cu[2+4*(kj+lj)] -= dkz*zt1;
            cu[4*(k1+lj)] = zero;
            cu[1+4*(k1+lj)] = zero;
            cu[2+4*(k1+lj)] = zero;
            zt1 = at1*(dky*cu[1+4*(kj+l1)] - dkz*cu[2+4*(kj+l1)]);
            cu[1+4*(kj+l1)] -= dky*zt1;
            cu[2+4*(kj+l1)] += dkz*zt1;
            cu[4*(k1+l1)] = zero;
            cu[1+4*(k1+l1)] = zero;
            cu[2+4*(k1+l1)] = zero;
         }
/* mode numbers ky = 0, ny/2 */
         k1 = nxvh*nyh;
/* add ky to gradient operator */
         v_dk = _mm512_mask_mov_ps(v_dk,_mm512_int2mask(3084),v_zero);
/* vector loop over elements in blocks of 2 */
         for (j = 0; j < nxhs; j+=2) {
/* dkx = dnx*(float) j; */
            v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
            v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
                    _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
            v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* at1 = 1.0/(dkx*dkx + dkz2); */
            v_at1 = _mm512_fmadd_ps(v_dkx,v_dkx,v_dkz2);
            v_at1 = _mm512_div_ps(v_one,v_at1);
/* add kx to gradient operator */
            v_dk = _mm512_mask_mov_ps(v_dk,_mm512_int2mask(771),v_dkx);
/* zt1 = at1*(dkx*cu[4*(j+lj)] + dkz*cu[2+4*(j+lj)]); */
            v_zt2 = _mm512_load_ps((float *)&cu[4*(j+lj)]);
            v_zt1 = _mm512_mul_ps(v_dk,v_zt2);
            v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
            v_zt1 = _mm512_add_ps(v_at,v_zt1);
            v_at = _mm512_permute4f128_ps(v_zt1,177);
            v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
/* zero out kx = 0 mode */
            if (j==0) {
               v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
                       v_zero);
            }
/* cu[4*(j+lj)] -= dkx*zt1; */
/* cu[2+4*(j+lj)] -= dkz*zt1; */
            v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_dk,v_zt1));
            _mm512_store_ps((float *)&cu[4*(j+lj)],v_zt2);
/* cu[4*(j+k1+lj)] = zero; */
/* cu[1+4*(j+k1+lj)] = zero; */
/* cu[2+4*(j+k1+lj)] = zero; */
            _mm512_store_ps((float *)&cu[4*(j+k1+lj)],v_zero);
/* zt1 = at1*(dkx*cu[4*(j+l1)] - dkz*cu[2+4*(j+l1)]); */
            v_zt2 = _mm512_load_ps((float *)&cu[4*(j+l1)]);
            v_as = _mm512_mask_sub_ps(v_dk,_mm512_int2mask(12336),
                   v_zero,v_dk);
            v_zt1 = _mm512_mul_ps(v_as,v_zt2);
            v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
            v_zt1 = _mm512_add_ps(v_at,v_zt1);
            v_at = _mm512_permute4f128_ps(v_zt1,177);
            v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
/* zero out kx = 0 mode */
            if (j==0) {
               v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
                       v_zero);
            }
/* cu[4*(j+l1)] -= dkx*zt1; */
/* cu[2+4*(j+l1)] += dkz*zt1; */
            v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_as,v_zt1));
            _mm512_store_ps((float *)&cu[4*(j+l1)],v_zt2);
/* cu[4*(j+k1+l1)] = zero; */
/* cu[1+4*(j+k1+l1)] = zero; */
/* cu[2+4*(j+k1+l1)] = zero; */
            _mm512_store_ps((float *)&cu[4*(j+k1+l1)],v_zero);
         }
/* loop over remaining elements */
         for (j = itn; j < nxh; j++) {
            dkx = dnx*(float) j;
            at1 = 1.0/(dkx*dkx + dkz2);
            zt1 = at1*(dkx*cu[4*(j+lj)] + dkz*cu[2+4*(j+lj)]);
            cu[4*(j+lj)] -= dkx*zt1;
            cu[2+4*(j+lj)] -= dkz*zt1;
            cu[4*(j+k1+lj)] = zero;
            cu[1+4*(j+k1+lj)] = zero;
            cu[2+4*(j+k1+lj)] = zero;
            zt1 = at1*(dkx*cu[4*(j+l1)] - dkz*cu[2+4*(j+l1)]);
            cu[4*(j+l1)] -= dkx*zt1;
            cu[2+4*(j+l1)] += dkz*zt1;
            cu[4*(j+k1+l1)] = zero;
            cu[1+4*(j+k1+l1)] = zero;
            cu[2+4*(j+k1+l1)] = zero;
         }
/* mode numbers kx = 0, nx/2 */
         cu[2+4*lj] = zero;
         cu[4*(k1+lj)] = zero;
         cu[1+4*(k1+lj)] = zero;
         cu[2+4*(k1+lj)] = zero;
         cu[4*l1] = zero;
         cu[1+4*l1] = zero;
         cu[2+4*l1] = zero;
         cu[4*(k1+l1)] = zero;
         cu[1+4*(k1+l1)] = zero;
         cu[2+4*(k1+l1)] = zero;
      }
   }
/* mode numbers kz = 0, nz/2 */
   l1 = nxvyh*nzh;
#pragma omp parallel for \
private(j,k,k1,kj,dky,dky2,dkx,at1,zt1,v_it,v_dk,v_dkx,v_dky,v_dkyz2, \
v_at1,v_zt1,v_zt2,v_at,v_as)
   for (k = 1; k < nyh; k++) {
      dky = dny*(float) k;
      v_it = _mm512_set1_epi32(k);
      v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
              _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
      v_dky = _mm512_mul_ps(v_dny,v_dky);
      kj = nxvh*k;
      k1 = nxvh*ny - kj;
      dky2 = dky*dky;
/* v_dkyz2 here holds dky*dky only (kz = 0 in this section) */
      v_dkyz2 = _mm512_mul_ps(v_dky,v_dky);
/* add ky to gradient operator */
      v_dk = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(3084),v_dky);
/* vector loop over elements in blocks of 2 */
      for (j = 0; j < nxhs; j+=2) {
/* dkx = dnx*(float) j; */
         v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
         v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it,
                 _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE);
         v_dkx = _mm512_mul_ps(v_dnx,v_dkx);
/* at1 = 1.0/(dkx*dkx + dky2); */
         v_at1 = _mm512_fmadd_ps(v_dkx,v_dkx,v_dkyz2);
         v_at1 = _mm512_div_ps(v_one,v_at1);
/* add kx to gradient operator */
         v_dk = _mm512_mask_mov_ps(v_dk,_mm512_int2mask(771),v_dkx);
/* zt1 = at1*(dkx*cu[4*(j+kj)] + dky*cu[1+4*(j+kj)]); */
         v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj)]);
         v_zt1 = _mm512_mul_ps(v_dk,v_zt2);
         v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
         v_zt1 = _mm512_add_ps(v_at,v_zt1);
         v_at = _mm512_permute4f128_ps(v_zt1,177);
         v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
/* zero out kx = 0 mode */
         if (j==0) {
            v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
                    v_zero);
         }
/* cu[4*(j+kj)] -= dkx*zt1; */
/* cu[1+4*(j+kj)] -= dky*zt1; */
         v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_dk,v_zt1));
         _mm512_store_ps((float *)&cu[4*(j+kj)],v_zt2);
/* zt1 = at1*(dkx*cu[4*(j+k1)]- dky*cu[1+4*(j+k1)]); */
         v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1)]);
         v_as = _mm512_mask_sub_ps(v_dk,_mm512_int2mask(3084),v_zero,
                v_dk);
         v_zt1 = _mm512_mul_ps(v_as,v_zt2);
         v_at = (__m512)_mm512_shuffle_epi32((__m512i)v_zt1,78);
         v_zt1 = _mm512_add_ps(v_at,v_zt1);
         v_at = _mm512_permute4f128_ps(v_zt1,177);
         v_zt1 = _mm512_mul_ps(v_at1,_mm512_add_ps(v_at,v_zt1));
/* zero out kx = 0 mode */
         if (j==0) {
            v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),
                    v_zero);;
         }
/* cu[4*(j+k1)] -= dkx*zt1; */
/* cu[1+4*(j+k1)] += dky*zt1; */
         v_zt2 = _mm512_sub_ps(v_zt2,_mm512_mul_ps(v_as,v_zt1));
         _mm512_store_ps((float *)&cu[4*(j+k1)],v_zt2);
/* cu[4*(j+kj+l1)] = zero; */
/* cu[1+4*(j+kj+l1)] = zero; */
/* cu[2+4*(j+kj+l1)] = zero; */
         _mm512_store_ps((float *)&cu[4*(j+kj+l1)],v_zero);
/* cu[4*(j+k1+l1)] = zero; */
/* cu[1+4*(j+k1+l1)] = zero; */
/* cu[2+4*(j+k1+l1)] = zero; */
         _mm512_store_ps((float *)&cu[4*(j+k1+l1)],v_zero);
      }
/* loop over remaining elements */
      for (j = itn; j < nxh; j++) {
         dkx = dnx*(float) j;
         at1 = 1.0/(dkx*dkx + dky2);
         zt1 = at1*(dkx*cu[4*(j+kj)] + dky*cu[1+4*(j+kj)]);
         cu[4*(j+kj)] -= dkx*zt1;
         cu[1+4*(j+kj)] -= dky*zt1;
         zt1 = at1*(dkx*cu[4*(j+k1)]- dky*cu[1+4*(j+k1)]);
         cu[4*(j+k1)] -= dkx*zt1;
         cu[1+4*(j+k1)] += dky*zt1;
         cu[4*(j+kj+l1)] = zero;
         cu[1+4*(j+kj+l1)] = zero;
         cu[2+4*(j+kj+l1)] = zero;
         cu[4*(j+k1+l1)] = zero;
         cu[1+4*(j+k1+l1)] = zero;
         cu[2+4*(j+k1+l1)] = zero;
      }
   }
/* mode numbers kx = 0, nx/2 */
   for (k = 1; k < nyh; k++) {
      kj = nxvh*k;
      k1 = nxvh*ny - kj;
      cu[1+4*kj] = zero;
      cu[4*k1] = zero;
      cu[1+4*k1] = zero;
      cu[2+4*k1] = zero;
      cu[4*(kj+l1)] = zero;
      cu[1+4*(kj+l1)] = zero;
      cu[2+4*(kj+l1)] = zero;
      cu[4*(k1+l1)] = zero;
      cu[1+4*(k1+l1)] = zero;
      cu[2+4*(k1+l1)] = zero;
   }
/* mode numbers ky = 0, ny/2 */
   k1 = nxvh*nyh;
/* vector loop over elements in blocks of 2 */
   for (j = 0; j < nxhs; j+=2) {
      v_zt2 = _mm512_load_ps((float *)&cu[4*j]);
/* zero out kx = 0 mode */
      if (j==0) {
         v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(255),v_zero);
      }
/* cu[4*j] = zero; */
      v_zt2 = _mm512_mask_mov_ps(v_zt2,_mm512_int2mask(771),v_zero);
      _mm512_store_ps((float *)&cu[4*j],v_zt2);
/* cu[4*(j+k1)] = zero; */
/* cu[1+4*(j+k1)] = zero; */
/* cu[2+4*(j+k1)] = zero; */
      _mm512_store_ps((float *)&cu[4*(j+k1)],v_zero);
/* cu[4*(j+l1)] = zero; */
/* cu[1+4*(j+l1)] = zero; */
/* cu[2+4*(j+l1)] = zero; */
      _mm512_store_ps((float *)&cu[4*(j+l1)],v_zero);
/* cu[4*(j+k1+l1)] = zero; */
/* cu[1+4*(j+k1+l1)] = zero; */
/* cu[2+4*(j+k1+l1)] = zero; */
      _mm512_store_ps((float *)&cu[4*(j+k1+l1)],v_zero);
   }
/* loop over remaining elements */
   for (j = itn; j < nxh; j++) {
      cu[4*j] = zero;
      cu[4*(j+k1)] = zero;
      cu[1+4*(j+k1)] = zero;
      cu[2+4*(j+k1)] = zero;
      cu[4*(j+l1)] = zero;
      cu[1+4*(j+l1)] = zero;
      cu[2+4*(j+l1)] = zero;
      cu[4*(j+k1+l1)] = zero;
      cu[1+4*(j+k1+l1)] = zero;
      cu[2+4*(j+k1+l1)] = zero;
   }
   cu[0] = zero;
   cu[1] = zero;
   cu[2] = zero;
   cu[4*k1] = zero;
   cu[1+4*k1] = zero;
   cu[2+4*k1] = zero;
   cu[4*l1] = zero;
   cu[1+4*l1] = zero;
   cu[2+4*l1] = zero;
   cu[4*(k1+l1)] = zero;
   cu[1+4*(k1+l1)] = zero;
   cu[2+4*(k1+l1)] = zero;
   return;
}
/*--------------------------------------------------------------------*/
void ckncmibpois33(float complex cu[], float complex bxyz[], float complex
ffc[], float ci, float *wm, int nx, int ny, int nz, int nxvh, int nyv, int nzv, int nxhd, int nyhd, int nzhd) { /* this subroutine solves 3d poisson's equation in fourier space for magnetic field with periodic boundary conditions. input: cu,ffc,ci,nx,ny,nz,nxvh,nyv,nzv,nxhd,nyhd,nzhd output: bxyz, wm approximate flop count is: 193*nxc*nyc*nzc + 84*(nxc*nyc + nxc*nzc + nyc*nzc) where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1 the magnetic field is calculated using the equations: bx[kz][ky][kx] = ci*ci*sqrt(-1)*g[kz][ky][kx]* (ky*cuz[kz][ky][kx]-kz*cuy[kz][ky][kx]), by[kz][ky][kx] = ci*ci*sqrt(-1)*g[kz][ky][kx]* (kz*cux[kz][ky][kx]-kx*cuz[kz][ky][kx]), bz[kz][ky][kx] = ci*ci*sqrt(-1)*g[kz][ky][kx]* (kx*cuy[kz][ky][kx]-ky*cux[kz][ky][kx]), where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, and j,k,l = fourier mode numbers, g[kz][ky][kx] = (affp/(kx**2+ky**2+kz**2))*s(kx,ky,kz), s[kz][ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2), except for bx(kx=pi) = by(kx=pi) = bz(kx=pi) = 0, bx(ky=pi) = by(ky=pi) = bx(ky=pi) = 0, bx(kz=pi) = by(kz=pi) = bz(kz=pi) = 0, bx(kx=0,ky=0,kz=0) = by(kx=0,ky=0,kz=0) = bz(kx=0,ky=0,kz=0) = 0. 
cu[l][k][j][i] = complex current density for fourier mode (j,k,l) bxyz[l][k][j][i] = i component of complex magnetic field all for fourier mode (j,k,l) aimag(ffc(j,k,l)) = finite-size particle shape factor s for fourier mode (j,k,l) real(ffc(j,k,l)) = potential green's function g for fourier mode (j,k,l) ci = reciprocal of velocity of light magnetic field energy is also calculated, using wm = nx*ny*nz*sum((affp/(kx**2+ky**2+kz**2))*ci*ci |cu[kz][ky][kx]*s[kz][ky][kx]|**2) this expression is valid only if the current is divergence-free nx/ny/nz = system length in x/y/z direction nxvh = second dimension of field arrays, must be >= nxh nyv = third dimension of field arrays, must be >= ny nzv = fourth dimension of field arrays, must be >= nz nxhd = dimension of form factor array, must be >= nxh nyhd = second dimension of form factor array, must be >= nyh nzhd = third dimension of form factor array, must be >= nzh requires KNC, cu, bxyz, ffc need to be 64 byte aligned nxhd needs to be a multiple of 8 nxvh needs to be a multiple of 2 cu, bxyz need to have 4 components local data */ int nxh, nyh, nzh, nxhs, itn, j, k, l, k1, l1, kk, kj, ll, lj; int nxyhd, nxvyh; float dnx, dny, dnz, dky, dkz, ci2, at1, at2, at3, at4; float complex zero, zt1, zt2, zt3; double wp, sum1, sum2; __m512i v_j, v_it, v_n, v_m; __m512 v_dnx, v_dny, v_dnz, v_dkx, v_dky, v_dkz, v_ci2; __m512 v_dk1, v_dk2, v_at1, v_at2, v_at3, v_at4, v_zero; __m512 v_zt1, v_zt2, v_zt3, v_zt4; __m512d v_wp, v_d; __attribute__((aligned(64))) double dd[8]; nxh = nx/2; nyh = 1 > ny/2 ? 1 : ny/2; nzh = 1 > nz/2 ? 1 : nz/2; nxhs = 2*(nxh/2); itn = 1 > nxhs ? 
1 : nxhs; nxyhd = nxhd*nyhd; nxvyh = nxvh*nyv; dnx = 6.28318530717959/(float) nx; dny = 6.28318530717959/(float) ny; dnz = 6.28318530717959/(float) nz; zero = 0.0 + 0.0*_Complex_I; ci2 = ci*ci; v_j = _mm512_set_epi32(1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0); v_n = _mm512_set_epi32(15,14,11,10,9,8,13,12,7,6,3,2,1,0,5,4); v_m = _mm512_set_epi32(15,14,9,8,13,12,11,10,7,6,1,0,5,4,3,2); v_dnx = _mm512_set1_ps(dnx); v_dny = _mm512_set1_ps(dny); v_dnz = _mm512_set1_ps(dnz); v_zero = _mm512_setzero_ps(); v_ci2 = _mm512_set1_ps(ci2); /* calculate magnetic field and sum field energy */ sum1 = 0.0; /* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */ #pragma omp parallel { #pragma omp for nowait \ private(j,k,l,k1,l1,ll,lj,kk,kj,dky,dkz,at1,at2,at3,at4,zt1,zt2,zt3, \ wp,v_it,v_dkx,v_dky,v_dkz,v_dk1,v_dk2,v_at1,v_at2,v_at3,v_at4,v_zt1, \ v_zt2,v_zt3,v_zt4,v_d,v_wp,dd) \ reduction(+:sum1) for (l = 1; l < nzh; l++) { dkz = dnz*(float) l; v_dkz = _mm512_cvtfxpnt_round_adjustepi32_ps(_mm512_set1_epi32(l), _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkz = _mm512_mul_ps(v_dnz,v_dkz); ll = nxyhd*l; lj = nxvyh*l; l1 = nxvyh*nz - lj; wp = 0.0; v_wp = _mm512_set1_pd(0.0); /* add kz to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(771),v_dkz); v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(3084),v_dkz); for (k = 1; k < nyh; k++) { dky = dny*(float) k; v_it = _mm512_set1_epi32(k); v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dky = _mm512_mul_ps(v_dny,v_dky); kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; /* add ky to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(12336), v_dky); v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(771), v_dky); /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = ci2*crealf(ffc[j+kk+ll]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero, _mm512_int2mask(15),(float *)&ffc[j+kk+ll]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1, 
_mm512_int2mask(15),(float *)&ffc[j+kk+ll+8]); v_at1 = _mm512_permute4f128_ps(v_at1,0); v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4, _mm512_int2mask(43690),(__m512i)v_at4,177); v_at1 = _mm512_mul_ps(v_ci2,v_at1); /* at2 = at1*dnx*(float) j; */ /* at3 = dky*at1; */ /* at4 = dkz*at1; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkx = _mm512_mul_ps(v_dnx,v_dkx); /* add kx to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084), v_dkx); v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336), v_dkx); /* normalize curl operators */ v_at2 = _mm512_mul_ps(v_at1,v_dk1); v_at3 = _mm512_mul_ps(v_at1,v_dk2); /* at1 = at1*cimagf(ffc[j+kk+ll]); */ v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4, _mm512_int2mask(21845),(__m512i)v_at4,177); v_at1 = _mm512_mul_ps(v_at1,v_at4); /* zt1 = -cimagf(cu[2+4*(j+kj+lj)]) */ /* + crealf(cu[2+4*(j+kj+lj)])*_Complex_I;/ */ /* zt2 = -cimagf(cu[1+4*(j+kj+lj)]) */ /* + crealf(cu[1+4*(j+kj+lj)])*_Complex_I; */ /* zt3 = -cimagf(cu[4*(j+kj+lj)]) */ /* + crealf(cu[4*(j+kj+lj)])*_Complex_I; */ v_zt3 = _mm512_load_ps((float *)&cu[4*(j+kj+lj)]); v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690), v_zero,v_zt3); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* bxyz[4*(j+kj+lj)] = at3*zt1 - at4*zt2; */ /* bxyz[1+4*(j+kj+lj)] = at4*zt3 - at2*zt1; */ /* bxyz[2+4*(j+kj+lj)] = at2*zt2 - at3*zt3; */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_sub_ps(v_zt1,v_zt2); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255), v_zero); } 
_mm512_store_ps((float *)&bxyz[4*(j+kj+lj)],v_zt1); /* wp += at1*(cu[4*(j+kj+lj)]*conjf(cu[4*(j+kj+lj)]) */ /* + cu[1+4*(j+kj+lj)]*conjf(cu[1+4*(j+kj+lj)]) */ /* + cu[2+4*(j+kj+lj)]*conjf(cu[2+4*(j+kj+lj)])); */ v_zt4 = _mm512_mul_ps(v_at1,_mm512_mask_mul_ps(v_zero, _mm512_int2mask(16191),v_zt3,v_zt3)); /* zt1 = -cimagf(cu[2+4*(j+k1+lj)]) */ /* + crealf(cu[2+4*(j+k1+lj)])*_Complex_I; */ /* zt2 = -cimagf(cu[1+4*(j+k1+lj)]) */ /* + crealf(cu[1+4*(j+k1+lj)])*_Complex_I; */ /* zt3 = -cimagf(cu[4*(j+k1+lj)]) */ /* + crealf(cu[4*(j+k1+lj)])*_Complex_I; */ v_zt3 = _mm512_load_ps((float *)&cu[4*(j+k1+lj)]); v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690), v_zero,v_zt3); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); v_zt1 = _mm512_mask_sub_ps(v_at2,_mm512_int2mask(12336), v_zero,v_at2); v_zt2 = _mm512_mask_sub_ps(v_at3,_mm512_int2mask(771), v_zero,v_at3); /* bxyz[4*(j+k1+lj)] = -at3*zt1 - at4*zt2; */ /* bxyz[1+4*(j+k1+lj)] = at4*zt3 - at2*zt1; */ /* bxyz[2+4*(j+k1+lj)] = at2*zt2 + at3*zt3; */ v_zt1 = _mm512_mul_ps(v_zt1,v_zt3); v_zt2 = _mm512_mul_ps(v_zt2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_sub_ps(v_zt1,v_zt2); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255), v_zero); } _mm512_store_ps((float *)&bxyz[4*(j+k1+lj)],v_zt1); /* wp += at1*(cu[4*(j+k1+lj)]*conjf(cu[4*(j+k1+lj)]) */ /* + cu[1+4*(j+k1+lj)]*conjf(cu[1+4*(j+k1+lj)]) */ /* + cu[2+4*(j+k1+lj)]*conjf(cu[2+4*(j+k1+lj)])); */ v_zt4 = _mm512_fmadd_ps(v_at1,_mm512_mask_mul_ps(v_zero, _mm512_int2mask(16191),v_zt3,v_zt3),v_zt4); /* zt1 = -cimagf(cu[2+4*(j+kj+l1)]) */ /* + crealf(cu[2+4*(j+kj+l1)])*_Complex_I; */ /* zt2 = -cimagf(cu[1+4*(j+kj+l1)]) */ /* + crealf(cu[1+4*(j+kj+l1)])*_Complex_I; */ /* zt3 = -cimagf(cu[4*(j+kj+l1)]) */ /* + crealf(cu[4*(j+kj+l1)])*_Complex_I; */ v_zt3 = 
_mm512_load_ps((float *)&cu[4*(j+kj+l1)]); v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690), v_zero,v_zt3); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); v_zt1 = _mm512_mask_sub_ps(v_at2,_mm512_int2mask(771), v_zero,v_at2); v_zt2 = _mm512_mask_sub_ps(v_at3,_mm512_int2mask(3084), v_zero,v_at3); /* bxyz[4*(j+kj+l1)] = at3*zt1 + at4*zt2; */ /* bxyz[1+4*(j+kj+l1)] = -at4*zt3 - at2*zt1; */ /* bxyz[2+4*(j+kj+l1)] = at2*zt2 - at3*zt3; */ v_zt1 = _mm512_mul_ps(v_zt1,v_zt3); v_zt2 = _mm512_mul_ps(v_zt2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_sub_ps(v_zt1,v_zt2); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255), v_zero); } _mm512_store_ps((float *)&bxyz[4*(j+kj+l1)],v_zt1); /* wp += at1*(cu[4*(j+kj+l1)]*conjf(cu[4*(j+kj+l1)]) */ /* + cu[1+4*(j+kj+l1)]*conjf(cu[1+4*(j+kj+l1)]) */ /* + cu[2+4*(j+kj+l1)]*conjf(cu[2+4*(j+kj+l1)])); */ v_zt4 = _mm512_fmadd_ps(v_at1,_mm512_mask_mul_ps(v_zero, _mm512_int2mask(16191),v_zt3,v_zt3),v_zt4); /* zt1 = -cimagf(cu[2+4*(j+k1+l1)]) */ /* + crealf(cu[2+4*(j+k1+l1)])*_Complex_I; */ /* zt2 = -cimagf(cu[1+4*(j+k1+l1)]) */ /* + crealf(cu[1+4*(j+k1+l1)])*_Complex_I; */ /* zt3 = -cimagf(cu[4*(j+k1+l1)]) */ /* + crealf(cu[4*(j+k1+l1)])*_Complex_I; */ v_zt3 = _mm512_load_ps((float *)&cu[4*(j+k1+l1)]); v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690), v_zero,v_zt3); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); v_zt1 = _mm512_mask_sub_ps(v_at2,_mm512_int2mask(13107), v_zero,v_at2); v_zt2 = _mm512_mask_sub_ps(v_at3,_mm512_int2mask(3855), v_zero,v_at3); /* bxyz[4*(j+k1+l1)] = -at3*zt1 + at4*zt2; */ /* bxyz[1+4*(j+k1+l1)] = -at4*zt3 - at2*zt1; */ /* bxyz[2+4*(j+k1+l1)] = at2*zt2 + at3*zt3; */ v_zt1 = _mm512_mul_ps(v_zt1,v_zt3); v_zt2 = _mm512_mul_ps(v_zt2,v_zt3); v_zt1 = 
(__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_sub_ps(v_zt1,v_zt2); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255), v_zero); } _mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zt1); /* wp += at1*(cu[4*(j+k1+l1)]*conjf(cu[4*(j+k1+l1)]) */ /* + cu[1+4*(j+k1+l1)]*conjf(cu[1+4*(j+k1+l1)]) */ /* + cu[2+4*(j+k1+l1)]*conjf(cu[2+4*(j+k1+l1)])); */ v_zt4 = _mm512_fmadd_ps(v_at1,_mm512_mask_mul_ps(v_zero, _mm512_int2mask(16191),v_zt3,v_zt3),v_zt4); /* convert to double precision before accumulating */ v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt4)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt4,78)); v_wp = _mm512_add_pd(v_wp,v_d); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = ci2*crealf(ffc[j+kk+ll]); at2 = at1*dnx*(float) j; at3 = dky*at1; at4 = dkz*at1; at1 = at1*cimagf(ffc[j+kk+ll]); zt1 = -cimagf(cu[2+4*(j+kj+lj)]) + crealf(cu[2+4*(j+kj+lj)])*_Complex_I; zt2 = -cimagf(cu[1+4*(j+kj+lj)]) + crealf(cu[1+4*(j+kj+lj)])*_Complex_I; zt3 = -cimagf(cu[4*(j+kj+lj)]) + crealf(cu[4*(j+kj+lj)])*_Complex_I; bxyz[4*(j+kj+lj)] = at3*zt1 - at4*zt2; bxyz[1+4*(j+kj+lj)] = at4*zt3 - at2*zt1; bxyz[2+4*(j+kj+lj)] = at2*zt2 - at3*zt3; zt1 = -cimagf(cu[2+4*(j+k1+lj)]) + crealf(cu[2+4*(j+k1+lj)])*_Complex_I; zt2 = -cimagf(cu[1+4*(j+k1+lj)]) + crealf(cu[1+4*(j+k1+lj)])*_Complex_I; zt3 = -cimagf(cu[4*(j+k1+lj)]) + crealf(cu[4*(j+k1+lj)])*_Complex_I; bxyz[4*(j+k1+lj)] = -at3*zt1 - at4*zt2; bxyz[1+4*(j+k1+lj)] = at4*zt3 - at2*zt1; bxyz[2+4*(j+k1+lj)] = at2*zt2 + at3*zt3; zt1 = -cimagf(cu[2+4*(j+kj+l1)]) + crealf(cu[2+4*(j+kj+l1)])*_Complex_I; zt2 = -cimagf(cu[1+4*(j+kj+l1)]) + crealf(cu[1+4*(j+kj+l1)])*_Complex_I; zt3 = -cimagf(cu[4*(j+kj+l1)]) + crealf(cu[4*(j+kj+l1)])*_Complex_I; bxyz[4*(j+kj+l1)] = at3*zt1 + at4*zt2; bxyz[1+4*(j+kj+l1)] = -at4*zt3 - at2*zt1; 
bxyz[2+4*(j+kj+l1)] = at2*zt2 - at3*zt3; zt1 = -cimagf(cu[2+4*(j+k1+l1)]) + crealf(cu[2+4*(j+k1+l1)])*_Complex_I; zt2 = -cimagf(cu[1+4*(j+k1+l1)]) + crealf(cu[1+4*(j+k1+l1)])*_Complex_I; zt3 = -cimagf(cu[4*(j+k1+l1)]) + crealf(cu[4*(j+k1+l1)])*_Complex_I; bxyz[4*(j+k1+l1)] = -at3*zt1 + at4*zt2; bxyz[1+4*(j+k1+l1)] = -at4*zt3 - at2*zt1; bxyz[2+4*(j+k1+l1)] = at2*zt2 + at3*zt3; at1 = at1*(cu[4*(j+kj+lj)]*conjf(cu[4*(j+kj+lj)]) + cu[1+4*(j+kj+lj)]*conjf(cu[1+4*(j+kj+lj)]) + cu[2+4*(j+kj+lj)]*conjf(cu[2+4*(j+kj+lj)]) + cu[4*(j+k1+lj)]*conjf(cu[4*(j+k1+lj)]) + cu[1+4*(j+k1+lj)]*conjf(cu[1+4*(j+k1+lj)]) + cu[2+4*(j+k1+lj)]*conjf(cu[2+4*(j+k1+lj)]) + cu[4*(j+kj+l1)]*conjf(cu[4*(j+kj+l1)]) + cu[1+4*(j+kj+l1)]*conjf(cu[1+4*(j+kj+l1)]) + cu[2+4*(j+kj+l1)]*conjf(cu[2+4*(j+kj+l1)]) + cu[4*(j+k1+l1)]*conjf(cu[4*(j+k1+l1)]) + cu[1+4*(j+k1+l1)]*conjf(cu[1+4*(j+k1+l1)]) + cu[2+4*(j+k1+l1)]*conjf(cu[2+4*(j+k1+l1)])); wp += (double) at1; } } /* mode numbers kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; at1 = ci2*crealf(ffc[kk+ll]); at3 = at1*dny*(float) k; at4 = dkz*at1; at1 = at1*cimagf(ffc[kk+ll]); zt1 = -cimagf(cu[2+4*(kj+lj)]) + crealf(cu[2+4*(kj+lj)])*_Complex_I; zt2 = -cimagf(cu[1+4*(kj+lj)]) + crealf(cu[1+4*(kj+lj)])*_Complex_I; zt3 = -cimagf(cu[4*(kj+lj)]) + crealf(cu[4*(kj+lj)])*_Complex_I; bxyz[4*(kj+lj)] = at3*zt1 - at4*zt2; bxyz[1+4*(kj+lj)] = at4*zt3; bxyz[2+4*(kj+lj)] = -at3*zt3; bxyz[4*(k1+lj)] = zero; bxyz[1+4*(k1+lj)] = zero; bxyz[2+4*(k1+lj)] = zero; zt1 = -cimagf(cu[2+4*(kj+l1)]) + crealf(cu[2+4*(kj+l1)])*_Complex_I; zt2 = -cimagf(cu[1+4*(kj+l1)]) + crealf(cu[1+4*(kj+l1)])*_Complex_I; zt3 = -cimagf(cu[4*(kj+l1)]) + crealf(cu[4*(kj+l1)])*_Complex_I; bxyz[4*(kj+l1)] = at3*zt1 + at4*zt2; bxyz[1+4*(kj+l1)] = -at4*zt3; bxyz[2+4*(kj+l1)] = -at3*zt3; bxyz[4*(k1+l1)] = zero; bxyz[1+4*(k1+l1)] = zero; bxyz[2+4*(k1+l1)] = zero; at1 = at1*(cu[4*(kj+lj)]*conjf(cu[4*(kj+lj)]) + cu[1+4*(kj+lj)]*conjf(cu[1+4*(kj+lj)]) + 
cu[2+4*(kj+lj)]*conjf(cu[2+4*(kj+lj)]) + cu[4*(kj+l1)]*conjf(cu[4*(kj+l1)]) + cu[1+4*(kj+l1)]*conjf(cu[1+4*(kj+l1)]) + cu[2+4*(kj+l1)]*conjf(cu[2+4*(kj+l1)])); wp += (double) at1; } /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; /* add ky to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(12336),v_zero); v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(771),v_zero); /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = ci2*crealf(ffc[j+ll]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,_mm512_int2mask(15), (float *)&ffc[j+ll]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,_mm512_int2mask(15), (float *)&ffc[j+ll+8]); v_at1 = _mm512_permute4f128_ps(v_at1,0); v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4, _mm512_int2mask(43690),(__m512i)v_at4,177); v_at1 = _mm512_mul_ps(v_ci2,v_at1); /* at2 = at1*dnx*(float) j; */ /* at4 = dkz*at1; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkx = _mm512_mul_ps(v_dnx,v_dkx); /* add kx to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084), v_dkx); v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336), v_dkx); /* normalize curl operators */ v_at2 = _mm512_mul_ps(v_at1,v_dk1); v_at3 = _mm512_mul_ps(v_at1,v_dk2); /* at1 = at1*cimagf(ffc[j+ll]); */ v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4, _mm512_int2mask(21845),(__m512i)v_at4,177); v_at1 = _mm512_mul_ps(v_at1,v_at4); /* zt1 = -cimagf(cu[2+4*(j+lj)]) */ /* + crealf(cu[2+4*(j+lj)])*_Complex_I; */ /* zt2 = -cimagf(cu[1+4*(j+lj)]) */ /* + crealf(cu[1+4*(j+lj)])*_Complex_I; */ /* zt3 = -cimagf(cu[4*(j+lj)]) */ /* + crealf(cu[4*(j+lj)])*_Complex_I; */ v_zt3 = _mm512_load_ps((float *)&cu[4*(j+lj)]); v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),v_zero, v_zt3); v_zt3 = 
(__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* bxyz[4*(j+lj)] = -at4*zt2; */ /* bxyz[1+4*(j+lj)] = at4*zt3 - at2*zt1; */ /* bxyz[2+4*(j+lj)] = at2*zt2; */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_sub_ps(v_zt1,v_zt2); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255), v_zero); } _mm512_store_ps((float *)&bxyz[4*(j+lj)],v_zt1); /* wp += at1*(cu[4*(j+lj)]*conjf(cu[4*(j+lj)]) */ /* + cu[1+4*(j+lj)]*conjf(cu[1+4*(j+lj)]) */ /* + cu[2+4*(j+lj)]*conjf(cu[2+4*(j+lj)]) */ v_zt4 = _mm512_mul_ps(v_at1,_mm512_mask_mul_ps(v_zero, _mm512_int2mask(16191),v_zt3,v_zt3)); /* bxyz[4*(j+k1+lj)] = zero; */ /* bxyz[1+4*(j+k1+lj)] = zero; */ /* bxyz[2+4*(j+k1+lj)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+k1+lj)],v_zero); /* zt1 = -cimagf(cu[2+4*(j+l1)]) */ /* + crealf(cu[2+4*(j+l1)])*_Complex_I; */ /* zt2 = -cimagf(cu[1+4*(j+l1)]) */ /* + crealf(cu[1+4*(j+l1)])*_Complex_I; */ /* zt3 = -cimagf(cu[4*(j+l1)]) */ /* + crealf(cu[4*(j+l1)])*_Complex_I; */ v_zt3 = _mm512_load_ps((float *)&cu[4*(j+l1)]); v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),v_zero, v_zt3); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); v_zt1 = _mm512_mask_sub_ps(v_at2,_mm512_int2mask(771),v_zero, v_at2); v_zt2 = _mm512_mask_sub_ps(v_at3,_mm512_int2mask(3084),v_zero, v_at3); /* bxyz[4*(j+l1)] = at4*zt2; */ /* bxyz[1+4*(j+l1)] = -at4*zt3 - at2*zt1; */ /* bxyz[2+4*(j+l1)] = at2*zt2; */ v_zt1 = _mm512_mul_ps(v_zt1,v_zt3); v_zt2 = _mm512_mul_ps(v_zt2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_sub_ps(v_zt1,v_zt2); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); v_zt3 = 
_mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255), v_zero); } _mm512_store_ps((float *)&bxyz[4*(j+l1)],v_zt1); /* wp += at1*(cu[4*(j+l1)]*conjf(cu[4*(j+l1)]) */ /* + cu[1+4*(j+l1)]*conjf(cu[1+4*(j+l1)]) */ /* + cu[2+4*(j+l1)]*conjf(cu[2+4*(j+l1)])); */ v_zt4 = _mm512_fmadd_ps(v_at1,_mm512_mask_mul_ps(v_zero, _mm512_int2mask(16191),v_zt3,v_zt3),v_zt4); /* convert to double precision before accumulating */ v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt4)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt4,78)); v_wp = _mm512_add_pd(v_wp,v_d); /* bxyz[4*(j+k1+l1)] = zero; */ /* bxyz[1+4*(j+k1+l1)] = zero; */ /* bxyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = ci2*crealf(ffc[j+ll]); at2 = at1*dnx*(float) j; at4 = dkz*at1; at1 = at1*cimagf(ffc[j+ll]); zt1 = -cimagf(cu[2+4*(j+lj)]) + crealf(cu[2+4*(j+lj)])*_Complex_I; zt2 = -cimagf(cu[1+4*(j+lj)]) + crealf(cu[1+4*(j+lj)])*_Complex_I; zt3 = -cimagf(cu[4*(j+lj)]) + crealf(cu[4*(j+lj)])*_Complex_I; bxyz[4*(j+lj)] = -at4*zt2; bxyz[1+4*(j+lj)] = at4*zt3 - at2*zt1; bxyz[2+4*(j+lj)] = at2*zt2; bxyz[4*(j+k1+lj)] = zero; bxyz[1+4*(j+k1+lj)] = zero; bxyz[2+4*(j+k1+lj)] = zero; zt1 = -cimagf(cu[2+4*(j+l1)]) + crealf(cu[2+4*(j+l1)])*_Complex_I; zt2 = -cimagf(cu[1+4*(j+l1)]) + crealf(cu[1+4*(j+l1)])*_Complex_I; zt3 = -cimagf(cu[4*(j+l1)]) + crealf(cu[4*(j+l1)])*_Complex_I; bxyz[4*(j+l1)] = at4*zt2; bxyz[1+4*(j+l1)] = -at4*zt3 - at2*zt1; bxyz[2+4*(j+l1)] = at2*zt2; bxyz[4*(j+k1+l1)] = zero; bxyz[1+4*(j+k1+l1)] = zero; bxyz[2+4*(j+k1+l1)] = zero; at1 = at1*(cu[4*(j+lj)]*conjf(cu[4*(j+lj)]) + cu[1+4*(j+lj)]*conjf(cu[1+4*(j+lj)]) + cu[2+4*(j+lj)]*conjf(cu[2+4*(j+lj)]) + cu[4*(j+l1)]*conjf(cu[4*(j+l1)]) + cu[1+4*(j+l1)]*conjf(cu[1+4*(j+l1)]) + cu[2+4*(j+l1)]*conjf(cu[2+4*(j+l1)])); wp += (double) at1; } /* mode numbers kx = 0, nx/2 */ at1 = ci2*crealf(ffc[ll]); at4 = dkz*at1; at1 = at1*cimagf(ffc[ll]); zt2 = -cimagf(cu[1+4*(lj)]) + 
crealf(cu[1+4*(lj)])*_Complex_I; zt3 = -cimagf(cu[4*(lj)]) + crealf(cu[4*(lj)])*_Complex_I; bxyz[4*lj] = -at4*zt2; bxyz[1+4*lj] = at4*zt3; bxyz[2+4*lj] = zero; bxyz[4*(k1+lj)] = zero; bxyz[1+4*(k1+lj)] = zero; bxyz[2+4*(k1+lj)] = zero; bxyz[4*l1] = zero; bxyz[1+4*l1] = zero; bxyz[2+4*l1] = zero; bxyz[4*(k1+l1)] = zero; bxyz[1+4*(k1+l1)] = zero; bxyz[2+4*(k1+l1)] = zero; at1 = at1*(cu[4*lj]*conjf(cu[4*lj]) + cu[1+4*lj]*conjf(cu[1+4*lj]) + cu[2+4*lj]*conjf(cu[2+4*lj])); wp += (double) at1; /* sum1 += wp; */ _mm512_store_pd(&dd[0],v_wp); for (j = 1; j < 8; j++) { dd[0] += dd[j]; } sum1 += (wp + dd[0]); } } /* mode numbers kz = 0, nz/2 */ l1 = nxvyh*nzh; sum2 = 0.0; #pragma omp parallel for \ private(j,k,k1,kk,kj,dky,at1,at2,at3,zt1,zt2,zt3,wp,v_it,v_dkx,v_dky, \ v_dk1,v_dk2,v_at1,v_at2,v_at3,v_at4,v_zt1,v_zt2,v_zt3,v_zt4,v_d,v_wp, \ dd) \ reduction(+:sum2) for (k = 1; k < nyh; k++) { dky = dny*(float) k; v_it = _mm512_set1_epi32(k); v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dky = _mm512_mul_ps(v_dny,v_dky); kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; wp = 0.0; v_wp = _mm512_set1_pd(0.0); /* add ky to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(12336),v_dky); v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(771),v_dky); /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = ci2*crealf(ffc[j+kk]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,_mm512_int2mask(15), (float *)&ffc[j+kk]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,_mm512_int2mask(15), (float *)&ffc[j+kk+8]); v_at1 = _mm512_permute4f128_ps(v_at1,0); v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4, _mm512_int2mask(43690),(__m512i)v_at4,177); v_at1 = _mm512_mul_ps(v_ci2,v_at1); /* at2 = at1*dnx*(float) j; */ /* at3 = dky*at1; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_dkx = 
_mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkx = _mm512_mul_ps(v_dnx,v_dkx); /* add kx to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084), v_dkx); v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336), v_dkx); /* normalize curl operators */ v_at2 = _mm512_mul_ps(v_at1,v_dk1); v_at3 = _mm512_mul_ps(v_at1,v_dk2); /* at1 = at1*cimagf(ffc[j+kk]); */ v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4, _mm512_int2mask(21845),(__m512i)v_at4,177); v_at1 = _mm512_mul_ps(v_at1,v_at4); /* zt1 = -cimagf(cu[2+4*(j+kj)]) */ /* + crealf(cu[2+4*(j+kj)])*_Complex_I; */ /* zt2 = -cimagf(cu[1+4*(j+kj)]) */ /* + crealf(cu[1+4*(j+kj)])*_Complex_I; */ /* zt3 = -cimagf(cu[4*(j+kj)]) */ /* + crealf(cu[4*(j+kj)])*_Complex_I; */ v_zt3 = _mm512_load_ps((float *)&cu[4*(j+kj)]); v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),v_zero, v_zt3); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* bxyz[4*(j+kj)] = at3*zt1; */ /* bxyz[1+4*(j+kj)] = -at2*zt1; */ /* bxyz[2+4*(j+kj)] = at2*zt2 - at3*zt3; */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_sub_ps(v_zt1,v_zt2); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255), v_zero); } _mm512_store_ps((float *)&bxyz[4*(j+kj)],v_zt1); /* wp += at1*(cu[4*(j+kj)]*conjf(cu[4*(j+kj)]) */ /* + cu[1+4*(j+kj)]*conjf(cu[1+4*(j+kj)]) */ /* + cu[2+4*(j+kj)]*conjf(cu[2+4*(j+kj)])); */ v_zt4 = _mm512_mul_ps(v_at1,_mm512_mask_mul_ps(v_zero, _mm512_int2mask(16191),v_zt3,v_zt3)); /* zt1 = -cimagf(cu[2+4*(j+k1)]) */ /* + crealf(cu[2+4*(j+k1)])*_Complex_I; */ /* zt2 = -cimagf(cu[1+4*(j+k1)]) */ /* + crealf(cu[1+4*(j+k1)])*_Complex_I; */ /* zt3 = -cimagf(cu[4*(j+k1)]) */ /* + crealf(cu[4*(j+k1)])*_Complex_I; */ v_zt3 = 
_mm512_load_ps((float *)&cu[4*(j+k1)]); v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),v_zero, v_zt3); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); v_zt1 = _mm512_mask_sub_ps(v_at2,_mm512_int2mask(12336),v_zero, v_at2); v_zt2 = _mm512_mask_sub_ps(v_at3,_mm512_int2mask(771),v_zero, v_at3); /* bxyz[4*(j+k1)] = -at3*zt1; */ /* bxyz[1+4*(j+k1)] = -at2*zt1; */ /* bxyz[2+4*(j+k1)] = at2*zt2 + at3*zt3; */ v_zt1 = _mm512_mul_ps(v_zt1,v_zt3); v_zt2 = _mm512_mul_ps(v_zt2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_sub_ps(v_zt1,v_zt2); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255), v_zero); v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255), v_zero); } _mm512_store_ps((float *)&bxyz[4*(j+k1)],v_zt1); /* wp += at1*(cu[4*(j+k1)]*conjf(cu[4*(j+k1)]) */ /* + cu[1+4*(j+k1)]*conjf(cu[1+4*(j+k1)]) */ /* + cu[2+4*(j+k1)]*conjf(cu[2+4*(j+k1)])); */ v_zt4 = _mm512_fmadd_ps(v_at1,_mm512_mask_mul_ps(v_zero, _mm512_int2mask(16191),v_zt3,v_zt3),v_zt4); /* convert to double precision before accumulating */ v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt4)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt4,78)); v_wp = _mm512_add_pd(v_wp,v_d); /* bxyz[4*(j+kj+l1)] = zero; */ /* bxyz[1+4*(j+kj+l1)] = zero; */ /* bxyz[2+4*(j+kj+l1)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+kj+l1)],v_zero); /* bxyz[4*(j+k1+l1)] = zero; */ /* bxyz[1+4*(j+k1+l1)] = zero; */ /* bxyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = ci2*crealf(ffc[j+kk]); at2 = at1*dnx*(float) j; at3 = dky*at1; at1 = at1*cimagf(ffc[j+kk]); zt1 = -cimagf(cu[2+4*(j+kj)]) + crealf(cu[2+4*(j+kj)])*_Complex_I; zt2 = -cimagf(cu[1+4*(j+kj)]) + crealf(cu[1+4*(j+kj)])*_Complex_I; zt3 = -cimagf(cu[4*(j+kj)]) + crealf(cu[4*(j+kj)])*_Complex_I; bxyz[4*(j+kj)] = 
at3*zt1; bxyz[1+4*(j+kj)] = -at2*zt1; bxyz[2+4*(j+kj)] = at2*zt2 - at3*zt3; zt1 = -cimagf(cu[2+4*(j+k1)]) + crealf(cu[2+4*(j+k1)])*_Complex_I; zt2 = -cimagf(cu[1+4*(j+k1)]) + crealf(cu[1+4*(j+k1)])*_Complex_I; zt3 = -cimagf(cu[4*(j+k1)]) + crealf(cu[4*(j+k1)])*_Complex_I; bxyz[4*(j+k1)] = -at3*zt1; bxyz[1+4*(j+k1)] = -at2*zt1; bxyz[2+4*(j+k1)] = at2*zt2 + at3*zt3; bxyz[4*(j+kj+l1)] = zero; bxyz[1+4*(j+kj+l1)] = zero; bxyz[2+4*(j+kj+l1)] = zero; bxyz[4*(j+k1+l1)] = zero; bxyz[1+4*(j+k1+l1)] = zero; bxyz[2+4*(j+k1+l1)] = zero; at1 = at1*(cu[4*(j+kj)]*conjf(cu[4*(j+kj)]) + cu[1+4*(j+kj)]*conjf(cu[1+4*(j+kj)]) + cu[2+4*(j+kj)]*conjf(cu[2+4*(j+kj)]) + cu[4*(j+k1)]*conjf(cu[4*(j+k1)]) + cu[1+4*(j+k1)]*conjf(cu[1+4*(j+k1)]) + cu[2+4*(j+k1)]*conjf(cu[2+4*(j+k1)])); wp += (double) at1; } /* sum2 += wp; */ _mm512_store_pd(&dd[0],v_wp); for (j = 1; j < 8; j++) { dd[0] += dd[j]; } sum2 += (wp + dd[0]); } /* mode numbers kx = 0, nx/2 */ wp = 0.0; v_wp = _mm512_setzero_pd(); for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; at1 = ci2*crealf(ffc[kk]); at3 = at1*dny*(float) k; at1 = at1*cimagf(ffc[kk]); zt1 = -cimagf(cu[2+4*(kj)]) + crealf(cu[2+4*(kj)])*_Complex_I; zt3 = -cimagf(cu[4*(kj)]) + crealf(cu[4*(kj)])*_Complex_I; bxyz[4*kj] = at3*zt1; bxyz[1+4*kj] = zero; bxyz[2+4*kj] = -at3*zt3; bxyz[4*k1] = zero; bxyz[1+4*k1] = zero; bxyz[2+4*k1] = zero; bxyz[4*(kj+l1)] = zero; bxyz[1+4*(kj+l1)] = zero; bxyz[2+4*(kj+l1)] = zero; bxyz[4*(k1+l1)] = zero; bxyz[1+4*(k1+l1)] = zero; bxyz[2+4*(k1+l1)] = zero; at1 = at1*(cu[4*kj]*conjf(cu[4*kj]) + cu[1+4*kj]*conjf(cu[1+4*kj]) + cu[2+4*kj]*conjf(cu[2+4*kj])); wp += (double) at1; } /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = ci2*crealf(ffc[j]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,_mm512_int2mask(15), (float *)&ffc[j]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1,_mm512_int2mask(15), (float *)&ffc[j+8]); v_at1 = 
_mm512_permute4f128_ps(v_at1,0); v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4, _mm512_int2mask(43690),(__m512i)v_at4,177); v_at1 = _mm512_mul_ps(v_ci2,v_at1); /* at2 = at1*dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkx = _mm512_mul_ps(v_dnx,v_dkx); /* add kx to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(3084),v_dkx); v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(12336),v_dkx); /* normalize curl operators */ v_at2 = _mm512_mul_ps(v_at1,v_dk1); v_at3 = _mm512_mul_ps(v_at1,v_dk2); /* at1 = at1*cimagf(ffc[j]); */ v_at4 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at4, _mm512_int2mask(21845),(__m512i)v_at4,177); v_at1 = _mm512_mul_ps(v_at1,v_at4); /* zt1 = -cimagf(cu[2+4*j]) + crealf(cu[2+4*j])*_Complex_I; */ /* zt2 = -cimagf(cu[1+4*j]) + crealf(cu[1+4*j])*_Complex_I; */ v_zt3 = _mm512_load_ps((float *)&cu[4*j]); v_zt3 = _mm512_mask_sub_ps(v_zt3,_mm512_int2mask(43690),v_zero, v_zt3); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* bxyz[4*j] = zero; */ /* bxyz[1+4*j] = -at2*zt1; */ /* bxyz[2+4*j] = at2*zt2; */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_sub_ps(v_zt1,v_zt2); /* zero out kx = 0 mode */ if (j==0) { v_zt1 = _mm512_mask_mov_ps(v_zt1,_mm512_int2mask(255),v_zero); v_zt3 = _mm512_mask_mov_ps(v_zt3,_mm512_int2mask(255),v_zero); } _mm512_store_ps((float *)&bxyz[4*j],v_zt1); /* wp += at1*(cu[4*j]*conjf(cu[4*j]) */ /* + cu[1+4*j]*conjf(cu[1+4*j]) */ /* + cu[2+4*j]*conjf(cu[2+4*j])); */ v_zt4 = _mm512_mul_ps(v_at1,_mm512_mask_mul_ps(v_zero, _mm512_int2mask(16191),v_zt3,v_zt3)); /* convert to double precision before accumulating */ v_wp = 
_mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt4)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt4,78)); v_wp = _mm512_add_pd(v_wp,v_d); /* bxyz[4*(j+k1)] = zero; */ /* bxyz[1+4*(j+k1)] = zero; */ /* bxyz[2+4*(j+k1)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+k1)],v_zero); /* bxyz[4*(j+l1)] = zero; */ /* bxyz[1+4*(j+l1)] = zero; */ /* bxyz[2+4*(j+l1)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+l1)],v_zero); /* bxyz[4*(j+k1+l1)] = zero; */ /* bxyz[1+4*(j+k1+l1)] = zero; */ /* bxyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = ci2*crealf(ffc[j]); at2 = at1*dnx*(float) j; at1 = at1*cimagf(ffc[j]); zt1 = -cimagf(cu[2+4*j]) + crealf(cu[2+4*j])*_Complex_I; zt2 = -cimagf(cu[1+4*j]) + crealf(cu[1+4*j])*_Complex_I; bxyz[4*j] = zero; bxyz[1+4*j] = -at2*zt1; bxyz[2+4*j] = at2*zt2; bxyz[4*(j+k1)] = zero; bxyz[1+4*(j+k1)] = zero; bxyz[2+4*(j+k1)] = zero; bxyz[4*(j+l1)] = zero; bxyz[1+4*(j+l1)] = zero; bxyz[2+4*(j+l1)] = zero; bxyz[4*(j+k1+l1)] = zero; bxyz[1+4*(j+k1+l1)] = zero; bxyz[2+4*(j+k1+l1)] = zero; at1 = at1*(cu[4*j]*conjf(cu[4*j]) + cu[1+4*j]*conjf(cu[1+4*j]) + cu[2+4*j]*conjf(cu[2+4*j])); wp += (double) at1; } bxyz[0] = zero; bxyz[1] = zero; bxyz[2] = zero; bxyz[4*k1] = zero; bxyz[1+4*k1] = zero; bxyz[2+4*k1] = zero; bxyz[4*l1] = zero; bxyz[1+4*l1] = zero; bxyz[2+4*l1] = zero; bxyz[4*(k1+l1)] = zero; bxyz[1+4*(k1+l1)] = zero; bxyz[2+4*(k1+l1)] = zero; /* sum2 += wp; */ _mm512_store_pd(&dd[0],v_wp); for (j = 1; j < 8; j++) { dd[0] += dd[j]; } sum2 += (wp + dd[0]); /* *wm = wp*((float) nx)*((float) ny)*((float) nz); */ *wm = (sum1 + sum2)*((float) nx)*((float) ny)*((float) nz); return; } /*--------------------------------------------------------------------*/ void ckncmmaxwel3(float complex exyz[], float complex bxyz[], float complex cu[], float complex ffc[], float ci, float dt, float *wf, float *wm, int nx, int ny, int nz, int nxvh, int nyv, int nzv, int 
nxhd, int nyhd, int nzhd) { /* this subroutine solves 3d maxwell's equation in fourier space for transverse electric and magnetic fields with periodic boundary conditions. input: all, output: wf, wm, exyz, bxyz approximate flop count is: 680*nxc*nyc*nzc + 149*(nxc*nyc + nxc*nzc + nyc*nzc) plus nxc*nyc*nzc divides where nxc = nx/2 - 1, nyc = ny/2 - 1, nzc = nz/2 - 1 the magnetic field is first updated half a step using the equations: bx[kz][ky][kx] = bx[kz][ky][kx] - .5*dt*sqrt(-1)* (ky*ez[kz][ky][kx]-kz*ey[kz][ky][kx]) by[kz][ky][kx] = by[kz][ky][kx] - .5*dt*sqrt(-1)* (kz*ex[kz][ky][kx]-kx*ez[kz][ky][kx]) bz[kz][ky][kx] = bz[kz][ky][kx] - .5*dt*sqrt(-1)* (kx*ey[kz][ky][kx]-ky*ex[kz][ky][kx]) the electric field is then updated a whole step using the equations: ex[kz][ky][kx] = ex[kz][ky][kx] + c2*dt*sqrt(-1) *(ky*bz[kz][ky][kx]-kz*by[kz][ky][kx]) - affp*dt*cux[kz][ky][kx]*s[kz][ky][kx] ey[kz][ky][kx] = ey[kz][ky][kx] + c2*dt*sqrt(-1) *(kz*bx[kz][ky][kx]-kx*bz[kz][ky][kx]) - affp*dt*cuy[kz][ky][kx]*s[kz][ky][kx] ez[kz][ky][kx] = ez[kz][ky][kx] + c2*dt*sqrt(-1) *(kx*by[kz][ky][kx]-ky*bx[kz][ky][kx]) - affp*dt*cuz[kz][ky][kx]*s[kz][ky][kx] the magnetic field is finally updated the remaining half step with the new electric field and the previous magnetic field equations. where kx = 2pi*j/nx, ky = 2pi*k/ny, kz = 2pi*l/nz, c2 = 1./(ci*ci) and s[kz][ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2) j,k,l = fourier mode numbers, except for ex(kx=pi) = ey(kx=pi) = ez(kx=pi) = 0, ex(ky=pi) = ey(ky=pi) = ez(ky=pi) = 0, ex(kz=pi) = ey(kz=pi) = ez(kz=pi) = 0, ex(kx=0,ky=0,kz=0) = ey(kx=0,ky=0,kz=0) = ez(kx=0,ky=0,kz=0) = 0. and similarly for bx, by, bz. 
cu[l][k][j][i] = complex current density exyz[l][k][j][i] = complex transverse electric field bxyz[l][k][j][i] = complex magnetic field for component i, all for fourier mode (j1,k,l) real(ffc[0][0][0]) = affp = normalization constant = nx*ny*nz/np, where np=number of particles aimag(ffc[l][k][j]) = finite-size particle shape factor s, s[kz][ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2+(kz*az)**2)/2) for fourier mode (j,k,l) ci = reciprocal of velocity of light dt = time interval between successive calculations transverse electric field energy is also calculated, using wf = nx*ny*nz*sum((1/affp)*|exyz[kz][ky][kx]|**2) magnetic field energy is also calculated, using wm = nx*ny*nz*sum((c2/affp)*|bxyz[kz][ky][kx]|**2) nx/ny/nz = system length in x/y/z direction nxvh = second dimension of field arrays, must be >= nxh nyv = third dimension of field arrays, must be >= ny nzv = fourth dimension of field arrays, must be >= nz nxhd = second dimension of form factor array, must be >= nxh nyhd = third dimension of form factor array, must be >= nyh nzhd = fourth dimension of form factor array, must be >= nzh requires KNC, cu, exyz, bxyz, ffc need to be 64 byte aligned nxhd needs to be a multiple of 8 nxvh needs to be a multiple of 2 cu, exyz, bxyz needs to have 4 components local data */ int nxh, nyh, nzh, nxhs, itn, j, k, l, k1, l1, kk, kj, ll, lj; int nxyhd, nxvyh; float dnx, dny, dnz, dth, c2, cdt, affp, anorm, dkx, dky, dkz; float adt, afdt; float at1; float complex zero, zt1, zt2, zt3, zt4, zt5, zt6, zt7, zt8, zt9; double wp, ws, sum1, sum2, sum3, sum4; __m512i v_j, v_it, v_n, v_m; __m512 v_dnx, v_dny, v_dnz, v_dkx, v_dky, v_dkz; __m512 v_zero, v_cdt, v_adt, v_afdt, v_dth, v_anorm; __m512 v_dk1, v_dk2, v_at2, v_at3; __m512 v_zt1, v_zt2, v_zt3, v_zt4, v_zt5, v_zt6, v_zt7; __m512d v_wp, v_ws, v_d; __attribute__((aligned(64))) double dd[8]; if (ci <= 0.0) return; nxh = nx/2; nyh = 1 > ny/2 ? 1 : ny/2; nzh = 1 > nz/2 ? 1 : nz/2; nxhs = 2*(nxh/2); itn = 1 > nxhs ?
1 : nxhs; nxyhd = nxhd*nyhd; nxvyh = nxvh*nyv; dnx = 6.28318530717959/(float) nx; dny = 6.28318530717959/(float) ny; dnz = 6.28318530717959/(float) nz; dth = 0.5*dt; c2 = 1.0/(ci*ci); cdt = c2*dt; affp = creal(ffc[0]); adt = affp*dt; zero = 0.0 + 0.0*_Complex_I; anorm = 1.0/affp; v_j = _mm512_set_epi32(1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0); v_n = _mm512_set_epi32(15,14,11,10,9,8,13,12,7,6,3,2,1,0,5,4); v_m = _mm512_set_epi32(15,14,9,8,13,12,11,10,7,6,1,0,5,4,3,2); v_dnx = _mm512_set1_ps(dnx); v_dny = _mm512_set1_ps(dny); v_dnz = _mm512_set1_ps(dnz); v_zero = _mm512_setzero_ps(); v_cdt = _mm512_set1_ps(cdt); v_adt = _mm512_set1_ps(adt); v_dth = _mm512_set1_ps(dth); v_anorm = _mm512_set1_ps(anorm); /* update electromagnetic field and sum field energies */ sum1 = 0.0; sum2 = 0.0; /* calculate the electromagnetic fields */ /* mode numbers 0 < kx < nx/2, 0 < ky < ny/2, and 0 < kz < nz/2 */ #pragma omp parallel { #pragma omp for nowait \ private(j,k,l,k1,l1,ll,lj,kk,kj,dkz,dky,dkx,afdt,at1,zt1,zt2,zt3,zt4, \ zt5,zt6,zt7,zt8,zt9,ws,wp,v_it,v_dkx,v_dky,v_dkz,v_dk1,v_dk2,v_afdt, \ v_at2,v_at3,v_zt1,v_zt2,v_zt3,v_zt4,v_zt5,v_zt6,v_zt7,v_d,v_ws,v_wp,dd) \ reduction(+:sum1,sum2) for (l = 1; l < nzh; l++) { dkz = dnz*(float) l; v_dkz = _mm512_cvtfxpnt_round_adjustepi32_ps(_mm512_set1_epi32(l), _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkz = _mm512_mul_ps(v_dnz,v_dkz); ll = nxyhd*l; lj = nxvyh*l; l1 = nxvyh*nz - lj; ws = 0.0; wp = 0.0; v_ws = _mm512_set1_pd(0.0); v_wp = _mm512_set1_pd(0.0); /* add kz to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(771),v_dkz); v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(3084),v_dkz); for (k = 1; k < nyh; k++) { dky = dny*(float) k; v_it = _mm512_set1_epi32(k); v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dky = _mm512_mul_ps(v_dny,v_dky); kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; /* add ky to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(12336), v_dky); 
v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(771), v_dky); /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* dkx = dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkx = _mm512_mul_ps(v_dnx,v_dkx); /* add kx to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084), v_dkx); v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336), v_dkx); /* afdt = adt*cimagf(ffc[j+kk+ll]); */ v_afdt = _mm512_mask_loadunpacklo_ps(v_zero, _mm512_int2mask(15),(float *)&ffc[j+kk+ll]); v_afdt = _mm512_mask_loadunpackhi_ps(v_afdt, _mm512_int2mask(15),(float *)&ffc[j+kk+ll+8]); v_afdt = _mm512_permute4f128_ps(v_afdt,0); v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt, _mm512_int2mask(13260),(__m512i)v_afdt,78); v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt, _mm512_int2mask(21845),(__m512i)v_afdt,177); v_afdt = _mm512_mul_ps(v_adt,v_afdt); /* update magnetic field half time step, ky > 0, kz > 0 */ /* zt1 = -cimagf(exyz[2+4*(j+kj+lj)]) */ /* + crealf(exyz[2+4*(j+kj+lj)])*_Complex_I; */ /* zt2 = -cimagf(exyz[1+4*(j+kj+lj)]) */ /* + crealf(exyz[1+4*(j+kj+lj)])*_Complex_I; */ /* zt3 = -cimagf(exyz[4*(j+kj+lj)]) */ /* + crealf(exyz[4*(j+kj+lj)])*_Complex_I; */ v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+kj+lj)]); v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690), v_zero,v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt4 = bxyz[4*(j+kj+lj)] - dth*(dky*zt1 - dkz*zt2); */ /* zt5 = bxyz[1+4*(j+kj+lj)] - dth*(dkz*zt3 - dkx*zt1); */ /* zt6 = bxyz[2+4*(j+kj+lj)] - dth*(dkx*zt2 - dky*zt3); */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+kj+lj)]); 
v_zt5 = _mm512_sub_ps(v_zt2,v_zt1); /* update electric field whole time step */ /* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */ /* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */ /* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690), v_zero,v_zt5); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt7 = exyz[4*(j+kj+lj)] + cdt*(dky*zt1 - dkz*zt2) */ /* - afdt*cu[4*(j+kj+lj)]; */ /* zt8 = exyz[1+4*(j+kj+lj)] + cdt*(dkz*zt3 - dkx*zt1) */ /* - afdt*cu[1+4*(j+kj+lj)]; */ /* zt9 = exyz[2+4*(j+kj+lj)] + cdt*(dkx*zt2 - dky*zt3) */ /* - afdt*cu[2+4*(j+kj+lj)]; */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2), v_zt4); v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj+lj)]); v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191), v_afdt,v_zt2); v_zt4 = _mm512_sub_ps(v_zt1,v_zt2); /* update magnetic field half time step and store electric field */ /* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */ /* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */ /* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690), v_zero,v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* exyz[4*(j+kj+lj)] = zt7; */ /* exyz[1+4*(j+kj+lj)] = zt8; */ /* exyz[2+4*(j+kj+lj)] = zt9; */ /* zero out kx = 0 mode */ if (j==0) { v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&exyz[4*(j+kj+lj)], _mm512_int2mask(65280),v_zt4); } else { _mm512_store_ps((float *)&exyz[4*(j+kj+lj)],v_zt4); } /* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) */ /* + zt9*conjf(zt9)); */ v_zt6 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4)); /* zt4 -= dth*(dky*zt1 - dkz*zt2); */ /* zt5 -= dth*(dkz*zt3 - dkx*zt1); */ /* zt6 -= dth*(dkx*zt2 - dky*zt3); */ v_zt1 
= _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt5 = _mm512_sub_ps(v_zt5,v_zt1); /* bxyz[4*(j+kj+lj)] = zt4; */ /* bxyz[1+4*(j+kj+lj)] = zt5; */ /* bxyz[2+4*(j+kj+lj)] = zt6; */ /* zero out kx = 0 mode */ if (j==0) { v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&bxyz[4*(j+kj+lj)], _mm512_int2mask(65280),v_zt5); } else { _mm512_store_ps((float *)&bxyz[4*(j+kj+lj)],v_zt5); } /* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) */ /* + zt6*conjf(zt6)); */ v_zt7 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5)); /* update magnetic field half time step, ky < 0, kz > 0 */ /* zt1 = -cimagf(exyz[2+4*(j+k1+lj)]) */ /* + crealf(exyz[2+4*(j+k1+lj)])*_Complex_I; */ /* zt2 = -cimagf(exyz[1+4*(j+k1+lj)]) */ /* + crealf(exyz[1+4*(j+k1+lj)])*_Complex_I; */ /* zt3 = -cimagf(exyz[4*(j+k1+lj)]) */ /* + crealf(exyz[4*(j+k1+lj)])*_Complex_I; */ v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+k1+lj)]); v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690), v_zero,v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); v_at2 = _mm512_mask_sub_ps(v_dk1,_mm512_int2mask(12336), v_zero,v_dk1); v_at3 = _mm512_mask_sub_ps(v_dk2,_mm512_int2mask(771), v_zero,v_dk2); /* zt4 = bxyz[4*(j+k1+lj)] + dth*(dky*zt1 + dkz*zt2); */ /* zt5 = bxyz[1+4*(j+k1+lj)] - dth*(dkz*zt3 - dkx*zt1); */ /* zt6 = bxyz[2+4*(j+k1+lj)] - dth*(dkx*zt2 + dky*zt3); */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+k1+lj)]); v_zt5 = _mm512_sub_ps(v_zt2,v_zt1); /* update electric field whole time step */ /* zt1 = -cimagf(zt6) + 
crealf(zt6)*_Complex_I; */ /* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */ /* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690), v_zero,v_zt5); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt7 = exyz[4*(j+k1+lj)] - cdt*(dky*zt1 + dkz*zt2) */ /* - afdt*cu[4*(j+k1+lj)]; */ /* zt8 = exyz[1+4*(j+k1+lj)] + cdt*(dkz*zt3 - dkx*zt1) */ /* - afdt*cu[1+4*(j+k1+lj)]; */ /* zt9 = exyz[2+4*(j+k1+lj)] + cdt*(dkx*zt2 + dky*zt3) */ /* - afdt*cu[2+4*(j+k1+lj)]; */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2), v_zt4); v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1+lj)]); v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191), v_afdt,v_zt2); v_zt4 = _mm512_sub_ps(v_zt1,v_zt2); /* update magnetic field half time step and store electric field */ /* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */ /* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */ /* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690), v_zero,v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* exyz[4*(j+k1+lj)] = zt7; */ /* exyz[1+4*(j+k1+lj)] = zt8; */ /* exyz[2+4*(j+k1+lj)] = zt9; */ /* zero out kx = 0 mode */ if (j==0) { v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&exyz[4*(j+k1+lj)], _mm512_int2mask(65280),v_zt4); } else { _mm512_store_ps((float *)&exyz[4*(j+k1+lj)],v_zt4); } /* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) */ /* + zt9*conjf(zt9)); */ v_zt6 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4), v_zt6); /* zt4 += dth*(dky*zt1 + dkz*zt2); */ /* zt5 -= dth*(dkz*zt3 - dkx*zt1); */ /* zt6 -= dth*(dkx*zt2 + dky*zt3); */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = 
(__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt5 = _mm512_sub_ps(v_zt5,v_zt1); /* bxyz[4*(j+k1+lj)] = zt4; */ /* bxyz[1+4*(j+k1+lj)] = zt5; */ /* bxyz[2+4*(j+k1+lj)] = zt6; */ /* zero out kx = 0 mode */ if (j==0) { v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&bxyz[4*(j+k1+lj)], _mm512_int2mask(65280),v_zt5); } else { _mm512_store_ps((float *)&bxyz[4*(j+k1+lj)],v_zt5); } /* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) */ /* + zt6*conjf(zt6)); */ v_zt7 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5), v_zt7); /* update magnetic field half time step, ky > 0, kz < 0 */ /* zt1 = -cimagf(exyz[2+4*(j+kj+l1)]) */ /* + crealf(exyz[2+4*(j+kj+l1)])*_Complex_I; */ /* zt2 = -cimagf(exyz[1+4*(j+kj+l1)]) */ /* + crealf(exyz[1+4*(j+kj+l1)])*_Complex_I; */ /* zt3 = -cimagf(exyz[4*(j+kj+l1)]) */ /* + crealf(exyz[4*(j+kj+l1)])*_Complex_I; */ v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+kj+l1)]); v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690), v_zero,v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); v_at2 = _mm512_mask_sub_ps(v_dk1,_mm512_int2mask(771), v_zero,v_dk1); v_at3 = _mm512_mask_sub_ps(v_dk2,_mm512_int2mask(3084), v_zero,v_dk2); /* zt4 = bxyz[4*(j+kj+l1)] - dth*(dky*zt1 + dkz*zt2); */ /* zt5 = bxyz[1+4*(j+kj+l1)] + dth*(dkz*zt3 + dkx*zt1); */ /* zt6 = bxyz[2+4*(j+kj+l1)] - dth*(dkx*zt2 - dky*zt3); */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+kj+l1)]); v_zt5 = _mm512_sub_ps(v_zt2,v_zt1); /* update electric field whole time step */ /* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */ /* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; 
*/ /* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690), v_zero,v_zt5); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt7 = exyz[4*(j+kj+l1)] + cdt*(dky*zt1 + dkz*zt2) */ /* - afdt*cu[4*(j+kj+l1)]; */ /* zt8 = exyz[1+4*(j+kj+l1)] - cdt*(dkz*zt3 + dkx*zt1) */ /* - afdt*cu[1+4*(j+kj+l1)]; */ /* zt9 = exyz[2+4*(j+kj+l1)] + cdt*(dkx*zt2 - dky*zt3) */ /* - afdt*cu[2+4*(j+kj+l1)]; */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2), v_zt4); v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj+l1)]); v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191), v_afdt,v_zt2); v_zt4 = _mm512_sub_ps(v_zt1,v_zt2); /* update magnetic field half time step and store electric field */ /* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */ /* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */ /* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690), v_zero,v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* exyz[4*(j+kj+l1)] = zt7; */ /* exyz[1+4*(j+kj+l1)] = zt8; */ /* exyz[2+4*(j+kj+l1)] = zt9; */ /* zero out kx = 0 mode */ if (j==0) { v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&exyz[4*(j+kj+l1)], _mm512_int2mask(65280),v_zt4); } else { _mm512_store_ps((float *)&exyz[4*(j+kj+l1)],v_zt4); } /* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) */ /* + zt9*conjf(zt9)); */ v_zt6 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4), v_zt6); /* zt4 -= dth*(dky*zt1 + dkz*zt2); */ /* zt5 += dth*(dkz*zt3 + dkx*zt1); */ /* zt6 -= dth*(dkx*zt2 - dky*zt3); */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = 
(__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt5 = _mm512_sub_ps(v_zt5,v_zt1); /* bxyz[4*(j+kj+l1)] = zt4; */ /* bxyz[1+4*(j+kj+l1)] = zt5; */ /* bxyz[2+4*(j+kj+l1)] = zt6; */ /* zero out kx = 0 mode */ if (j==0) { v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&bxyz[4*(j+kj+l1)], _mm512_int2mask(65280),v_zt5); } else { _mm512_store_ps((float *)&bxyz[4*(j+kj+l1)],v_zt5); } /* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) */ /* + zt6*conjf(zt6)); */ v_zt7 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5), v_zt7); /* update magnetic field half time step, ky < 0, kz < 0 */ /* zt1 = -cimagf(exyz[2+4*(j+k1+l1)]) */ /* + crealf(exyz[2+4*(j+k1+l1)])*_Complex_I; */ /* zt2 = -cimagf(exyz[1+4*(j+k1+l1)]) */ /* + crealf(exyz[1+4*(j+k1+l1)])*_Complex_I; */ /* zt3 = -cimagf(exyz[4*(j+k1+l1)]) */ /* + crealf(exyz[4*(j+k1+l1)])*_Complex_I; */ v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]); v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690), v_zero,v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); v_at2 = _mm512_mask_sub_ps(v_dk1,_mm512_int2mask(13107), v_zero,v_dk1); v_at3 = _mm512_mask_sub_ps(v_dk2,_mm512_int2mask(3855), v_zero,v_dk2); /* zt4 = bxyz[4*(j+k1+l1)] + dth*(dky*zt1 - dkz*zt2); */ /* zt5 = bxyz[1+4*(j+k1+l1)] + dth*(dkz*zt3 + dkx*zt1); */ /* zt6 = bxyz[2+4*(j+k1+l1)] - dth*(dkx*zt2 + dky*zt3); */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+k1+l1)]); v_zt5 = _mm512_sub_ps(v_zt2,v_zt1); /* update electric field whole time step */ /* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */ /* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */ /* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */ v_zt3 
= _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690), v_zero,v_zt5); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt7 = exyz[4*(j+k1+l1)] - cdt*(dky*zt1 - dkz*zt2) */ /* - afdt*cu[4*(j+k1+l1)]; */ /* zt8 = exyz[1+4*(j+k1+l1)] - cdt*(dkz*zt3 + dkx*zt1) */ /* - afdt*cu[1+4*(j+k1+l1)]; */ /* zt9 = exyz[2+4*(j+k1+l1)] + cdt*(dkx*zt2 + dky*zt3) */ /* - afdt*cu[2+4*(j+k1+l1)]; */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2), v_zt4); v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1+l1)]); v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191), v_afdt,v_zt2); v_zt4 = _mm512_sub_ps(v_zt1,v_zt2); /* update magnetic field half time step and store electric field */ /* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */ /* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */ /* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690), v_zero,v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* exyz[4*(j+k1+l1)] = zt7; */ /* exyz[1+4*(j+k1+l1)] = zt8; */ /* exyz[2+4*(j+k1+l1)] = zt9; */ /* zero out kx = 0 mode */ if (j==0) { v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&exyz[4*(j+k1+l1)], _mm512_int2mask(65280),v_zt4); } else { _mm512_store_ps((float *)&exyz[4*(j+k1+l1)],v_zt4); } /* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) */ /* + zt9*conjf(zt9)); */ v_zt6 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4), v_zt6); /* zt4 += dth*(dky*zt1 - dkz*zt2); */ /* zt5 += dth*(dkz*zt3 + dkx*zt1); */ /* zt6 -= dth*(dkx*zt2 + dky*zt3); */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = 
_mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt5 = _mm512_sub_ps(v_zt5,v_zt1); /* bxyz[4*(j+k1+l1)] = zt4; */ /* bxyz[1+4*(j+k1+l1)] = zt5; */ /* bxyz[2+4*(j+k1+l1)] = zt6; */ /* zero out kx = 0 mode */ if (j==0) { v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&bxyz[4*(j+k1+l1)], _mm512_int2mask(65280),v_zt5); } else { _mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zt5); } /* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) */ /* + zt6*conjf(zt6)); */ v_zt7 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5), v_zt7); /* convert to double precision before accumulating */ v_ws = _mm512_add_pd(v_ws,_mm512_cvtpslo_pd(v_zt6)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt6,78)); v_ws = _mm512_add_pd(v_ws,v_d); v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt7)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt7,78)); v_wp = _mm512_add_pd(v_wp,v_d); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { dkx = dnx*(float) j; afdt = adt*cimagf(ffc[j+kk+ll]); /* update magnetic field half time step, ky > 0, kz > 0 */ zt1 = -cimagf(exyz[2+4*(j+kj+lj)]) + crealf(exyz[2+4*(j+kj+lj)])*_Complex_I; zt2 = -cimagf(exyz[1+4*(j+kj+lj)]) + crealf(exyz[1+4*(j+kj+lj)])*_Complex_I; zt3 = -cimagf(exyz[4*(j+kj+lj)]) + crealf(exyz[4*(j+kj+lj)])*_Complex_I; zt4 = bxyz[4*(j+kj+lj)] - dth*(dky*zt1 - dkz*zt2); zt5 = bxyz[1+4*(j+kj+lj)] - dth*(dkz*zt3 - dkx*zt1); zt6 = bxyz[2+4*(j+kj+lj)] - dth*(dkx*zt2 - dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*(j+kj+lj)] + cdt*(dky*zt1 - dkz*zt2) - afdt*cu[4*(j+kj+lj)]; zt8 = exyz[1+4*(j+kj+lj)] + cdt*(dkz*zt3 - dkx*zt1) - afdt*cu[1+4*(j+kj+lj)]; zt9 = exyz[2+4*(j+kj+lj)] + cdt*(dkx*zt2 - dky*zt3) - afdt*cu[2+4*(j+kj+lj)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + 
crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*(j+kj+lj)] = zt7; exyz[1+4*(j+kj+lj)] = zt8; exyz[2+4*(j+kj+lj)] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); ws += (double) at1; zt4 -= dth*(dky*zt1 - dkz*zt2); zt5 -= dth*(dkz*zt3 - dkx*zt1); zt6 -= dth*(dkx*zt2 - dky*zt3); bxyz[4*(j+kj+lj)] = zt4; bxyz[1+4*(j+kj+lj)] = zt5; bxyz[2+4*(j+kj+lj)] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); wp += (double) at1; /* update magnetic field half time step, ky < 0, kz > 0 */ zt1 = -cimagf(exyz[2+4*(j+k1+lj)]) + crealf(exyz[2+4*(j+k1+lj)])*_Complex_I; zt2 = -cimagf(exyz[1+4*(j+k1+lj)]) + crealf(exyz[1+4*(j+k1+lj)])*_Complex_I; zt3 = -cimagf(exyz[4*(j+k1+lj)]) + crealf(exyz[4*(j+k1+lj)])*_Complex_I; zt4 = bxyz[4*(j+k1+lj)] + dth*(dky*zt1 + dkz*zt2); zt5 = bxyz[1+4*(j+k1+lj)] - dth*(dkz*zt3 - dkx*zt1); zt6 = bxyz[2+4*(j+k1+lj)] - dth*(dkx*zt2 + dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*(j+k1+lj)] - cdt*(dky*zt1 + dkz*zt2) - afdt*cu[4*(j+k1+lj)]; zt8 = exyz[1+4*(j+k1+lj)] + cdt*(dkz*zt3 - dkx*zt1) - afdt*cu[1+4*(j+k1+lj)]; zt9 = exyz[2+4*(j+k1+lj)] + cdt*(dkx*zt2 + dky*zt3) - afdt*cu[2+4*(j+k1+lj)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*(j+k1+lj)] = zt7; exyz[1+4*(j+k1+lj)] = zt8; exyz[2+4*(j+k1+lj)] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); ws += (double) at1; zt4 += dth*(dky*zt1 + dkz*zt2); zt5 -= dth*(dkz*zt3 - dkx*zt1); zt6 -= dth*(dkx*zt2 + dky*zt3); bxyz[4*(j+k1+lj)] = zt4; bxyz[1+4*(j+k1+lj)] = zt5; bxyz[2+4*(j+k1+lj)] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); wp += 
(double) at1; /* update magnetic field half time step, ky > 0, kz < 0 */ zt1 = -cimagf(exyz[2+4*(j+kj+l1)]) + crealf(exyz[2+4*(j+kj+l1)])*_Complex_I; zt2 = -cimagf(exyz[1+4*(j+kj+l1)]) + crealf(exyz[1+4*(j+kj+l1)])*_Complex_I; zt3 = -cimagf(exyz[4*(j+kj+l1)]) + crealf(exyz[4*(j+kj+l1)])*_Complex_I; zt4 = bxyz[4*(j+kj+l1)] - dth*(dky*zt1 + dkz*zt2); zt5 = bxyz[1+4*(j+kj+l1)] + dth*(dkz*zt3 + dkx*zt1); zt6 = bxyz[2+4*(j+kj+l1)] - dth*(dkx*zt2 - dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*(j+kj+l1)] + cdt*(dky*zt1 + dkz*zt2) - afdt*cu[4*(j+kj+l1)]; zt8 = exyz[1+4*(j+kj+l1)] - cdt*(dkz*zt3 + dkx*zt1) - afdt*cu[1+4*(j+kj+l1)]; zt9 = exyz[2+4*(j+kj+l1)] + cdt*(dkx*zt2 - dky*zt3) - afdt*cu[2+4*(j+kj+l1)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*(j+kj+l1)] = zt7; exyz[1+4*(j+kj+l1)] = zt8; exyz[2+4*(j+kj+l1)] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); ws += (double) at1; zt4 -= dth*(dky*zt1 + dkz*zt2); zt5 += dth*(dkz*zt3 + dkx*zt1); zt6 -= dth*(dkx*zt2 - dky*zt3); bxyz[4*(j+kj+l1)] = zt4; bxyz[1+4*(j+kj+l1)] = zt5; bxyz[2+4*(j+kj+l1)] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); wp += (double) at1; /* update magnetic field half time step, ky < 0, kz < 0 */ zt1 = -cimagf(exyz[2+4*(j+k1+l1)]) + crealf(exyz[2+4*(j+k1+l1)])*_Complex_I; zt2 = -cimagf(exyz[1+4*(j+k1+l1)]) + crealf(exyz[1+4*(j+k1+l1)])*_Complex_I; zt3 = -cimagf(exyz[4*(j+k1+l1)]) + crealf(exyz[4*(j+k1+l1)])*_Complex_I; zt4 = bxyz[4*(j+k1+l1)] + dth*(dky*zt1 - dkz*zt2); zt5 = bxyz[1+4*(j+k1+l1)] + dth*(dkz*zt3 + dkx*zt1); zt6 = bxyz[2+4*(j+k1+l1)] - dth*(dkx*zt2 + dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + 
crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*(j+k1+l1)] - cdt*(dky*zt1 - dkz*zt2) - afdt*cu[4*(j+k1+l1)]; zt8 = exyz[1+4*(j+k1+l1)] - cdt*(dkz*zt3 + dkx*zt1) - afdt*cu[1+4*(j+k1+l1)]; zt9 = exyz[2+4*(j+k1+l1)] + cdt*(dkx*zt2 + dky*zt3) - afdt*cu[2+4*(j+k1+l1)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*(j+k1+l1)] = zt7; exyz[1+4*(j+k1+l1)] = zt8; exyz[2+4*(j+k1+l1)] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); ws += (double) at1; zt4 += dth*(dky*zt1 - dkz*zt2); zt5 += dth*(dkz*zt3 + dkx*zt1); zt6 -= dth*(dkx*zt2 + dky*zt3); bxyz[4*(j+k1+l1)] = zt4; bxyz[1+4*(j+k1+l1)] = zt5; bxyz[2+4*(j+k1+l1)] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); wp += (double) at1; } } /* mode numbers kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { dky = dny*(float) k; kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; afdt = adt*cimagf(ffc[kk+ll]); /* update magnetic field half time step, kz > 0 */ zt1 = -cimagf(exyz[2+4*(kj+lj)]) + crealf(exyz[2+4*(kj+lj)])*_Complex_I; zt2 = -cimagf(exyz[1+4*(kj+lj)]) + crealf(exyz[1+4*(kj+lj)])*_Complex_I; zt3 = -cimagf(exyz[4*(kj+lj)]) + crealf(exyz[4*(kj+lj)])*_Complex_I; zt4 = bxyz[4*(kj+lj)] - dth*(dky*zt1 - dkz*zt2); zt5 = bxyz[1+4*(kj+lj)] - dth*(dkz*zt3); zt6 = bxyz[2+4*(kj+lj)] + dth*(dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*(kj+lj)] + cdt*(dky*zt1 - dkz*zt2) - afdt*cu[4*(kj+lj)]; zt8 = exyz[1+4*(kj+lj)] + cdt*(dkz*zt3) - afdt*cu[1+4*(kj+lj)]; zt9 = exyz[2+4*(kj+lj)] - cdt*(dky*zt3) - afdt*cu[2+4*(kj+lj)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + 
crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*(kj+lj)] = zt7; exyz[1+4*(kj+lj)] = zt8; exyz[2+4*(kj+lj)] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); ws += (double) at1; zt4 -= dth*(dky*zt1 - dkz*zt2); zt5 -= dth*(dkz*zt3); zt6 += dth*(dky*zt3); bxyz[4*(kj+lj)] = zt4; bxyz[1+4*(kj+lj)] = zt5; bxyz[2+4*(kj+lj)] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); wp += (double) at1; bxyz[4*(k1+lj)] = zero; bxyz[1+4*(k1+lj)] = zero; bxyz[2+4*(k1+lj)] = zero; exyz[4*(k1+lj)] = zero; exyz[1+4*(k1+lj)] = zero; exyz[2+4*(k1+lj)] = zero; /* update magnetic field half time step, kz < 0 */ zt1 = -cimagf(exyz[2+4*(kj+l1)]) + crealf(exyz[2+4*(kj+l1)])*_Complex_I; zt2 = -cimagf(exyz[1+4*(kj+l1)]) + crealf(exyz[1+4*(kj+l1)])*_Complex_I; zt3 = -cimagf(exyz[4*(kj+l1)]) + crealf(exyz[4*(kj+l1)])*_Complex_I; zt4 = bxyz[4*(kj+l1)] - dth*(dky*zt1 + dkz*zt2); zt5 = bxyz[1+4*(kj+l1)] + dth*(dkz*zt3); zt6 = bxyz[2+4*(kj+l1)] + dth*(dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*(kj+l1)] + cdt*(dky*zt1 + dkz*zt2) - afdt*cu[4*(kj+l1)]; zt8 = exyz[1+4*(kj+l1)] - cdt*(dkz*zt3) - afdt*cu[1+4*(kj+l1)]; zt9 = exyz[2+4*(kj+l1)] - cdt*(dky*zt3) - afdt*cu[2+4*(kj+l1)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*(kj+l1)] = zt7; exyz[1+4*(kj+l1)] = zt8; exyz[2+4*(kj+l1)] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); ws += (double) at1; zt4 -= dth*(dky*zt1 + dkz*zt2); zt5 += dth*(dkz*zt3); zt6 += dth*(dky*zt3); bxyz[4*(kj+l1)] = zt4; bxyz[1+4*(kj+l1)] = zt5; bxyz[2+4*(kj+l1)] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + 
zt6*conjf(zt6)); wp += (double) at1; bxyz[4*(k1+l1)] = zero; bxyz[1+4*(k1+l1)] = zero; bxyz[2+4*(k1+l1)] = zero; exyz[4*(k1+l1)] = zero; exyz[1+4*(k1+l1)] = zero; exyz[2+4*(k1+l1)] = zero; } /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; /* add ky to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(12336),v_zero); v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(771),v_zero); /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* dkx = dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkx = _mm512_mul_ps(v_dnx,v_dkx); /* add kx to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084), v_dkx); v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336), v_dkx); /* afdt = adt*cimagf(ffc[j+ll]); */ v_afdt = _mm512_mask_loadunpacklo_ps(v_zero, _mm512_int2mask(15),(float *)&ffc[j+ll]); v_afdt = _mm512_mask_loadunpackhi_ps(v_afdt, _mm512_int2mask(15),(float *)&ffc[j+ll+8]); v_afdt = _mm512_permute4f128_ps(v_afdt,0); v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt, _mm512_int2mask(13260),(__m512i)v_afdt,78); v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt, _mm512_int2mask(21845),(__m512i)v_afdt,177); v_afdt = _mm512_mul_ps(v_adt,v_afdt); /* update magnetic field half time step, kz > 0 */ /* zt1 = -cimagf(exyz[2+4*(j+lj)]) */ /* + crealf(exyz[2+4*(j+lj)])*_Complex_I; */ /* zt2 = -cimagf(exyz[1+4*(j+lj)]) */ /* + crealf(exyz[1+4*(j+lj)])*_Complex_I; */ /* zt3 = -cimagf(exyz[4*(j+lj)]) */ /* + crealf(exyz[4*(j+lj)])*_Complex_I; */ v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+lj)]); v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero, v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt4 = bxyz[4*(j+lj)] + dth*(dkz*zt2); */ /* zt5 = bxyz[1+4*(j+lj)] - dth*(dkz*zt3 - dkx*zt1); */ /* zt6 = bxyz[2+4*(j+lj)] - dth*(dkx*zt2); */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); 
v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+lj)]); v_zt5 = _mm512_sub_ps(v_zt2,v_zt1); /* update electric field whole time step */ /* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */ /* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */ /* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),v_zero, v_zt5); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt7 = exyz[4*(j+lj)] - cdt*(dkz*zt2) - afdt*cu[4*(j+lj)]; */ /* zt8 = exyz[1+4*(j+lj)] + cdt*(dkz*zt3 - dkx*zt1) */ /* - afdt*cu[1+4*(j+lj)]; */ /* zt9 = exyz[2+4*(j+lj)] + cdt*(dkx*zt2) - afdt*cu[2+4*(j+lj)]; */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2), v_zt4); v_zt2 = _mm512_load_ps((float *)&cu[4*(j+lj)]); v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191), v_afdt,v_zt2); v_zt4 = _mm512_sub_ps(v_zt1,v_zt2); /* update magnetic field half time step and store electric field */ /* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */ /* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */ /* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero, v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* exyz[4*(j+lj)] = zt7; */ /* exyz[1+4*(j+lj)] = zt8; */ /* exyz[2+4*(j+lj)] = zt9; */ /* zero out kx = 0 mode */ if (j==0) { v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&exyz[4*(j+lj)], _mm512_int2mask(65280),v_zt4); } else { _mm512_store_ps((float *)&exyz[4*(j+lj)],v_zt4); } /* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + 
zt9*conjf(zt9)); */ v_zt6 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4)); /* zt4 += dth*(dkz*zt2); */ /* zt5 -= dth*(dkz*zt3 - dkx*zt1); */ /* zt6 -= dth*(dkx*zt2); */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt5 = _mm512_sub_ps(v_zt5,v_zt1); /* bxyz[4*(j+lj)] = zt4; */ /* bxyz[1+4*(j+lj)] = zt5; */ /* bxyz[2+4*(j+lj)] = zt6; */ /* zero out kx = 0 mode */ if (j==0) { v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&bxyz[4*(j+lj)], _mm512_int2mask(65280),v_zt5); } else { _mm512_store_ps((float *)&bxyz[4*(j+lj)],v_zt5); } /* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); */ v_zt7 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5)); /* bxyz[4*(j+k1+lj)] = zero; */ /* bxyz[1+4*(j+k1+lj)] = zero; */ /* bxyz[2+4*(j+k1+lj)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+k1+lj)],v_zero); /* exyz[4*(j+k1+lj)] = zero; */ /* exyz[1+4*(j+k1+lj)] = zero; */ /* exyz[2+4*(j+k1+lj)] = zero; */ _mm512_store_ps((float *)&exyz[4*(j+k1+lj)],v_zero); /* update magnetic field half time step, kz > 0 */ /* zt1 = -cimagf(exyz[2+4*(j+l1)]) */ /* + crealf(exyz[2+4*(j+l1)])*_Complex_I; */ /* zt2 = -cimagf(exyz[1+4*(j+l1)]) */ /* + crealf(exyz[1+4*(j+l1)])*_Complex_I; */ /* zt3 = -cimagf(exyz[4*(j+l1)]) */ /* + crealf(exyz[4*(j+l1)])*_Complex_I; */ v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+l1)]); v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero, v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); v_at2 = _mm512_mask_sub_ps(v_dk1,_mm512_int2mask(771),v_zero, v_dk1); v_at3 = _mm512_mask_sub_ps(v_dk2,_mm512_int2mask(3084),v_zero, v_dk2); /* zt4 = bxyz[4*(j+l1)] - dth*(dkz*zt2); */ /* zt5 = bxyz[1+4*(j+l1)] + dth*(dkz*zt3 + dkx*zt1); */ /* zt6 = bxyz[2+4*(j+l1)] - dth*(dkx*zt2); */ v_zt1 = 
_mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+l1)]); v_zt5 = _mm512_sub_ps(v_zt2,v_zt1); /* update electric field whole time step */ /* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */ /* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */ /* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),v_zero, v_zt5); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt7 = exyz[4*(j+l1)] + cdt*(dkz*zt2) - afdt*cu[4*(j+l1)]; */ /* zt8 = exyz[1+4*(j+l1)] - cdt*(dkz*zt3 + dkx*zt1) */ /* - afdt*cu[1+4*(j+l1)]; */ /* zt9 = exyz[2+4*(j+l1)] + cdt*(dkx*zt2) - afdt*cu[2+4*(j+l1)]; */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2), v_zt4); v_zt2 = _mm512_load_ps((float *)&cu[4*(j+l1)]); v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191), v_afdt,v_zt2); v_zt4 = _mm512_sub_ps(v_zt1,v_zt2); /* update magnetic field half time step and store electric field */ /* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */ /* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */ /* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero, v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* exyz[4*(j+l1)] = zt7; */ /* exyz[1+4*(j+l1)] = zt8; */ /* exyz[2+4*(j+l1)] = zt9; */ /* zero out kx = 0 mode */ if (j==0) { v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&exyz[4*(j+l1)], _mm512_int2mask(65280),v_zt4); } else { _mm512_store_ps((float *)&exyz[4*(j+l1)],v_zt4); } /* ws += anorm*(zt7*conjf(zt7) 
+ zt8*conjf(zt8) + zt9*conjf(zt9)); */ v_zt6 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4), v_zt6); /* zt4 -= dth*(dkz*zt2); */ /* zt5 += dth*(dkz*zt3 + dkx*zt1); */ /* zt6 -= dth*(dkx*zt2); */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt5 = _mm512_sub_ps(v_zt5,v_zt1); /* bxyz[4*(j+l1)] = zt4; */ /* bxyz[1+4*(j+l1)] = zt5; */ /* bxyz[2+4*(j+l1)] = zt6; */ /* zero out kx = 0 mode */ if (j==0) { v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&bxyz[4*(j+l1)], _mm512_int2mask(65280),v_zt5); } else { _mm512_store_ps((float *)&bxyz[4*(j+l1)],v_zt5); } /* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); */ v_zt7 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5), v_zt7); /* convert to double precision before accumulating */ v_ws = _mm512_add_pd(v_ws,_mm512_cvtpslo_pd(v_zt6)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt6,78)); v_ws = _mm512_add_pd(v_ws,v_d); v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt7)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt7,78)); v_wp = _mm512_add_pd(v_wp,v_d); /* bxyz[4*(j+k1+l1)] = zero; */ /* bxyz[1+4*(j+k1+l1)] = zero; */ /* bxyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero); /* exyz[4*(j+k1+l1)] = zero; */ /* exyz[1+4*(j+k1+l1)] = zero; */ /* exyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&exyz[4*(j+k1+l1)],v_zero); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { dkx = dnx*(float) j; afdt = adt*cimagf(ffc[j+ll]); /* update magnetic field half time step, kz > 0 */ zt1 = -cimagf(exyz[2+4*(j+lj)]) + crealf(exyz[2+4*(j+lj)])*_Complex_I; zt2 = -cimagf(exyz[1+4*(j+lj)]) + crealf(exyz[1+4*(j+lj)])*_Complex_I; zt3 = -cimagf(exyz[4*(j+lj)]) + crealf(exyz[4*(j+lj)])*_Complex_I; zt4 = 
bxyz[4*(j+lj)] + dth*(dkz*zt2); zt5 = bxyz[1+4*(j+lj)] - dth*(dkz*zt3 - dkx*zt1); zt6 = bxyz[2+4*(j+lj)] - dth*(dkx*zt2); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*(j+lj)] - cdt*(dkz*zt2) - afdt*cu[4*(j+lj)]; zt8 = exyz[1+4*(j+lj)] + cdt*(dkz*zt3 - dkx*zt1) - afdt*cu[1+4*(j+lj)]; zt9 = exyz[2+4*(j+lj)] + cdt*(dkx*zt2) - afdt*cu[2+4*(j+lj)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*(j+lj)] = zt7; exyz[1+4*(j+lj)] = zt8; exyz[2+4*(j+lj)] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); ws += (double) at1; zt4 += dth*(dkz*zt2); zt5 -= dth*(dkz*zt3 - dkx*zt1); zt6 -= dth*(dkx*zt2); bxyz[4*(j+lj)] = zt4; bxyz[1+4*(j+lj)] = zt5; bxyz[2+4*(j+lj)] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); wp += (double) at1; bxyz[4*(j+k1+lj)] = zero; bxyz[1+4*(j+k1+lj)] = zero; bxyz[2+4*(j+k1+lj)] = zero; exyz[4*(j+k1+lj)] = zero; exyz[1+4*(j+k1+lj)] = zero; exyz[2+4*(j+k1+lj)] = zero; /* update magnetic field half time step, kz > 0 */ zt1 = -cimagf(exyz[2+4*(j+l1)]) + crealf(exyz[2+4*(j+l1)])*_Complex_I; zt2 = -cimagf(exyz[1+4*(j+l1)]) + crealf(exyz[1+4*(j+l1)])*_Complex_I; zt3 = -cimagf(exyz[4*(j+l1)]) + crealf(exyz[4*(j+l1)])*_Complex_I; zt4 = bxyz[4*(j+l1)] - dth*(dkz*zt2); zt5 = bxyz[1+4*(j+l1)] + dth*(dkz*zt3 + dkx*zt1); zt6 = bxyz[2+4*(j+l1)] - dth*(dkx*zt2); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*(j+l1)] + cdt*(dkz*zt2) - afdt*cu[4*(j+l1)]; zt8 = exyz[1+4*(j+l1)] - cdt*(dkz*zt3 + dkx*zt1) - afdt*cu[1+4*(j+l1)]; zt9 = exyz[2+4*(j+l1)] + cdt*(dkx*zt2) - 
afdt*cu[2+4*(j+l1)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*(j+l1)] = zt7; exyz[1+4*(j+l1)] = zt8; exyz[2+4*(j+l1)] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); ws += (double) at1; zt4 -= dth*(dkz*zt2); zt5 += dth*(dkz*zt3 + dkx*zt1); zt6 -= dth*(dkx*zt2); bxyz[4*(j+l1)] = zt4; bxyz[1+4*(j+l1)] = zt5; bxyz[2+4*(j+l1)] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); wp += (double) at1; bxyz[4*(j+k1+l1)] = zero; bxyz[1+4*(j+k1+l1)] = zero; bxyz[2+4*(j+k1+l1)] = zero; exyz[4*(j+k1+l1)] = zero; exyz[1+4*(j+k1+l1)] = zero; exyz[2+4*(j+k1+l1)] = zero; } /* mode numbers kx = 0, nx/2 */ afdt = adt*cimagf(ffc[ll]); /* update magnetic field half time step */ zt2 = -cimagf(exyz[1+4*(lj)]) + crealf(exyz[1+4*(lj)])*_Complex_I; zt3 = -cimagf(exyz[4*(lj)]) + crealf(exyz[4*(lj)])*_Complex_I; zt4 = bxyz[4*lj] + dth*(dkz*zt2); zt5 = bxyz[1+4*lj] - dth*(dkz*zt3); /* update electric field whole time step */ zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*lj] - cdt*(dkz*zt2) - afdt*cu[4*lj]; zt8 = exyz[1+4*lj] + cdt*(dkz*zt3) - afdt*cu[1+4*lj]; /* update magnetic field half time step and store electric field */ zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*lj] = zt7; exyz[1+4*lj] = zt8; exyz[2+4*lj] = zero; at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8)); ws += (double) at1; zt4 += dth*(dkz*zt2); zt5 -= dth*(dkz*zt3); bxyz[4*lj] = zt4; bxyz[1+4*lj] = zt5; bxyz[2+4*lj] = zero; at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5)); wp += (double) at1; bxyz[4*(k1+lj)] = zero; bxyz[1+4*(k1+lj)] = zero; bxyz[2+4*(k1+lj)] = zero; exyz[4*(k1+lj)] = zero; exyz[1+4*(k1+lj)] = zero; exyz[2+4*(k1+lj)] = zero; bxyz[4*l1] = zero; bxyz[1+4*l1] = zero; bxyz[2+4*l1] = zero; exyz[4*l1] = 
zero; exyz[1+4*l1] = zero; exyz[2+4*l1] = zero; bxyz[4*(k1+l1)] = zero; bxyz[1+4*(k1+l1)] = zero; bxyz[2+4*(k1+l1)] = zero; exyz[4*(k1+l1)] = zero; exyz[1+4*(k1+l1)] = zero; /* sum1 += ws; */ _mm512_store_pd(&dd[0],v_ws); for (j = 1; j < 8; j++) { dd[0] += dd[j]; } sum1 += (ws + dd[0]); /* sum2 += wp; */ _mm512_store_pd(&dd[0],v_wp); for (j = 1; j < 8; j++) { dd[0] += dd[j]; } sum2 += (wp + dd[0]); } } /* mode numbers kz = 0, nz/2 */ l1 = nxvyh*nzh; sum3 = 0.0; sum4 = 0.0; #pragma omp parallel for \ private(j,k,k1,kk,kj,dky,dkx,afdt,at1,zt1,zt2,zt3,zt4,zt5,zt6,zt7,zt8, \ zt9,ws,wp,v_it,v_dkx,v_dky,v_dk1,v_dk2,v_afdt,v_at2,v_at3,v_zt1,v_zt2, \ v_zt3,v_zt4,v_zt5,v_zt6,v_zt7,v_d,v_ws,v_wp,dd) \ reduction(+:sum3,sum4) for (k = 1; k < nyh; k++) { /* dky = dny*(float) k; */ v_it = _mm512_set1_epi32(k); v_dky = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dky = _mm512_mul_ps(v_dny,v_dky); kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; ws = 0.0; wp = 0.0; v_ws = _mm512_set1_pd(0.0); v_wp = _mm512_set1_pd(0.0); /* add ky to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(12336),v_dky); v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(771),v_dky); /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* dkx = dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkx = _mm512_mul_ps(v_dnx,v_dkx); /* add kx to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_dk1,_mm512_int2mask(3084), v_dkx); v_dk2 = _mm512_mask_mov_ps(v_dk2,_mm512_int2mask(12336), v_dkx); /* afdt = adt*cimagf(ffc[j+kk]); */ v_afdt = _mm512_mask_loadunpacklo_ps(v_zero, _mm512_int2mask(15),(float *)&ffc[j+kk]); v_afdt = _mm512_mask_loadunpackhi_ps(v_afdt, _mm512_int2mask(15),(float *)&ffc[j+kk+8]); v_afdt = _mm512_permute4f128_ps(v_afdt,0); v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt, 
_mm512_int2mask(13260),(__m512i)v_afdt,78); v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt, _mm512_int2mask(21845),(__m512i)v_afdt,177); v_afdt = _mm512_mul_ps(v_adt,v_afdt); /* update magnetic field half time step, ky > 0 */ /* zt1 = -cimagf(exyz[2+4*(j+kj)]) */ /* + crealf(exyz[2+4*(j+kj)])*_Complex_I; */ /* zt2 = -cimagf(exyz[1+4*(j+kj)]) */ /* + crealf(exyz[1+4*(j+kj)])*_Complex_I; */ /* zt3 = -cimagf(exyz[4*(j+kj)]) */ /* + crealf(exyz[4*(j+kj)])*_Complex_I; */ v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+kj)]); v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero, v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt4 = bxyz[4*(j+kj)] - dth*(dky*zt1); */ /* zt5 = bxyz[1+4*(j+kj)] + dth*(dkx*zt1); */ /* zt6 = bxyz[2+4*(j+kj)] - dth*(dkx*zt2 - dky*zt3); */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+kj)]); v_zt5 = _mm512_sub_ps(v_zt2,v_zt1); /* update electric field whole time step */ /* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */ /* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */ /* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),v_zero, v_zt5); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt7 = exyz[4*(j+kj)] + cdt*(dky*zt1) - afdt*cu[4*(j+kj)]; */ /* zt8 = exyz[1+4*(j+kj)] - cdt*(dkx*zt1) - afdt*cu[1+4*(j+kj)]; */ /* zt9 = exyz[2+4*(j+kj)] + cdt*(dkx*zt2 - dky*zt3) */ /* - afdt*cu[2+4*(j+kj)]; */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2), v_zt4); v_zt2 = _mm512_load_ps((float *)&cu[4*(j+kj)]); 
v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191), v_afdt,v_zt2); v_zt4 = _mm512_sub_ps(v_zt1,v_zt2); /* update magnetic field half time step and store electric field */ /* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */ /* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */ /* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero, v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* exyz[4*(j+kj)] = zt7; */ /* exyz[1+4*(j+kj)] = zt8; */ /* exyz[2+4*(j+kj)] = zt9; */ /* zero out kx = 0 mode */ if (j==0) { v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&exyz[4*(j+kj)], _mm512_int2mask(65280),v_zt4); } else { _mm512_store_ps((float *)&exyz[4*(j+kj)],v_zt4); } /* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); */ v_zt6 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4)); /* zt4 -= dth*(dky*zt1); */ /* zt5 += dth*(dkx*zt1); */ /* zt6 -= dth*(dkx*zt2 - dky*zt3); */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt5 = _mm512_sub_ps(v_zt5,v_zt1); /* bxyz[4*(j+kj)] = zt4; */ /* bxyz[1+4*(j+kj)] = zt5; */ /* bxyz[2+4*(j+kj)] = zt6; */ /* zero out kx = 0 mode */ if (j==0) { v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&bxyz[4*(j+kj)], _mm512_int2mask(65280),v_zt5); } else { _mm512_store_ps((float *)&bxyz[4*(j+kj)],v_zt5); } /* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); */ v_zt7 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5)); /* update magnetic field half time step, ky < 0 */ /* zt1 = -cimagf(exyz[2+4*(j+k1)]) */ /* + crealf(exyz[2+4*(j+k1)])*_Complex_I; */ /* zt2 = -cimagf(exyz[1+4*(j+k1)]) */ /* + crealf(exyz[1+4*(j+k1)])*_Complex_I; */ /* zt3 = 
-cimagf(exyz[4*(j+k1)]) */ /* + crealf(exyz[4*(j+k1)])*_Complex_I; */ v_zt4 = _mm512_load_ps((float *)&exyz[4*(j+k1)]); v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero, v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); v_at2 = _mm512_mask_sub_ps(v_dk1,_mm512_int2mask(12336),v_zero, v_dk1); v_at3 = _mm512_mask_sub_ps(v_dk2,_mm512_int2mask(771),v_zero, v_dk2); /* zt4 = bxyz[4*(j+k1)] + dth*(dky*zt1); */ /* zt5 = bxyz[1+4*(j+k1)] + dth*(dkx*zt1); */ /* zt6 = bxyz[2+4*(j+k1)] - dth*(dkx*zt2 + dky*zt3); */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt2 = _mm512_load_ps((float *)&bxyz[4*(j+k1)]); v_zt5 = _mm512_sub_ps(v_zt2,v_zt1); /* update electric field whole time step */ /* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */ /* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */ /* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),v_zero, v_zt5); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt7 = exyz[4*(j+k1)] - cdt*(dky*zt1) - afdt*cu[4*(j+k1)]; */ /* zt8 = exyz[1+4*(j+k1)] - cdt*(dkx*zt1) - afdt*cu[1+4*(j+k1)]; */ /* zt9 = exyz[2+4*(j+k1)] + cdt*(dkx*zt2 + dky*zt3) */ /* - afdt*cu[2+4*(j+k1)]; */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2),v_zt4); v_zt2 = _mm512_load_ps((float *)&cu[4*(j+k1)]); v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191), v_afdt,v_zt2); v_zt4 = _mm512_sub_ps(v_zt1,v_zt2); /* update magnetic field half time step and store electric field */ /* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */ /* zt2 = -cimagf(zt8) + 
crealf(zt8)*_Complex_I; */ /* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero, v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* exyz[4*(j+k1)] = zt7; */ /* exyz[1+4*(j+k1)] = zt8; */ /* exyz[2+4*(j+k1)] = zt9; */ /* zero out kx = 0 mode */ if (j==0) { v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&exyz[4*(j+k1)], _mm512_int2mask(65280),v_zt4); } else { _mm512_store_ps((float *)&exyz[4*(j+k1)],v_zt4); } /* ws += anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); */ v_zt6 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4), v_zt6); /* zt4 += dth*(dky*zt1); */ /* zt5 += dth*(dkx*zt1); */ /* zt6 -= dth*(dkx*zt2 + dky*zt3); */ v_zt1 = _mm512_mul_ps(v_at2,v_zt3); v_zt2 = _mm512_mul_ps(v_at3,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt5 = _mm512_sub_ps(v_zt5,v_zt1); /* bxyz[4*(j+k1)] = zt4; */ /* bxyz[1+4*(j+k1)] = zt5; */ /* bxyz[2+4*(j+k1)] = zt6; */ /* zero out kx = 0 mode */ if (j==0) { v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255), v_zero); _mm512_mask_store_ps((float *)&bxyz[4*(j+k1)], _mm512_int2mask(65280),v_zt5); } else { _mm512_store_ps((float *)&bxyz[4*(j+k1)],v_zt5); } /* wp += anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); */ v_zt7 = _mm512_fmadd_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5), v_zt7); /* convert to double precision before accumulating */ v_ws = _mm512_add_pd(v_ws,_mm512_cvtpslo_pd(v_zt6)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt6,78)); v_ws = _mm512_add_pd(v_ws,v_d); v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt7)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt7,78)); v_wp = _mm512_add_pd(v_wp,v_d); /* bxyz[4*(j+kj+l1)] = zero; */ /* bxyz[1+4*(j+kj+l1)] = zero; */ /* bxyz[2+4*(j+kj+l1)] = zero; */ _mm512_store_ps((float 
*)&bxyz[4*(j+kj+l1)],v_zero); /* exyz[4*(j+kj+l1)] = zero; */ /* exyz[1+4*(j+kj+l1)] = zero; */ /* exyz[2+4*(j+kj+l1)] = zero; */ _mm512_store_ps((float *)&exyz[4*(j+kj+l1)],v_zero); /* bxyz[4*(j+k1+l1)] = zero; */ /* bxyz[1+4*(j+k1+l1)] = zero; */ /* bxyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero); /* exyz[4*(j+k1+l1)] = zero; */ /* exyz[1+4*(j+k1+l1)] = zero; */ /* exyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&exyz[4*(j+k1+l1)],v_zero); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { dkx = dnx*(float) j; afdt = adt*cimagf(ffc[j+kk]); /* update magnetic field half time step, ky > 0 */ zt1 = -cimagf(exyz[2+4*(j+kj)]) + crealf(exyz[2+4*(j+kj)])*_Complex_I; zt2 = -cimagf(exyz[1+4*(j+kj)]) + crealf(exyz[1+4*(j+kj)])*_Complex_I; zt3 = -cimagf(exyz[4*(j+kj)]) + crealf(exyz[4*(j+kj)])*_Complex_I; zt4 = bxyz[4*(j+kj)] - dth*(dky*zt1); zt5 = bxyz[1+4*(j+kj)] + dth*(dkx*zt1); zt6 = bxyz[2+4*(j+kj)] - dth*(dkx*zt2 - dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*(j+kj)] + cdt*(dky*zt1) - afdt*cu[4*(j+kj)]; zt8 = exyz[1+4*(j+kj)] - cdt*(dkx*zt1) - afdt*cu[1+4*(j+kj)]; zt9 = exyz[2+4*(j+kj)] + cdt*(dkx*zt2 - dky*zt3) - afdt*cu[2+4*(j+kj)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*(j+kj)] = zt7; exyz[1+4*(j+kj)] = zt8; exyz[2+4*(j+kj)] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); ws += (double) at1; zt4 -= dth*(dky*zt1); zt5 += dth*(dkx*zt1); zt6 -= dth*(dkx*zt2 - dky*zt3); bxyz[4*(j+kj)] = zt4; bxyz[1+4*(j+kj)] = zt5; bxyz[2+4*(j+kj)] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); wp += (double) at1; /* update magnetic field half time step, ky < 
0 */ zt1 = -cimagf(exyz[2+4*(j+k1)]) + crealf(exyz[2+4*(j+k1)])*_Complex_I; zt2 = -cimagf(exyz[1+4*(j+k1)]) + crealf(exyz[1+4*(j+k1)])*_Complex_I; zt3 = -cimagf(exyz[4*(j+k1)]) + crealf(exyz[4*(j+k1)])*_Complex_I; zt4 = bxyz[4*(j+k1)] + dth*(dky*zt1); zt5 = bxyz[1+4*(j+k1)] + dth*(dkx*zt1); zt6 = bxyz[2+4*(j+k1)] - dth*(dkx*zt2 + dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*(j+k1)] - cdt*(dky*zt1) - afdt*cu[4*(j+k1)]; zt8 = exyz[1+4*(j+k1)] - cdt*(dkx*zt1) - afdt*cu[1+4*(j+k1)]; zt9 = exyz[2+4*(j+k1)] + cdt*(dkx*zt2 + dky*zt3) - afdt*cu[2+4*(j+k1)]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*(j+k1)] = zt7; exyz[1+4*(j+k1)] = zt8; exyz[2+4*(j+k1)] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); ws += (double) at1; zt4 += dth*(dky*zt1); zt5 += dth*(dkx*zt1); zt6 -= dth*(dkx*zt2 + dky*zt3); bxyz[4*(j+k1)] = zt4; bxyz[1+4*(j+k1)] = zt5; bxyz[2+4*(j+k1)] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); wp += (double) at1; bxyz[4*(j+kj+l1)] = zero; bxyz[1+4*(j+kj+l1)] = zero; bxyz[2+4*(j+kj+l1)] = zero; exyz[4*(j+kj+l1)] = zero; exyz[1+4*(j+kj+l1)] = zero; exyz[2+4*(j+kj+l1)] = zero; bxyz[4*(j+k1+l1)] = zero; bxyz[1+4*(j+k1+l1)] = zero; bxyz[2+4*(j+k1+l1)] = zero; exyz[4*(j+k1+l1)] = zero; exyz[1+4*(j+k1+l1)] = zero; exyz[2+4*(j+k1+l1)] = zero; } /* sum3 += ws; */ _mm512_store_pd(&dd[0],v_ws); for (j = 1; j < 8; j++) { dd[0] += dd[j]; } sum3 += (ws + dd[0]); /* sum4 += wp; */ _mm512_store_pd(&dd[0],v_wp); for (j = 1; j < 8; j++) { dd[0] += dd[j]; } sum4 += (wp + dd[0]); } /* mode numbers kx = 0, nx/2 */ ws = 0.0; wp = 0.0; v_ws = _mm512_set1_pd(0.0); v_wp = _mm512_setzero_pd(); for (k = 1; k < nyh; k++) { dky 
= dny*(float) k; kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; afdt = adt*cimagf(ffc[kk]); /* update magnetic field half time step */ zt1 = -cimagf(exyz[2+4*(kj)]) + crealf(exyz[2+4*(kj)])*_Complex_I; zt3 = -cimagf(exyz[4*(kj)]) + crealf(exyz[4*(kj)])*_Complex_I; zt4 = bxyz[4*kj] - dth*(dky*zt1); zt6 = bxyz[2+4*kj] + dth*(dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exyz[4*kj] + cdt*(dky*zt1) - afdt*cu[4*kj]; zt9 = exyz[2+4*kj] - cdt*(dky*zt3) - afdt*cu[2+4*kj]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exyz[4*kj] = zt7; exyz[1+4*kj] = zero; exyz[2+4*kj] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt9*conjf(zt9)); ws += (double) at1; zt4 -= dth*(dky*zt1); zt6 += dth*(dky*zt3); bxyz[4*kj] = zt4; bxyz[1+4*kj] = zero; bxyz[2+4*kj] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt6*conjf(zt6)); wp += (double) at1; bxyz[4*k1] = zero; bxyz[1+4*k1] = zero; bxyz[2+4*k1] = zero; exyz[4*k1] = zero; exyz[1+4*k1] = zero; exyz[2+4*k1] = zero; bxyz[4*(kj+l1)] = zero; bxyz[1+4*(kj+l1)] = zero; bxyz[2+4*(kj+l1)]= zero; exyz[4*(kj+l1)] = zero; exyz[1+4*(kj+l1)] = zero; exyz[2+4*(kj+l1)] = zero; bxyz[4*(k1+l1)] = zero; bxyz[1+4*(k1+l1)] = zero; bxyz[2+4*(k1+l1)] = zero; exyz[4*(k1+l1)] = zero; exyz[1+4*(k1+l1)] = zero; exyz[2+4*(k1+l1)] = zero; } /* mode numbers ky = 0, ny/2 */ k1 = nxvh*nyh; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* dkx = dnx*(float) j; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_dkx = _mm512_cvtfxpnt_round_adjustepi32_ps(v_it, _MM_FROUND_TO_ZERO,_MM_EXPADJ_NONE); v_dkx = _mm512_mul_ps(v_dnx,v_dkx); /* add kx to curl operators */ v_dk1 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(3084),v_dkx); v_dk2 = _mm512_mask_mov_ps(v_zero,_mm512_int2mask(12336),v_dkx); /* afdt = adt*cimagf(ffc[j]); */ v_afdt = 
_mm512_mask_loadunpacklo_ps(v_zero, _mm512_int2mask(15),(float *)&ffc[j]); v_afdt = _mm512_mask_loadunpackhi_ps(v_afdt, _mm512_int2mask(15),(float *)&ffc[j+8]); v_afdt = _mm512_permute4f128_ps(v_afdt,0); v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt, _mm512_int2mask(13260),(__m512i)v_afdt,78); v_afdt = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_afdt, _mm512_int2mask(21845),(__m512i)v_afdt,177); v_afdt = _mm512_mul_ps(v_adt,v_afdt); /* update magnetic field half time step */ /* zt1 = -cimagf(exyz[2+4*j]) + crealf(exyz[2+4*j])*_Complex_I; */ /* zt2 = -cimagf(exyz[1+4*j]) + crealf(exyz[1+4*j])*_Complex_I; */ v_zt4 = _mm512_load_ps((float *)&exyz[4*j]); v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero, v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt5 = bxyz[1+4*j] + dth*(dkx*zt1); */ /* zt6 = bxyz[2+4*j] - dth*(dkx*zt2); */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt2 = _mm512_load_ps((float *)&bxyz[4*j]); v_zt5 = _mm512_sub_ps(v_zt2,v_zt1); /* update electric field whole time step */ /* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */ /* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt5,_mm512_int2mask(43690),v_zero, v_zt5); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* zt8 = exyz[1+4*j] - cdt*(dkx*zt1) - afdt*cu[1+4*j]; */ /* zt9 = exyz[2+4*j] + cdt*(dkx*zt2) - afdt*cu[2+4*j]; */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_fmadd_ps(v_cdt,_mm512_sub_ps(v_zt1,v_zt2),v_zt4); v_zt2 = _mm512_load_ps((float *)&cu[4*j]); v_zt2 = _mm512_mask_mul_ps(v_zero,_mm512_int2mask(16191),v_afdt, v_zt2); v_zt4 = 
_mm512_sub_ps(v_zt1,v_zt2); /* update magnetic field half time step and store electric field */ /* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */ /* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */ v_zt3 = _mm512_mask_sub_ps(v_zt4,_mm512_int2mask(43690),v_zero, v_zt4); v_zt3 = (__m512)_mm512_shuffle_epi32((__m512i)v_zt3,177); /* exyz[4*j] = zero; */ /* exyz[1+4*j] = zt8; */ /* exyz[2+4*j] = zt9; */ /* zero out kx = 0 mode */ if (j==0) { v_zt4 = _mm512_mask_mov_ps(v_zt4,_mm512_int2mask(255),v_zero); _mm512_mask_store_ps((float *)&exyz[4*j], _mm512_int2mask(65280),v_zt4); } else { _mm512_store_ps((float *)&exyz[4*j],v_zt4); } /* ws += anorm*(zt8*conjf(zt8) + zt9*conjf(zt9)); */ v_zt6 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt4,v_zt4)); /* zt5 += dth*(dkx*zt1); */ /* zt6 -= dth*(dkx*zt2); */ v_zt1 = _mm512_mul_ps(v_dk1,v_zt3); v_zt2 = _mm512_mul_ps(v_dk2,v_zt3); v_zt1 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_zt1); v_zt2 = (__m512)_mm512_permutevar_epi32(v_m,(__m512i)v_zt2); v_zt1 = _mm512_mul_ps(v_dth,_mm512_sub_ps(v_zt1,v_zt2)); v_zt5 = _mm512_sub_ps(v_zt5,v_zt1); /* bxyz[4*j] = zero; */ /* bxyz[1+4*j] = zt5; */ /* bxyz[2+4*j] = zt6; */ /* zero out kx = 0 mode */ if (j==0) { v_zt5 = _mm512_mask_mov_ps(v_zt5,_mm512_int2mask(255),v_zero); _mm512_mask_store_ps((float *)&bxyz[4*j], _mm512_int2mask(65280),v_zt5); } else { _mm512_store_ps((float *)&bxyz[4*j],v_zt5); } /* wp += anorm*(zt5*conjf(zt5) + zt6*conjf(zt6)); */ v_zt7 = _mm512_mul_ps(v_anorm,_mm512_mul_ps(v_zt5,v_zt5)); /* convert to double precision before accumulating */ v_ws = _mm512_add_pd(v_ws,_mm512_cvtpslo_pd(v_zt6)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt6,78)); v_ws = _mm512_add_pd(v_ws,v_d); v_wp = _mm512_add_pd(v_wp,_mm512_cvtpslo_pd(v_zt7)); v_d = _mm512_cvtpslo_pd(_mm512_permute4f128_ps(v_zt7,78)); v_wp = _mm512_add_pd(v_wp,v_d); /* bxyz[4*(j+k1)] = zero; */ /* bxyz[1+4*(j+k1)] = zero; */ /* bxyz[2+4*(j+k1)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+k1)],v_zero); /* 
exyz[4*(j+k1)] = zero; */ /* exyz[1+4*(j+k1)] = zero; */ /* exyz[2+4*(j+k1)] = zero; */ _mm512_store_ps((float *)&exyz[4*(j+k1)],v_zero); /* bxyz[4*(j+l1)] = zero; */ /* bxyz[1+4*(j+l1)] = zero; */ /* bxyz[2+4*(j+l1)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+l1)],v_zero); /* exyz[4*(j+l1)] = zero; */ /* exyz[1+4*(j+l1)] = zero; */ /* exyz[2+4*(j+l1)] = zero; */ _mm512_store_ps((float *)&exyz[4*(j+l1)],v_zero); /* bxyz[4*(j+k1+l1)] = zero; */ /* bxyz[1+4*(j+k1+l1)] = zero; */ /* bxyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&bxyz[4*(j+k1+l1)],v_zero); /* exyz[4*(j+k1+l1)] = zero; */ /* exyz[1+4*(j+k1+l1)] = zero; */ /* exyz[2+4*(j+k1+l1)] = zero; */ _mm512_store_ps((float *)&exyz[4*(j+k1+l1)],v_zero); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { dkx = dnx*(float) j; afdt = adt*cimagf(ffc[j]); /* update magnetic field half time step */ zt1 = -cimagf(exyz[2+4*j]) + crealf(exyz[2+4*j])*_Complex_I; zt2 = -cimagf(exyz[1+4*j]) + crealf(exyz[1+4*j])*_Complex_I; zt5 = bxyz[1+4*j] + dth*(dkx*zt1); zt6 = bxyz[2+4*j] - dth*(dkx*zt2); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; zt8 = exyz[1+4*j] - cdt*(dkx*zt1) - afdt*cu[1+4*j]; zt9 = exyz[2+4*j] + cdt*(dkx*zt2) - afdt*cu[2+4*j]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; exyz[4*j] = zero; exyz[1+4*j] = zt8; exyz[2+4*j] = zt9; at1 = anorm*(zt8*conjf(zt8) + zt9*conjf(zt9)); ws += (double) at1; zt5 += dth*(dkx*zt1); zt6 -= dth*(dkx*zt2); bxyz[4*j] = zero; bxyz[1+4*j] = zt5; bxyz[2+4*j] = zt6; at1 = anorm*(zt5*conjf(zt5) + zt6*conjf(zt6)); wp += (double) at1; bxyz[4*(j+k1)] = zero; bxyz[1+4*(j+k1)] = zero; bxyz[2+4*(j+k1)] = zero; exyz[4*(j+k1)] = zero; exyz[1+4*(j+k1)] = zero; exyz[2+4*(j+k1)] = zero; bxyz[4*(j+l1)] = zero; bxyz[1+4*(j+l1)] = zero; bxyz[2+4*(j+l1)] = zero; exyz[4*(j+l1)] = 
zero; exyz[1+4*(j+l1)] = zero; exyz[2+4*(j+l1)] = zero; bxyz[4*(j+k1+l1)] = zero; bxyz[1+4*(j+k1+l1)] = zero; bxyz[2+4*(j+k1+l1)] = zero; exyz[4*(j+k1+l1)] = zero; exyz[1+4*(j+k1+l1)] = zero; exyz[2+4*(j+k1+l1)] = zero; } bxyz[0] = zero; bxyz[1] = zero; bxyz[2] = zero; exyz[0] = zero; exyz[1] = zero; exyz[2]= zero; bxyz[4*k1] = zero; bxyz[1+4*k1] = zero; bxyz[2+4*k1] = zero; exyz[4*k1] = zero; exyz[1+4*k1] = zero; exyz[2+4*k1] = zero; bxyz[4*l1] = zero; bxyz[1+4*l1] = zero; bxyz[2+4*l1] = zero; exyz[4*l1] = zero; exyz[1+4*l1] = zero; exyz[2+4*l1] = zero; bxyz[4*(k1+l1)] = zero; bxyz[1+4*(k1+l1)] = zero; bxyz[2+4*(k1+l1)] = zero; exyz[4*(k1+l1)] = zero; exyz[1+4*(k1+l1)] = zero; exyz[2+4*(k1+l1)] = zero; /* *wf = ws*((float) nx)*((float) ny)*((float) nz); */ /* sum3 += ws; */ _mm512_store_pd(&dd[0],v_ws); for (j = 1; j < 8; j++) { dd[0] += dd[j]; } sum3 += (ws + dd[0]); *wf = (sum1 + sum3)*((float) nx)*((float) ny)*((float) nz); /* *wm = c2*wp*((float) nx)*((float) ny)*((float) nz); */ /* sum4 += wp; */ _mm512_store_pd(&dd[0],v_wp); for (j = 1; j < 8; j++) { dd[0] += dd[j]; } sum4 += (wp + dd[0]); *wm = c2*(sum2 + sum4)*((float) nx)*((float) ny)*((float) nz); return; } /*--------------------------------------------------------------------*/ void ckncmemfield3(float complex fxyz[], float complex exyz[], float complex ffc[], int isign, int nx, int ny, int nz, int nxvh, int nyv, int nzv, int nxhd, int nyhd, int nzhd) { /* this subroutine either adds complex vector fields if isign > 0 or copies complex vector fields if isign < 0 includes additional smoothing requires KNC, fxyz, exyz, ffc need to be 64 byte aligned nxhd needs to be a multiple of 8 nxvh needs to be a multiple of 2 fxyz, exyz needs to have 4 components local data */ int j, k, l, nxh, nyh, nzh, nxhs, itn, k1, l1, kk, kj, ll, lj; int nxyhd, nxvyh; float at1; __m512 v_at1, v_zero, v_zt1, v_zt2; nxh = nx/2; nyh = 1 > ny/2 ? 1 : ny/2; nzh = 1 > nz/2 ? 1 : nz/2; nxhs = 2*(nxh/2); itn = 1 > nxhs ? 
1 : nxhs; nxyhd = nxhd*nyhd; nxvyh = nxvh*nyv; v_zero = _mm512_setzero_ps(); /* add the fields */ if (isign > 0) { #pragma omp parallel { #pragma omp for nowait \ private(j,k,l,k1,l1,kk,kj,ll,lj,at1,v_at1,v_zt1,v_zt2) for (l = 1; l < nzh; l++) { ll = nxyhd*l; lj = nxvyh*l; l1 = nxvyh*nz - lj; for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = cimagf(ffc[j+kk+ll]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero, _mm512_int2mask(15),(float *)&ffc[j+kk+ll]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1, _mm512_int2mask(15),(float *)&ffc[j+kk+ll+8]); v_at1 = _mm512_permute4f128_ps(v_at1,0); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(21845),(__m512i)v_at1,177); /* fxyz[4*(j+kj+lj)] += exyz[4*(j+kj+lj)]*at1; */ /* fxyz[1+4*(j+kj+lj)] += exyz[1+4*(j+kj+lj)]*at1; */ /* fxyz[2+4*(j+kj+lj)] += exyz[2+4*(j+kj+lj)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+lj)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+kj+lj)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+kj+lj)],v_zt2); /* fxyz[4*(j+k1+lj)] += exyz[4*(j+k1+lj)]*at1; */ /* fxyz[1+4*(j+k1+lj)] += exyz[1+4*(j+k1+lj)]*at1; */ /* fxyz[2+4*(j+k1+lj)] += exyz[2+4*(j+k1+lj)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+lj)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+lj)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],v_zt2); /* fxyz[4*(j+kj+l1)] += exyz[4*(j+kj+l1)]*at1; */ /* fxyz[1+4*(j+kj+l1)] += exyz[1+4*(j+kj+l1)]*at1; */ /* fxyz[2+4*(j+kj+l1)] += exyz[2+4*(j+kj+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+l1)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+kj+l1)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],v_zt2); /* 
fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1; */ /* fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; */ /* fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+l1)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = cimagf(ffc[j+kk+ll]); fxyz[4*(j+kj+lj)] += exyz[4*(j+kj+lj)]*at1; fxyz[1+4*(j+kj+lj)] += exyz[1+4*(j+kj+lj)]*at1; fxyz[2+4*(j+kj+lj)] += exyz[2+4*(j+kj+lj)]*at1; fxyz[4*(j+k1+lj)] += exyz[4*(j+k1+lj)]*at1; fxyz[1+4*(j+k1+lj)] += exyz[1+4*(j+k1+lj)]*at1; fxyz[2+4*(j+k1+lj)] += exyz[2+4*(j+k1+lj)]*at1; fxyz[4*(j+kj+l1)] += exyz[4*(j+kj+l1)]*at1; fxyz[1+4*(j+kj+l1)] += exyz[1+4*(j+kj+l1)]*at1; fxyz[2+4*(j+kj+l1)] += exyz[2+4*(j+kj+l1)]*at1; fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1; fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; } } k1 = nxvh*nyh; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = cimagf(ffc[j+ll]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero, _mm512_int2mask(15),(float *)&ffc[j+ll]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1, _mm512_int2mask(15),(float *)&ffc[j+kk+8]); v_at1 = _mm512_permute4f128_ps(v_at1,0); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(21845),(__m512i)v_at1,177); /* fxyz[4*(j+lj)] += exyz[4*(j+lj)]*at1; */ /* fxyz[1+4*(j+lj)] += exyz[1+4*(j+lj)]*at1; */ /* fxyz[2+4*(j+lj)] += exyz[2+4*(j+lj)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+lj)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+lj)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+lj)],v_zt2); /* fxyz[4*(j+k1+lj)] += exyz[4*(j+k1+lj)]*at1; */ /* fxyz[1+4*(j+k1+lj)] += exyz[1+4*(j+k1+lj)]*at1; */ /* 
fxyz[2+4*(j+k1+lj)] += exyz[2+4*(j+k1+lj)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+lj)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+lj)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],v_zt2); /* fxyz[4*(j+l1)] += exyz[4*(j+l1)]*at1; */ /* fxyz[1+4*(j+l1)] += exyz[1+4*(j+l1)]*at1; */ /* fxyz[2+4*(j+l1)] += exyz[2+4*(j+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+l1)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+l1)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+l1)],v_zt2); /* fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1; */ /* fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; */ /* fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+l1)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = cimagf(ffc[j+ll]); fxyz[4*(j+lj)] += exyz[4*(j+lj)]*at1; fxyz[1+4*(j+lj)] += exyz[1+4*(j+lj)]*at1; fxyz[2+4*(j+lj)] += exyz[2+4*(j+lj)]*at1; fxyz[4*(j+k1+lj)] += exyz[4*(j+k1+lj)]*at1; fxyz[1+4*(j+k1+lj)] += exyz[1+4*(j+k1+lj)]*at1; fxyz[2+4*(j+k1+lj)] += exyz[2+4*(j+k1+lj)]*at1; fxyz[4*(j+l1)] += exyz[4*(j+l1)]*at1; fxyz[1+4*(j+l1)] += exyz[1+4*(j+l1)]*at1; fxyz[2+4*(j+l1)] += exyz[2+4*(j+l1)]*at1; fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1; fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; } } } l1 = nxvyh*nzh; #pragma omp parallel for private(j,k,k1,kk,kj,at1,v_at1,v_zt1,v_zt2) for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = cimagf(ffc[j+kk]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero, _mm512_int2mask(15),(float *)&ffc[j+kk]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1, _mm512_int2mask(15),(float 
*)&ffc[j+kk+8]); v_at1 = _mm512_permute4f128_ps(v_at1,0); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(21845),(__m512i)v_at1,177); /* fxyz[4*(j+kj)] += exyz[4*(j+kj)]*at1; */ /* fxyz[1+4*(j+kj)] += exyz[1+4*(j+kj)]*at1; */ /* fxyz[2+4*(j+kj)] += exyz[2+4*(j+kj)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+kj)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+kj)],v_zt2); /* fxyz[4*(j+k1)] += exyz[4*(j+k1)]*at1; */ /* fxyz[1+4*(j+k1)] += exyz[1+4*(j+k1)]*at1; */ /* fxyz[2+4*(j+k1)] += exyz[2+4*(j+k1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+k1)],v_zt2); /* fxyz[4*(j+kj+l1)] += exyz[4*(j+kj+l1)]*at1; */ /* fxyz[1+4*(j+kj+l1)] += exyz[1+4*(j+kj+l1)]*at1; */ /* fxyz[2+4*(j+kj+l1)] += exyz[2+4*(j+kj+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+l1)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+kj+l1)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],v_zt2); /* fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1; */ /* fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; */ /* fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1+l1)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = cimagf(ffc[j+kk]); fxyz[4*(j+kj)] += exyz[4*(j+kj)]*at1; fxyz[1+4*(j+kj)] += exyz[1+4*(j+kj)]*at1; fxyz[2+4*(j+kj)] += exyz[2+4*(j+kj)]*at1; fxyz[4*(j+k1)] += exyz[4*(j+k1)]*at1; fxyz[1+4*(j+k1)] += exyz[1+4*(j+k1)]*at1; fxyz[2+4*(j+k1)] += exyz[2+4*(j+k1)]*at1; 
fxyz[4*(j+kj+l1)] += exyz[4*(j+kj+l1)]*at1; fxyz[1+4*(j+kj+l1)] += exyz[1+4*(j+kj+l1)]*at1; fxyz[2+4*(j+kj+l1)] += exyz[2+4*(j+kj+l1)]*at1; fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1; fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; } } k1 = nxvh*nyh; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = cimagf(ffc[j]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,_mm512_int2mask(15), (float *)&ffc[j]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1, _mm512_int2mask(15),(float *)&ffc[j+8]); v_at1 = _mm512_permute4f128_ps(v_at1,0); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(21845),(__m512i)v_at1,177); /* fxyz[4*j] += exyz[4*j]*at1; */ /* fxyz[1+4*j] += exyz[1+4*j]*at1; */ /* fxyz[2+4*j] += exyz[2+4*j]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*j]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*j]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*j],v_zt2); /* fxyz[4*(j+k1)] += exyz[4*(j+k1)]*at1; */ /* fxyz[1+4*(j+k1)] += exyz[1+4*(j+k1)]*at1; */ /* fxyz[2+4*(j+k1)] += exyz[2+4*(j+k1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+k1)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+k1)],v_zt2); /* fxyz[4*(j+l1)] += exyz[4*(j+l1)]*at1; */ /* fxyz[1+4*(j+l1)] += exyz[1+4*(j+l1)]*at1; */ /* fxyz[2+4*(j+l1)] += exyz[2+4*(j+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+l1)]); v_zt2 = _mm512_load_ps((float *)&fxyz[4*(j+l1)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+l1)],v_zt2); /* fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1; */ /* fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; */ /* fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]); v_zt2 = 
_mm512_load_ps((float *)&fxyz[4*(j+k1+l1)]); v_zt2 = _mm512_fmadd_ps(v_zt1,v_at1,v_zt2); _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = cimagf(ffc[j]); fxyz[4*j] += exyz[4*j]*at1; fxyz[1+4*j] += exyz[1+4*j]*at1; fxyz[2+4*j] += exyz[2+4*j]*at1; fxyz[4*(j+k1)] += exyz[4*(j+k1)]*at1; fxyz[1+4*(j+k1)] += exyz[1+4*(j+k1)]*at1; fxyz[2+4*(j+k1)] += exyz[2+4*(j+k1)]*at1; fxyz[4*(j+l1)] += exyz[4*(j+l1)]*at1; fxyz[1+4*(j+l1)] += exyz[1+4*(j+l1)]*at1; fxyz[2+4*(j+l1)] += exyz[2+4*(j+l1)]*at1; fxyz[4*(j+k1+l1)] += exyz[4*(j+k1+l1)]*at1; fxyz[1+4*(j+k1+l1)] += exyz[1+4*(j+k1+l1)]*at1; fxyz[2+4*(j+k1+l1)] += exyz[2+4*(j+k1+l1)]*at1; } } /* copy the fields */ else if (isign < 0) { #pragma omp parallel { #pragma omp for nowait \ private(j,k,l,k1,l1,kk,kj,ll,lj,at1,v_at1,v_zt1,v_zt2) for (l = 1; l < nzh; l++) { ll = nxyhd*l; lj = nxvyh*l; l1 = nxvyh*nz - lj; for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = cimagf(ffc[j+kk+ll]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero, _mm512_int2mask(15),(float *)&ffc[j+kk+ll]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1, _mm512_int2mask(15),(float *)&ffc[j+kk+ll+8]); v_at1 = _mm512_permute4f128_ps(v_at1,0); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(21845),(__m512i)v_at1,177); /* fxyz[4*(j+kj+lj)] = exyz[4*(j+kj+lj)]*at1; */ /* fxyz[1+4*(j+kj+lj)] = exyz[1+4*(j+kj+lj)]*at1; */ /* fxyz[2+4*(j+kj+lj)] = exyz[2+4*(j+kj+lj)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+lj)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+kj+lj)],v_zt2); /* fxyz[4*(j+k1+lj)] = exyz[4*(j+k1+lj)]*at1; */ /* fxyz[1+4*(j+k1+lj)] = exyz[1+4*(j+k1+lj)]*at1; */ /* fxyz[2+4*(j+k1+lj)] = exyz[2+4*(j+k1+lj)]*at1; */ v_zt1 = 
_mm512_load_ps((float *)&exyz[4*(j+k1+lj)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],v_zt2); /* fxyz[4*(j+kj+l1)] = exyz[4*(j+kj+l1)]*at1; */ /* fxyz[1+4*(j+kj+l1)] = exyz[1+4*(j+kj+l1)]*at1; */ /* fxyz[2+4*(j+kj+l1)] = exyz[2+4*(j+kj+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+l1)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],v_zt2); /* fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1; */ /* fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; */ /* fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = cimagf(ffc[j+kk+ll]); fxyz[4*(j+kj+lj)] = exyz[4*(j+kj+lj)]*at1; fxyz[1+4*(j+kj+lj)] = exyz[1+4*(j+kj+lj)]*at1; fxyz[2+4*(j+kj+lj)] = exyz[2+4*(j+kj+lj)]*at1; fxyz[4*(j+k1+lj)] = exyz[4*(j+k1+lj)]*at1; fxyz[1+4*(j+k1+lj)] = exyz[1+4*(j+k1+lj)]*at1; fxyz[2+4*(j+k1+lj)] = exyz[2+4*(j+k1+lj)]*at1; fxyz[4*(j+kj+l1)] = exyz[4*(j+kj+l1)]*at1; fxyz[1+4*(j+kj+l1)] = exyz[1+4*(j+kj+l1)]*at1; fxyz[2+4*(j+kj+l1)] = exyz[2+4*(j+kj+l1)]*at1; fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1; fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; } } k1 = nxvh*nyh; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = cimagf(ffc[j+ll]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero, _mm512_int2mask(15),(float *)&ffc[j+ll]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1, _mm512_int2mask(15),(float *)&ffc[j+kk+8]); v_at1 = _mm512_permute4f128_ps(v_at1,0); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(21845),(__m512i)v_at1,177); /* fxyz[4*(j+lj)] = exyz[4*(j+lj)]*at1; */ /* fxyz[1+4*(j+lj)] = exyz[1+4*(j+lj)]*at1; 
*/ /* fxyz[2+4*(j+lj)] = exyz[2+4*(j+lj)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+lj)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+lj)],v_zt2); /* fxyz[4*(j+k1+lj)] = exyz[4*(j+k1+lj)]*at1; */ /* fxyz[1+4*(j+k1+lj)] = exyz[1+4*(j+k1+lj)]*at1; */ /* fxyz[2+4*(j+k1+lj)] = exyz[2+4*(j+k1+lj)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+lj)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+k1+lj)],v_zt2); /* fxyz[4*(j+l1)] = exyz[4*(j+l1)]*at1; */ /* fxyz[1+4*(j+l1)] = exyz[1+4*(j+l1)]*at1; */ /* fxyz[2+4*(j+l1)] = exyz[2+4*(j+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+l1)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+l1)],v_zt2); /* fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1; */ /* fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; */ /* fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = cimagf(ffc[j+ll]); fxyz[4*(j+lj)] = exyz[4*(j+lj)]*at1; fxyz[1+4*(j+lj)] = exyz[1+4*(j+lj)]*at1; fxyz[2+4*(j+lj)] = exyz[2+4*(j+lj)]*at1; fxyz[4*(j+k1+lj)] = exyz[4*(j+k1+lj)]*at1; fxyz[1+4*(j+k1+lj)] = exyz[1+4*(j+k1+lj)]*at1; fxyz[2+4*(j+k1+lj)] = exyz[2+4*(j+k1+lj)]*at1; fxyz[4*(j+l1)] = exyz[4*(j+l1)]*at1; fxyz[1+4*(j+l1)] = exyz[1+4*(j+l1)]*at1; fxyz[2+4*(j+l1)] = exyz[2+4*(j+l1)]*at1; fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1; fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; } } } l1 = nxvyh*nzh; #pragma omp parallel for private(j,k,k1,kk,kj,at1,v_at1,v_zt1,v_zt2) for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = cimagf(ffc[j+kk]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero, _mm512_int2mask(15),(float 
*)&ffc[j+kk]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1, _mm512_int2mask(15),(float *)&ffc[j+kk+8]); v_at1 = _mm512_permute4f128_ps(v_at1,0); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(21845),(__m512i)v_at1,177); /* fxyz[4*(j+kj)] = exyz[4*(j+kj)]*at1; */ /* fxyz[1+4*(j+kj)] = exyz[1+4*(j+kj)]*at1; */ /* fxyz[2+4*(j+kj)] = exyz[2+4*(j+kj)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+kj)],v_zt2); /* fxyz[4*(j+k1)] = exyz[4*(j+k1)]*at1; */ /* fxyz[1+4*(j+k1)] = exyz[1+4*(j+k1)]*at1; */ /* fxyz[2+4*(j+k1)] = exyz[2+4*(j+k1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+k1)],v_zt2); /* fxyz[4*(j+kj+l1)] = exyz[4*(j+kj+l1)]*at1; */ /* fxyz[1+4*(j+kj+l1)] = exyz[1+4*(j+kj+l1)]*at1; */ /* fxyz[2+4*(j+kj+l1)] = exyz[2+4*(j+kj+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+kj+l1)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+kj+l1)],v_zt2); /* fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1; */ /* fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; */ /* fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = cimagf(ffc[j+kk]); fxyz[4*(j+kj)] = exyz[4*(j+kj)]*at1; fxyz[1+4*(j+kj)] = exyz[1+4*(j+kj)]*at1; fxyz[2+4*(j+kj)] = exyz[2+4*(j+kj)]*at1; fxyz[4*(j+k1)] = exyz[4*(j+k1)]*at1; fxyz[1+4*(j+k1)] = exyz[1+4*(j+k1)]*at1; fxyz[2+4*(j+k1)] = exyz[2+4*(j+k1)]*at1; fxyz[4*(j+kj+l1)] = exyz[4*(j+kj+l1)]*at1; fxyz[1+4*(j+kj+l1)] = exyz[1+4*(j+kj+l1)]*at1; fxyz[2+4*(j+kj+l1)] = exyz[2+4*(j+kj+l1)]*at1; fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1; 
fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; } } k1 = nxvh*nyh; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = cimagf(ffc[j]); */ v_at1 = _mm512_mask_loadunpacklo_ps(v_zero,_mm512_int2mask(15), (float *)&ffc[j]); v_at1 = _mm512_mask_loadunpackhi_ps(v_at1, _mm512_int2mask(15),(float *)&ffc[j+8]); v_at1 = _mm512_permute4f128_ps(v_at1,0); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(13260),(__m512i)v_at1,78); v_at1 = (__m512)_mm512_mask_shuffle_epi32((__m512i)v_at1, _mm512_int2mask(21845),(__m512i)v_at1,177); /* fxyz[4*j] = exyz[4*j]*at1; */ /* fxyz[1+4*j] = exyz[1+4*j]*at1; */ /* fxyz[2+4*j] = exyz[2+4*j]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*j]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*j],v_zt2); /* fxyz[4*(j+k1)] = exyz[4*(j+k1)]*at1; */ /* fxyz[1+4*(j+k1)] = exyz[1+4*(j+k1)]*at1; */ /* fxyz[2+4*(j+k1)] = exyz[2+4*(j+k1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+k1)],v_zt2); /* fxyz[4*(j+l1)] = exyz[4*(j+l1)]*at1; */ /* fxyz[1+4*(j+l1)] = exyz[1+4*(j+l1)]*at1; */ /* fxyz[2+4*(j+l1)] = exyz[2+4*(j+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+l1)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+l1)],v_zt2); /* fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1; */ /* fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; */ /* fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; */ v_zt1 = _mm512_load_ps((float *)&exyz[4*(j+k1+l1)]); v_zt2 = _mm512_mul_ps(v_zt1,v_at1); _mm512_store_ps((float *)&fxyz[4*(j+k1+l1)],v_zt2); } /* loop over remaining elements */ for (j = itn; j < nxh; j++) { at1 = cimagf(ffc[j]); fxyz[4*j] = exyz[4*j]*at1; fxyz[1+4*j] = exyz[1+4*j]*at1; fxyz[2+4*j] = exyz[2+4*j]*at1; fxyz[4*(j+k1)] = exyz[4*(j+k1)]*at1; fxyz[1+4*(j+k1)] = exyz[1+4*(j+k1)]*at1; fxyz[2+4*(j+k1)] = exyz[2+4*(j+k1)]*at1; 
fxyz[4*(j+l1)] = exyz[4*(j+l1)]*at1; fxyz[1+4*(j+l1)] = exyz[1+4*(j+l1)]*at1; fxyz[2+4*(j+l1)] = exyz[2+4*(j+l1)]*at1; fxyz[4*(j+k1+l1)] = exyz[4*(j+k1+l1)]*at1; fxyz[1+4*(j+k1+l1)] = exyz[1+4*(j+k1+l1)]*at1; fxyz[2+4*(j+k1+l1)] = exyz[2+4*(j+k1+l1)]*at1; } } return; } /*--------------------------------------------------------------------*/ void ckncfft3rmxy(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int indz, int nzi, int nzp, int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) { /* this subroutine performs the x-y part of a three dimensional real to complex fast fourier transform and its inverse, for a subset of z, using complex arithmetic, with OpenMP for isign = (-1,1), input: all, output: f for isign = -1, approximate flop count: N*(5*log2(N) + 19/2) for isign = 1, approximate flop count: N*(5*log2(N) + 15/2) where N = (nx/2)*ny*nz indx/indy/indz = exponent which determines length in x/y/z direction, where nx=2**indx, ny=2**indy, nz=2**indz if isign = -1, an inverse fourier transform in x and y is performed f[i][m][n] = (1/nx*ny*nz)*sum(f[i][k][j]*exp(-sqrt(-1)*2pi*n*j/nx)* exp(-sqrt(-1)*2pi*m*k/ny)) if isign = 1, a forward fourier transform in x and y is performed f[l][k][j] = sum(f[l][m][n]*exp(sqrt(-1)*2pi*n*j/nx)* exp(sqrt(-1)*2pi*m*k/ny)) mixup = array of bit reversed addresses sct = sine/cosine table nzi = initial z index used nzp = number of z indices used nxhd = first dimension of f nyd,nzd = second and third dimensions of f nxhyzd = maximum of (nx/2,ny,nz) nxyzhd = maximum of (nx,ny,nz)/2 fourier coefficients are stored as follows: f[l][k][j] = real, imaginary part of mode j,k,l where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for f[l][k][0] = real, imaginary part of mode nx/2,k,l, where ny/2+1 <= k < ny and 0 <= l < nz, and f[l][0][0] = real, imaginary part of mode nx/2,0,l, f[l][ny/2][0] = real, imaginary part mode nx/2,ny/2,l, where nz/2+1 <= l < nz, and imag(f[0][0][0]) = real part of mode nx/2,0,0 
imag(f[0][ny/2][0]) = real part of mode nx/2,ny/2,0 imag(f[nz/2][0][0]) = real part of mode nx/2,0,nz/2 imag(f[nz/2][ny/2][0]) = real part of mode nx/2,ny/2,nz/2 using jpl storage convention, as described in: E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained Distributed Memory Parallel Computers," Caltech CRPC Report 217-50, December 1993. requires KNC, f needs to be 64 byte aligned nxhd need to be a multiple of 8 written by viktor k. decyk, ucla local data */ int indx1, ndx1yz, nx, nxh, nxhh, ny, nyh; int nz, nxyz, nxhyz, nzt, nrx, nry, nrxb, nryb, nxhyd; int i, j, k, l, n, nn, j1, j2, k1, k2, ns, ns2, km, kmr, joff; int nss, nxhs, nxhhs, itn; float ani; float complex t1, t2, t3; __m512i v_j, v_kmr, v_m, v_n, v_it; __m512 v_zero, v_t1, v_t2, v_t3, v_t4, v_t5, v_ani; v_j = _mm512_set_epi32(7,7,6,6,5,5,4,4,3,3,2,2,1,1,0,0); if (isign==0) return; indx1 = indx - 1; ndx1yz = indx1 > indy ? indx1 : indy; ndx1yz = ndx1yz > indz ? ndx1yz : indz; nx = 1L<<indx; nxh = nx/2; nxhh = nx/4; ny = 1L<<indy; nyh = ny/2; nz = 1L<<indz; nxyz = nx > ny ? nx : ny; nxyz = nxyz > nz ? nxyz : nz; nxhyz = 1L<<ndx1yz; nzt = nzi + nzp - 1; nxhyd = nxhd*nyd; nxhs = 8*(nxh/8); nxhhs = 8*(nxhh/8); itn = 1 > nxhhs ? 
1 : nxhhs; v_m = _mm512_set_epi32(1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0); v_n = _mm512_set_epi32(1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14); v_zero = _mm512_setzero_ps(); v_t1 = _mm512_setzero_ps(); v_t2 = _mm512_setzero_ps(); v_t3 = _mm512_setzero_ps(); v_t4 = _mm512_setzero_ps(); if (isign > 0) goto L180; /* inverse fourier transform */ nrxb = nxhyz/nxh; nrx = nxyz/nxh; nryb = nxhyz/ny; nry = nxyz/ny; #pragma omp parallel for \ private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,j1,j2,nn,joff,ani,t1,t2,t3, \ v_it,v_kmr,v_t1,v_ani,v_t2,v_t3,v_t4,v_t5) for (n = nzi-1; n < nzt; n++) { nn = nxhyd*n; /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { for (i = 0; i < ny; i++) { joff = nxhd*i + nn; t1 = f[j1+joff]; f[j1+joff] = f[j+joff]; f[j+joff] = t1; } } } /* first transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; nss = 8*(ns/8); v_kmr = _mm512_set1_epi32(2*kmr); for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (i = 0; i < ny; i++) { joff = nxhd*i + nn; /* vector loop over elements in blocks of 8 */ for (j = 0; j < nss; j+=8) { /* t1 = sct[kmr*j]; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m); v_t1 = _mm512_i32gather_ps(v_it,(float *)sct,4); /* t2 = t1*f[j+k2+joff]; */ v_t2 = _mm512_load_ps((float *)&f[j+k2+joff]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[j+k2+joff] = f[j+k1+joff] - t2; */ v_t3 = _mm512_load_ps((float *)&f[j+k1+joff]); v_t4 = _mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[j+k2+joff],v_t4); /* f[j+k1+joff] += t2; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[j+k1+joff],v_t4); } /* loop over 
remaining elements */ for (j = nss; j < ns; j++) { t1 = sct[kmr*j]; t2 = t1*f[j+k2+joff]; f[j+k2+joff] = f[j+k1+joff] - t2; f[j+k1+joff] += t2; } } } ns = ns2; } /* unscramble coefficients and normalize */ kmr = nxyz/nx; ani = 0.5/(((float) nx)*((float) ny)*((float) nz)); v_ani = _mm512_set1_ps(ani); v_kmr = _mm512_set1_epi32(2*kmr); for (k = 0; k < ny; k++) { joff = nxhd*k + nn; /* vector loop over elements in blocks of 8 */ for (j = 0; j < nxhhs; j+=8) { /* t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m); v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4); v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(21845), v_zero,v_t3); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177); /* t2 = conjf(f[nxh-j+joff]); */ v_t2 = _mm512_loadunpacklo_ps(v_t2, (float *)&f[nxh-j+joff-7]); v_t2 = _mm512_loadunpackhi_ps(v_t2, (float *)&f[nxh-j+joff+1]); /* reverse data */ v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2); v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690), v_zero,v_t2); /* t1 = f[j+joff] + t2; */ v_t4 = _mm512_load_ps((float *)&f[j+joff]); v_t1 = _mm512_add_ps(v_t4,v_t2); /* t2 = (f[j+joff] - t2)*t3; */ v_t2 = _mm512_sub_ps(v_t4,v_t2); v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160); v_t5 = _mm512_mul_ps(v_t2,v_t5); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t5,v_t4); /* f[j+joff] = ani*(t1 + t2); */ v_t3 = _mm512_mul_ps(v_ani,_mm512_add_ps(v_t1,v_t2)); /* f[nxh-j+joff] = ani*conjf(t1 - t2); */ v_t4 = _mm512_sub_ps(v_t1,v_t2); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690), v_zero,v_t4); v_t4 = _mm512_mul_ps(v_ani,v_t4); /* reverse data */ v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4); if (j==0) { _mm512_mask_store_ps((float 
*)&f[j+joff], _mm512_int2mask(65532),v_t3); _mm512_mask_packstorelo_ps((float *)&f[nxh-j+joff-7], _mm512_int2mask(16383),v_t4); _mm512_mask_packstorehi_ps((float *)&f[nxh-j+joff+1], _mm512_int2mask(16383),v_t4); } else { _mm512_store_ps((float *)&f[j+joff],v_t3); _mm512_packstorelo_ps((float *)&f[nxh-j+joff-7],v_t4); _mm512_packstorehi_ps((float *)&f[nxh-j+joff+1],v_t4); } } /* loop over remaining elements */ for (j = itn; j < nxhh; j++) { t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; t2 = conjf(f[nxh-j+joff]); t1 = f[j+joff] + t2; t2 = (f[j+joff] - t2)*t3; f[j+joff] = ani*(t1 + t2); f[nxh-j+joff] = ani*conjf(t1 - t2); } } ani = 2.0*ani; for (k = 0; k < ny; k++) { joff = nxhd*k + nn; f[nxhh+joff] = ani*conjf(f[nxhh+joff]); f[joff] = ani*((crealf(f[joff]) + cimagf(f[joff])) + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I); } /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { joff = nxhd*k + nn; k1 = (mixup[k] - 1)/nryb; if (k < k1) { k1 = nxhd*k1 + nn; /* vector loop over elements in blocks of 8 */ for (i = 0; i < nxhs; i+=8) { /* t1 = f[i+k1]; */ v_t1 = _mm512_load_ps((float *)&f[i+k1]); /* f[i+k1] = f[i+joff]; */ v_t2 = _mm512_load_ps((float *)&f[i+joff]); _mm512_store_ps((float *)&f[i+k1],v_t2); /* f[i+joff] = t1; */ _mm512_store_ps((float *)&f[i+joff],v_t1); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t1 = f[i+k1]; f[i+k1] = f[i+joff]; f[i+joff] = t1; } } } /* then transform in y */ ns = 1; for (l = 0; l < indy; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = nxhd*(j + k1) + nn; j2 = nxhd*(j + k2) + nn; t1 = sct[kmr*j]; v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1), crealf(t1)); /* vector loop over elements in blocks of 8 */ for (i = 0; i < nxhs; i+=8) { /* t2 = t1*f[i+j2]; */ v_t2 = _mm512_load_ps((float *)&f[i+j2]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = 
(__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[i+j2] = f[i+j1] - t2; */ v_t3 = _mm512_load_ps((float *)&f[i+j1]); v_t4 = _mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[i+j2],v_t4); /* f[i+j1] += t2; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[i+j1],v_t4); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t2 = t1*f[i+j2]; f[i+j2] = f[i+j1] - t2; f[i+j1] += t2; } } } ns = ns2; } /* unscramble modes kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { joff = nxhd*k; k1 = nxhd*ny - joff + nn; joff += nn; t1 = f[k1]; f[k1] = 0.5*(cimagf(f[joff] + t1) + crealf(f[joff] - t1)*_Complex_I); f[joff] = 0.5*(crealf(f[joff] + t1) + cimagf(f[joff] - t1)*_Complex_I); } } return; /* forward fourier transform */ L180: nryb = nxhyz/ny; nry = nxyz/ny; nrxb = nxhyz/nxh; nrx = nxyz/nxh; #pragma omp parallel for \ private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,j1,j2,nn,joff,t1,t2,t3,v_it, \ v_kmr,v_t1,v_t2,v_t3,v_t4,v_t5) for (n = nzi-1; n < nzt; n++) { nn = nxhyd*n; /* scramble modes kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { joff = nxhd*k; k1 = nxhd*ny - joff + nn; joff += nn; t1 = cimagf(f[k1]) + crealf(f[k1])*_Complex_I; f[k1] = conjf(f[joff] - t1); f[joff] += t1; } /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { joff = nxhd*k + nn; k1 = (mixup[k] - 1)/nryb; if (k < k1) { k1 = nxhd*k1 + nn; /* vector loop over elements in blocks of 8 */ for (i = 0; i < nxhs; i+=8) { /* t1 = f[i+k1]; */ v_t1 = _mm512_load_ps((float *)&f[i+k1]); /* f[i+k1] = f[i+joff]; */ v_t2 = _mm512_load_ps((float *)&f[i+joff]); _mm512_store_ps((float *)&f[i+k1],v_t2); /* f[i+joff] = t1; */ _mm512_store_ps((float *)&f[i+joff],v_t1); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t1 = f[i+k1]; f[i+k1] = f[i+joff]; f[i+joff] = t1; } } } /* then transform 
in y */ ns = 1; for (l = 0; l < indy; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = nxhd*(j + k1) + nn; j2 = nxhd*(j + k2) + nn; t1 = conjf(sct[kmr*j]); v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1), crealf(t1)); /* vector loop over elements in blocks of 8 */ for (i = 0; i < nxhs; i+=8) { /* t2 = t1*f[i+j2]; */ v_t2 = _mm512_load_ps((float *)&f[i+j2]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[i+j2] = f[i+j1] - t2; */ v_t3 = _mm512_load_ps((float *)&f[i+j1]); v_t4 = _mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[i+j2],v_t4); /* f[i+j1] += t2; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[i+j1],v_t4); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t2 = t1*f[i+j2]; f[i+j2] = f[i+j1] - t2; f[i+j1] += t2; } } } ns = ns2; } /* scramble coefficients */ kmr = nxyz/nx; v_kmr = _mm512_set1_epi32(2*kmr); for (k = 0; k < ny; k++) { joff = nxhd*k + nn; /* vector loop over elements in blocks of 8 */ for (j = 0; j < nxhhs; j+=8) { /* t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m); v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177); /* t2 = conjf(f[nxh-j+joff]); */ v_t2 = _mm512_loadunpacklo_ps(v_t2, (float *)&f[nxh-j+joff-7]); v_t2 = _mm512_loadunpackhi_ps(v_t2, (float *)&f[nxh-j+joff+1]); /* reverse data */ v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2); v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690), v_zero,v_t2); /* t1 = f[j+joff] + t2; */ v_t4 = _mm512_load_ps((float 
*)&f[j+joff]); v_t1 = _mm512_add_ps(v_t4,v_t2); /* t2 = (f[j+joff] - t2)*t3; */ v_t2 = _mm512_sub_ps(v_t4,v_t2); v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160); v_t5 = _mm512_mul_ps(v_t2,v_t5); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t5,v_t4); /* f[j+joff] = t1 + t2; */ v_t3 = _mm512_add_ps(v_t1,v_t2); /* f[nxh-j+joff] = conjf(t1 - t2); */ v_t4 = _mm512_sub_ps(v_t1,v_t2); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690), v_zero,v_t4); /* reverse data */ v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4); if (j==0) { _mm512_mask_store_ps((float *)&f[j+joff], _mm512_int2mask(65532),v_t3); _mm512_mask_packstorelo_ps((float *)&f[nxh-j+joff-7], _mm512_int2mask(16383),v_t4); _mm512_mask_packstorehi_ps((float *)&f[nxh-j+joff+1], _mm512_int2mask(16383),v_t4); } else { _mm512_store_ps((float *)&f[j+joff],v_t3); _mm512_packstorelo_ps((float *)&f[nxh-j+joff-7],v_t4); _mm512_packstorehi_ps((float *)&f[nxh-j+joff+1],v_t4); } } /* loop over remaining elements */ for (j = itn; j < nxhh; j++) { t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; t2 = conjf(f[nxh-j+joff]); t1 = f[j+joff] + t2; t2 = (f[j+joff] - t2)*t3; f[j+joff] = t1 + t2; f[nxh-j+joff] = conjf(t1 - t2); } } for (k = 0; k < ny; k++) { joff = nxhd*k + nn; f[nxhh+joff] = 2.0*conjf(f[nxhh+joff]); f[joff] = (crealf(f[joff]) + cimagf(f[joff])) + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I; } /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { for (i = 0; i < ny; i++) { joff = nxhd*i + nn; t1 = f[j1+joff]; f[j1+joff] = f[j+joff]; f[j+joff] = t1; } } } /* finally transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; nss = 8*(ns/8); v_kmr = _mm512_set1_epi32(2*kmr); for (k = 0; k < km; k++) { k1 = ns2*k; 
/* NOTE(review): the code down to the next separator is the tail of the
   preceding (partially visible) FFT routine; comments only added here   */
            k2 = k1 + ns;
            for (i = 0; i < ny; i++) {
               joff = nxhd*i + nn;
/* vector loop over elements in blocks of 8 */
               for (j = 0; j < nss; j+=8) {
/* t1 = conjf(sct[kmr*j]); */
                  v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j);
                  v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m);
                  v_t1 = _mm512_i32gather_ps(v_it,(float *)sct,4);
/* negate odd (imaginary) lanes, mask 43690 = 0xaaaa, to conjugate */
                  v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(43690),
                         v_zero,v_t1);
/* t2 = t1*f[j+k2+joff]; */
                  v_t2 = _mm512_load_ps((float *)&f[j+k2+joff]);
                  v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
                  v_t3 = _mm512_mul_ps(v_t2,v_t3);
                  v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
                  v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
                  v_t4 = _mm512_mul_ps(v_t2,v_t4);
                  v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
                         v_zero,v_t4);
                  v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[j+k2+joff] = f[j+k1+joff] - t2; */
                  v_t3 = _mm512_load_ps((float *)&f[j+k1+joff]);
                  v_t4 = _mm512_sub_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[j+k2+joff],v_t4);
/* f[j+k1+joff] += t2; */
                  v_t4 = _mm512_add_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[j+k1+joff],v_t4);
               }
/* loop over remaining elements */
               for (j = nss; j < ns; j++) {
                  t1 = conjf(sct[kmr*j]);
                  t2 = t1*f[j+k2+joff];
                  f[j+k2+joff] = f[j+k1+joff] - t2;
                  f[j+k1+joff] += t2;
               }
            }
         }
         ns = ns2;
      }
   }
   return;
}

/*--------------------------------------------------------------------*/
void ckncfft3rmz(float complex f[], int isign, int mixup[],
                 float complex sct[], int indx, int indy, int indz,
                 int nyi, int nyp, int nxhd, int nyd, int nzd,
                 int nxhyzd, int nxyzhd) {
/* this subroutine performs the z part of a three dimensional real to
   complex fast fourier transform and its inverse, for a subset of y,
   using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny*nz
   indx/indy/indz = exponent which determines length in x/y/z direction,
   where nx=2**indx, ny=2**indy, nz=2**indz
   if isign = -1, an inverse fourier transform in z is performed
   f[l][k][j] = sum(f[i][k][j]*exp(-sqrt(-1)*2pi*l*i/nz))
   if isign = 1, a forward fourier transform in z is performed
   f[i][m][n] = sum(f[l][m][n]*exp(sqrt(-1)*2pi*l*i/nz))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nyi = initial y index used
   nyp = number of y indices used
   nxhd = first dimension of f
   nyd,nzd = second and third dimensions of f
   nxhyzd = maximum of (nx/2,ny,nz)
   nxyzhd = maximum of (nx,ny,nz)/2
   fourier coefficients are stored as follows:
   f[l][k][j] = real, imaginary part of mode j,k,l
   where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for
   f[l][k][0] = real, imaginary part of mode nx/2,k,l,
   where ny/2+1 <= k < ny and 0 <= l < nz, and
   f[l][0][0] = real, imaginary part of mode nx/2,0,l,
   f[l][ny/2][0] = real, imaginary part mode nx/2,ny/2,l,
   where nz/2+1 <= l < nz, and
   imag(f[0][0][0]) = real part of mode nx/2,0,0
   imag(f[0][ny/2][0]) = real part of mode nx/2,ny/2,0
   imag(f[nz/2][0][0]) = real part of mode nx/2,0,nz/2
   imag(f[nz/2][ny/2][0]) = real part of mode nx/2,ny/2,nz/2
   using jpl storage convention, as described in:
   E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent
   Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained
   Distributed Memory Parallel Computers," Caltech CRPC Report 217-50,
   December 1993.
   requires KNC, f needs to be 64 byte aligned
   nxhd need to be a multiple of 8
   written by viktor k. decyk, ucla
local data                                                            */
   int indx1, ndx1yz, nx, nxh, ny, nyh;
   int nz, nzh, nxyz, nxhyz, nyt, nrz, nrzb, nxhyd, ioff;
   int i, j, k, l, n, ll, j1, j2, k1, k2, l1, ns, ns2, km, kmr, i0, i1;
/* NOTE(review): nss is declared but never used in this routine */
   int nss, nxhs;
   float complex t1, t2;
   __m512 v_zero, v_t1, v_t2, v_t3, v_t4;
   if (isign==0) return;
   indx1 = indx - 1;
   ndx1yz = indx1 > indy ? indx1 : indy;
   ndx1yz = ndx1yz > indz ? ndx1yz : indz;
   nx = 1L<<indx;
   nxh = nx/2;
   ny = 1L<<indy;
   nyh = ny/2;
   nz = 1L<<indz;
   nzh = nz/2;
   nxyz = nx > ny ? nx : ny;
   nxyz = nxyz > nz ? nxyz : nz;
   nxhyz = 1L<<ndx1yz;
   nyt = nyi + nyp - 1;
   nxhyd = nxhd*nyd;
/* nxhs = largest multiple of 8 <= nxh: bound of the vectorized loops */
   nxhs = 8*(nxh/8);
   v_zero = _mm512_setzero_ps();
   v_t1 = _mm512_setzero_ps();
   v_t2 = _mm512_setzero_ps();
   v_t3 = _mm512_setzero_ps();
   v_t4 = _mm512_setzero_ps();
   if (isign > 0) goto L90;
/* inverse fourier transform */
   nrzb = nxhyz/nz;
   nrz = nxyz/nz;
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2, \
v_t1,v_t2,v_t3,v_t4)
   for (n = nyi-1; n < nyt; n++) {
      ioff = nxhd*n;
/* bit-reverse array elements in z */
      for (l = 0; l < nz; l++) {
         ll = nxhyd*l;
         l1 = (mixup[l] - 1)/nrzb;
         if (l < l1) {
            l1 = nxhyd*l1;
            i0 = ioff + ll;
            i1 = ioff + l1;
/* vector loop over elements in blocks of 8 */
            for (i = 0; i < nxhs; i+=8) {
/* t1 = f[i+i1]; */
               v_t1 = _mm512_load_ps((float *)&f[i+i1]);
/* f[i+i1] = f[i+i0]; */
               v_t2 = _mm512_load_ps((float *)&f[i+i0]);
               _mm512_store_ps((float *)&f[i+i1],v_t2);
/* f[i+i0] = t1; */
               _mm512_store_ps((float *)&f[i+i0],v_t1);
            }
/* loop over remaining elements */
            for (i = nxhs; i < nxh; i++) {
               t1 = f[i+i1];
               f[i+i1] = f[i+i0];
               f[i+i0] = t1;
            }
         }
      }
/* finally transform in z */
      ns = 1;
      for (l = 0; l < indz; l++) {
         ns2 = ns + ns;
         km = nzh/ns;
         kmr = km*nrz;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = nxhyd*(j + k1);
               j2 = nxhyd*(j + k2);
               t1 = sct[kmr*j];
/* broadcast twiddle factor into every complex lane of the register */
               v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
                      crealf(t1));
               i0 = ioff + j1;
               i1 = ioff + j2;
/* vector loop over elements in blocks of 8 */
               for (i = 0; i < nxhs; i+=8) {
/* t2 = t1*f[i+i1]; complex multiply via lane shuffles; mask 21845 =
   0x5555 negates the even (real) lanes to form the cross term */
                  v_t2 = _mm512_load_ps((float *)&f[i+i1]);
                  v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
                  v_t3 = _mm512_mul_ps(v_t2,v_t3);
                  v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
                  v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
                  v_t4 = _mm512_mul_ps(v_t2,v_t4);
                  v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
                         v_zero,v_t4);
                  v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[i+i1] = f[i+i0] - t2; */
                  v_t3 = _mm512_load_ps((float *)&f[i+i0]);
                  v_t4 = _mm512_sub_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[i+i1],v_t4);
/* f[i+i0] += t2; */
                  v_t4 = _mm512_add_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[i+i0],v_t4);
               }
/* loop over remaining elements */
               for (i = nxhs; i < nxh; i++) {
                  t2 = t1*f[i+i1];
                  f[i+i1] = f[i+i0] - t2;
                  f[i+i0] += t2;
               }
            }
         }
         ns = ns2;
      }
   }
/* unscramble modes kx = 0, nx/2 */
   if (nyi==1) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         t1 = f[l1];
         f[l1] = 0.5*(cimagf(f[ll] + t1)
                 + crealf(f[ll] - t1)*_Complex_I);
         f[ll] = 0.5*(crealf(f[ll] + t1)
                 + cimagf(f[ll] - t1)*_Complex_I);
      }
   }
   if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         i1 = nxhd*nyh;
         i0 = i1 + ll;
         i1 += l1;
         t1 = f[i1];
         f[i1] = 0.5*(cimagf(f[i0] + t1)
                 + crealf(f[i0] - t1)*_Complex_I);
         f[i0] = 0.5*(crealf(f[i0] + t1)
                 + cimagf(f[i0] - t1)*_Complex_I);
      }
   }
   return;
/* forward fourier transform */
L90: nrzb = nxhyz/nz;
   nrz = nxyz/nz;
/* scramble modes kx = 0, nx/2 */
   if (nyi==1) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         t1 = cimagf(f[l1]) + crealf(f[l1])*_Complex_I;
         f[l1] = conjf(f[ll] - t1);
         f[ll] += t1;
      }
   }
   if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         i1 = nxhd*nyh;
         i0 = i1 + ll;
         i1 += l1;
         t1 = cimagf(f[i1]) + crealf(f[i1])*_Complex_I;
         f[i1] = conjf(f[i0] - t1);
         f[i0] += t1;
      }
   }
/* bit-reverse array elements in z */
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2, \
v_t1,v_t2,v_t3,v_t4)
   for (n = nyi-1; n < nyt; n++) {
      ioff = nxhd*n;
      for (l = 0; l < nz; l++) {
         ll = nxhyd*l;
         l1 = (mixup[l] - 1)/nrzb;
         if (l < l1) {
            l1 = nxhyd*l1;
            i0 = ioff + ll;
            i1 = ioff + l1;
/* vector loop over elements in blocks of 8 */
            for (i = 0; i < nxhs; i+=8) {
/* t1 = f[i+i1]; */
               v_t1 = _mm512_load_ps((float *)&f[i+i1]);
/* f[i+i1] = f[i+i0]; */
               v_t2 = _mm512_load_ps((float *)&f[i+i0]);
               _mm512_store_ps((float *)&f[i+i1],v_t2);
/* f[i+i0] = t1; */
               _mm512_store_ps((float *)&f[i+i0],v_t1);
            }
/* loop over remaining elements */
            for (i = nxhs; i < nxh; i++) {
               t1 = f[i+i1];
               f[i+i1] = f[i+i0];
               f[i+i0] = t1;
            }
         }
      }
/* first transform in z */
      ns = 1;
      for (l = 0; l < indz; l++) {
         ns2 = ns + ns;
         km = nzh/ns;
         kmr = km*nrz;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = nxhyd*(j + k1);
               j2 = nxhyd*(j + k2);
/* forward pass uses the conjugated twiddle factor */
               t1 = conjf(sct[kmr*j]);
               v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
                      crealf(t1));
               i0 = ioff + j1;
               i1 = ioff + j2;
/* vector loop over elements in blocks of 8 */
               for (i = 0; i < nxhs; i+=8) {
/* t2 = t1*f[i+i1]; */
                  v_t2 = _mm512_load_ps((float *)&f[i+i1]);
                  v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
                  v_t3 = _mm512_mul_ps(v_t2,v_t3);
                  v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
                  v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
                  v_t4 = _mm512_mul_ps(v_t2,v_t4);
                  v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
                         v_zero,v_t4);
                  v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[i+i1] = f[i+i0] - t2; */
                  v_t3 = _mm512_load_ps((float *)&f[i+i0]);
                  v_t4 = _mm512_sub_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[i+i1],v_t4);
/* f[i+i0] += t2; */
                  v_t4 = _mm512_add_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[i+i0],v_t4);
               }
/* loop over remaining elements */
               for (i = nxhs; i < nxh; i++) {
                  t2 = t1*f[i+i1];
                  f[i+i1] = f[i+i0] - t2;
                  f[i+i0] += t2;
               }
            }
         }
         ns = ns2;
      }
   }
   return;
}

/*--------------------------------------------------------------------*/
void ckncfft3rm3xy(float complex f[], int isign, int mixup[],
                   float complex sct[], int indx, int indy, int indz,
                   int nzi, int nzp, int nxhd, int nyd, int nzd,
                   int nxhyzd, int nxyzhd) {
/* this subroutine performs the x-y part of 3 three dimensional complex
   to real fast fourier transforms and their inverses, for a subset of z,
   using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny*nz
indx/indy/indz = exponent which determines length in x/y/z direction, where nx=2**indx, ny=2**indy, nz=2**indz if isign = -1, three inverse fourier transforms in x and y are performed f[i][m][n][0:2] = (1/nx*ny*nz)*sum(f[i][k][j][0:2]* exp(-sqrt(-1)*2pi*n*j/nx)*exp(-sqrt(-1)*2pi*m*k/ny)) if isign = 1, three forward fourier transforms in x and y are performed f[l][k][j][0:2] = sum(f[l][m][n][0:2]*exp(sqrt(-1)*2pi*n*j/nx)* exp(sqrt(-1)*2pi*m*k/ny)) mixup = array of bit reversed addresses sct = sine/cosine table nzi = initial z index used nzp = number of z indices used nxhd = second dimension of f nyd,nzd = third and fourth dimensions of f nxhyzd = maximum of (nx/2,ny,nz) nxyzhd = maximum of (nx,ny,nz)/2 fourier coefficients are stored as follows: f[l][k][j][0:2] = real, imaginary part of mode j,k,l where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for f[l][k][0][0:2] = real, imaginary part of mode nx/2,k,l, where ny/2+1 <= k < ny and 0 <= l < nz, and f[l][0][0][0:2] = real, imaginary part of mode nx/2,0,l, f[l][ny/2][0][0:2] = real, imaginary part mode nx/2,ny/2,l, where nz/2+1 <= l < nz, and imag(f[0][0][0][0:2]) = real part of mode nx/2,0,0 imag(f[0][ny/2][0][0:2]) = real part of mode nx/2,ny/2,0 imag(f[nz/2][0][0][0:2]) = real part of mode nx/2,0,nz/2 imag(f[nz/2][ny/2][0][0:2]) = real part of mode nx/2,ny/2,nz/2 using jpl storage convention, as described in: E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent Three-Dimensional Fast Fourier Transform Algorithms for Coarse-Grained Distributed Memory Parallel Computers," Caltech CRPC Report 217-50, December 1993. requires KNC, f needs to be 64 byte aligned nxhd need to be a multiple of 2 f needs to have 4 components written by viktor k. 
decyk, ucla local data */ int indx1, ndx1yz, nx, nxh, nxhh, ny, nyh; int nz, nxyz, nxhyz, nzt, nrx, nry, nrxb, nryb, nxhd4, nxhyd; int i, j, k, l, n, nn, jj, j1, j2, k1, k2, ns, ns2, km, kmr, joff; int nss, nxhs, nxhhs, itn; float at1, at2, ani; float complex t1, t2, t3, t4; __m512i v_j, v_kmr, v_m, v_n, v_l, v_it; __m512 v_zero, v_t1, v_t2, v_t3, v_t4, v_t5, v_ani, v_half; v_j = _mm512_set_epi32(1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0); if (isign==0) return; indx1 = indx - 1; ndx1yz = indx1 > indy ? indx1 : indy; ndx1yz = ndx1yz > indz ? ndx1yz : indz; nx = 1L<<indx; nxh = nx/2; nxhh = nx/4; ny = 1L<<indy; nyh = ny/2; nz = 1L<<indz; nxyz = nx > ny ? nx : ny; nxyz = nxyz > nz ? nxyz : nz; nxhyz = 1L<<ndx1yz; nzt = nzi + nzp - 1; nxhd4 = 4*nxhd; nxhyd = nxhd4*nyd; nxhs = 2*(nxh/2); nxhhs = 2*(nxhh/2); itn = 1 > nxhhs ? 1 : nxhhs; v_m = _mm512_set_epi32(1,0,1,0,1,0,1,0,1,0,1,0,1,0,1,0); v_n = _mm512_set_epi32(7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8); v_zero = _mm512_setzero_ps(); v_t1 = _mm512_setzero_ps(); v_t2 = _mm512_setzero_ps(); v_t3 = _mm512_setzero_ps(); v_t4 = _mm512_setzero_ps(); v_half = _mm512_set1_ps(0.5f); if (isign > 0) goto L230; /* inverse fourier transform */ nrxb = nxhyz/nxh; nrx = nxyz/nxh; nryb = nxhyz/ny; nry = nxyz/ny; v_l = _mm512_set_epi32(15,11,14,10,13,9,12,8,7,3,6,2,5,1,4,0); #pragma omp parallel for \ private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,jj,j1,j2,nn,joff,at1,at2, \ ani,t1,t2,t3,t4,v_it,v_kmr,v_t1,v_ani,v_t2,v_t3,v_t4,v_t5) for (n = nzi-1; n < nzt; n++) { nn = nxhyd*n; /* swap complex components */ for (i = 0; i < ny; i++) { joff = nxhd4*i + nn; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = cimagf(f[2+4*j+joff]); */ /* at2 = crealf(f[2+4*j+joff]); */ /* f[2+4*j+joff] = crealf(f[1+4*j+joff]) */ /* + crealf(f[3+4*j+joff])*_Complex_I; */ /* f[1+4*j+joff] = cimagf(f[4*j+joff]) + at1*_Complex_I; */ /* f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I; */ v_t1 = _mm512_load_ps((float *)&f[4*j+joff]); v_t1 = 
(__m512)_mm512_permutevar_epi32(v_l,(__m512i)v_t1); _mm512_store_ps((float *)&f[4*j+joff],v_t1); } /* loop over remaining elements */ for (j = nxhs; j < nxh; j++) { at1 = cimagf(f[2+4*j+joff]); at2 = crealf(f[2+4*j+joff]); f[2+4*j+joff] = crealf(f[1+4*j+joff]) + crealf(f[3+4*j+joff])*_Complex_I; f[1+4*j+joff] = cimagf(f[4*j+joff]) + at1*_Complex_I; f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I; } } /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { for (i = 0; i < ny; i++) { joff = nxhd4*i + nn; /* t1 = f[4*j1+joff]; */ /* t2 = f[1+4*j1+joff]; */ /* t3 = f[2+4*j1+joff]; */ v_t1 = _mm512_mask_loadunpacklo_ps(v_t1, _mm512_int2mask(255),(float *)&f[4*j1+joff]); v_t1 = _mm512_mask_loadunpackhi_ps(v_t1, _mm512_int2mask(255),(float *)&f[4*j1+joff+8]); /* f[4*j1+joff] = f[4*j+joff]; */ /* f[1+4*j1+joff] = f[1+4*j+joff]; */ /* f[2+4*j1+joff] = f[2+4*j+joff]; */ v_t2 = _mm512_mask_loadunpacklo_ps(v_t2, _mm512_int2mask(255),(float *)&f[4*j+joff]); v_t2 = _mm512_mask_loadunpackhi_ps(v_t2, _mm512_int2mask(255),(float *)&f[4*j+joff+8]); _mm512_mask_packstorelo_ps((float *)&f[4*j1+joff], _mm512_int2mask(255),v_t2); _mm512_mask_packstorehi_ps((float *)&f[4*j1+joff+8], _mm512_int2mask(255),v_t2); /* f[4*j+joff] = t1; */ /* f[1+4*j+joff] = t2; */ /* f[2+4*j+joff] = t3; */ _mm512_mask_packstorelo_ps((float *)&f[4*j+joff], _mm512_int2mask(255),v_t1); _mm512_mask_packstorehi_ps((float *)&f[4*j+joff+8], _mm512_int2mask(255),v_t1); } } } /* first transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; nss = 2*(ns/2); v_kmr = _mm512_set1_epi32(2*kmr); for (k = 0; k < km; k++) { k1 = 4*ns2*k; k2 = k1 + 4*ns; for (i = 0; i < ny; i++) { joff = nxhd4*i + nn; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nss; j+=2) { /* t1 = sct[kmr*j]; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m); v_t1 = _mm512_i32gather_ps(v_it,(float 
*)sct,4); /* t2 = t1*f[4*j+k2+joff]; */ /* t3 = t1*f[1+4*j+k2+joff]; */ /* t4 = t1*f[2+4*j+k2+joff]; */ v_t2 = _mm512_load_ps((float *)&f[4*j+k2+joff]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[4*j+k2+joff] = f[4*j+k1+joff] - t2; */ /* f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3; */ /* f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4; */ v_t3 = _mm512_load_ps((float *)&f[4*j+k1+joff]); v_t4 = _mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*j+k2+joff],v_t4); /* f[4*j+k1+joff] += t2; */ /* f[1+4*j+k1+joff] += t3; */ /* f[2+4*j+k1+joff] += t4; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*j+k1+joff],v_t4); } /* loop over remaining elements */ for (j = nss; j < ns; j++) { t1 = sct[kmr*j]; t2 = t1*f[4*j+k2+joff]; t3 = t1*f[1+4*j+k2+joff]; t4 = t1*f[2+4*j+k2+joff]; f[4*j+k2+joff] = f[4*j+k1+joff] - t2; f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3; f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4; f[4*j+k1+joff] += t2; f[1+4*j+k1+joff] += t3; f[2+4*j+k1+joff] += t4; } } } ns = ns2; } /* unscramble coefficients and normalize */ kmr = nxyz/nx; ani = 0.5/(((float) nx)*((float) ny)*((float) nz)); v_ani = _mm512_set1_ps(ani); v_kmr = _mm512_set1_epi32(2*kmr); for (k = 0; k < ny; k++) { joff = nxhd4*k + nn; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhhs; j+=2) { /* t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m); v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4); v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(21845), v_zero,v_t3); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177); /* for (jj = 0; jj < 3; jj++) { */ /* t2 = conjf(f[jj+4*(nxh-j)+joff]); 
*/ v_t2 = _mm512_loadunpacklo_ps(v_t2, (float *)&f[4*(nxh-j-1)+joff]); v_t2 = _mm512_loadunpackhi_ps(v_t2, (float *)&f[4*(nxh-j-1)+joff+8]); /* reverse data */ v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2); v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690), v_zero,v_t2); /* t1 = f[jj+4*j+joff] + t2; */ v_t4 = _mm512_load_ps((float *)&f[4*j+joff]); v_t1 = _mm512_add_ps(v_t4,v_t2); /* t2 = (f[jj+4*j+joff] - t2)*t3; */ v_t2 = _mm512_sub_ps(v_t4,v_t2); v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160); v_t5 = _mm512_mul_ps(v_t2,v_t5); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t5,v_t4); /* f[jj+4*j+joff] = ani*(t1 + t2); */ v_t3 = _mm512_mul_ps(v_ani,_mm512_add_ps(v_t1,v_t2)); /* f[jj+4*(nxh-j)+joff] = ani*conjf(t1 - t2); */ /* } */ v_t4 = _mm512_sub_ps(v_t1,v_t2); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690), v_zero,v_t4); v_t4 = _mm512_mul_ps(v_ani,v_t4); /* reverse data */ v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4); if (j==0) { _mm512_mask_store_ps((float *)&f[4*j+joff], _mm512_int2mask(65280),v_t3); _mm512_mask_packstorelo_ps((float *)&f[4*(nxh-j-1)+joff], _mm512_int2mask(255),v_t4); _mm512_mask_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8], _mm512_int2mask(255),v_t4); } else { _mm512_store_ps((float *)&f[4*j+joff],v_t3); _mm512_packstorelo_ps((float *)&f[4*(nxh-j-1)+joff],v_t4); _mm512_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8],v_t4); } } /* loop over remaining elements */ for (j = itn; j < nxhh; j++) { t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; for (jj = 0; jj < 3; jj++) { t2 = conjf(f[jj+4*(nxh-j)+joff]); t1 = f[jj+4*j+joff] + t2; t2 = (f[jj+4*j+joff] - t2)*t3; f[jj+4*j+joff] = ani*(t1 + t2); f[jj+4*(nxh-j)+joff] = ani*conjf(t1 - t2); } } } /* ani = 2.0*ani; */ v_ani = _mm512_add_ps(v_ani,v_ani); for (k = 0; 
k < ny; k++) { joff = nxhd4*k + nn; /* for (jj = 0; jj < 3; jj++) { */ /* f[jj+4*nxhh+joff] = ani*conjf(f[jj+4*nxhh+joff]); */ v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63), (float *)&f[4*nxhh+joff]); v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(42),v_zero, v_t1); v_t1 = _mm512_mul_ps(v_ani,v_t1); _mm512_mask_store_ps((float *)&f[4*nxhh+joff], _mm512_int2mask(63),v_t1); /* f[jj+joff] = ani*((crealf(f[jj+joff]) */ /* + cimagf(f[jj+joff])) */ /* + (crealf(f[jj+joff]) */ /* - cimagf(f[jj+joff]))*_Complex_I); */ /* } */ v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63), (float *)&f[joff]); v_t1 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t3 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(42),v_t1,v_t2); v_t3 = _mm512_mask_add_ps(v_t3,_mm512_int2mask(21),v_t1,v_t2); v_t3 = _mm512_mul_ps(v_ani,v_t3); _mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63), v_t3); } /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { joff = nxhd4*k + nn; k1 = (mixup[k] - 1)/nryb; if (k < k1) { k1 = nxhd4*k1 + nn; /* vector loop over elements in blocks of 2 */ for (i = 0; i < nxhs; i+=2) { /* t1 = f[4*i+k1]; */ /* t2 = f[1+4*i+k1]; */ /* t3 = f[2+4*i+k1]; */ v_t1 = _mm512_load_ps((float *)&f[4*i+k1]); /* f[4*i+k1] = f[4*i+joff]; */ /* f[1+4*i+k1] = f[1+4*i+joff]; */ /* f[2+4*i+k1] = f[2+4*i+joff]; */ v_t2 = _mm512_load_ps((float *)&f[4*i+joff]); _mm512_store_ps((float *)&f[4*i+k1],v_t2); /* f[4*i+joff] = t1; */ /* f[1+4*i+joff] = t2; */ /* f[2+4*i+joff] = t3; */ _mm512_store_ps((float *)&f[4*i+joff],v_t1); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t1 = f[4*i+k1]; t2 = f[1+4*i+k1]; t3 = f[2+4*i+k1]; f[4*i+k1] = f[4*i+joff]; f[1+4*i+k1] = f[1+4*i+joff]; f[2+4*i+k1] = f[2+4*i+joff]; f[4*i+joff] = t1; f[1+4*i+joff] = t2; f[2+4*i+joff] = t3; } } } /* then transform in y */ ns = 1; for (l = 0; l < indy; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = 
nxhd4*(j + k1) + nn; j2 = nxhd4*(j + k2) + nn; t1 = sct[kmr*j]; v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1), crealf(t1)); /* vector loop over elements in blocks of 2 */ for (i = 0; i < nxhs; i+=2) { /* t2 = t1*f[4*i+j2]; */ /* t3 = t1*f[1+4*i+j2]; */ /* t4 = t1*f[2+4*i+j2]; */ v_t2 = _mm512_load_ps((float *)&f[4*i+j2]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[4*i+j2] = f[4*i+j1] - t2; */ /* f[1+4*i+j2] = f[1+4*i+j1] - t3; */ /* f[2+4*i+j2] = f[2+4*i+j1] - t4; */ v_t3 = _mm512_load_ps((float *)&f[4*i+j1]); v_t4 = _mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*i+j2],v_t4); /* f[4*i+j1] += t2; */ /* f[1+4*i+j1] += t3; */ /* f[2+4*i+j1] += t4; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*i+j1],v_t4); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t2 = t1*f[4*i+j2]; t3 = t1*f[1+4*i+j2]; t4 = t1*f[2+4*i+j2]; f[4*i+j2] = f[4*i+j1] - t2; f[1+4*i+j2] = f[1+4*i+j1] - t3; f[2+4*i+j2] = f[2+4*i+j1] - t4; f[4*i+j1] += t2; f[1+4*i+j1] += t3; f[2+4*i+j1] += t4; } } } ns = ns2; } /* unscramble modes kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { joff = nxhd4*k; k1 = nxhd4*ny - joff + nn; joff += nn; /* for (jj = 0; jj < 3; jj++) { */ /* t1 = f[jj+k1]; */ v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63), (float *)&f[k1]); /* f[jj+k1] = 0.5*(cimagf(f[jj+joff] + t1) */ /* + crealf(f[jj+joff] - t1)*_Complex_I); */ v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63), (float *)&f[joff]); v_t3 = _mm512_mask_add_ps(v_t3,_mm512_int2mask(42),v_t2,v_t1); v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(21),v_t2,v_t1); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177); v_t3 = _mm512_mul_ps(v_half,v_t3); _mm512_mask_store_ps((float 
*)&f[k1],_mm512_int2mask(63),v_t3); /* f[jj+joff] = 0.5*(crealf(f[jj+joff] + t1) */ /* + cimagf(f[jj+joff] - t1)*_Complex_I); */ /* } */ v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(42),v_t2,v_t1); v_t2 = _mm512_mask_add_ps(v_t2,_mm512_int2mask(21),v_t2,v_t1); v_t2 = _mm512_mul_ps(v_half,v_t2); _mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63),v_t2); } } return; /* forward fourier transform */ L230: nryb = nxhyz/ny; nry = nxyz/ny; nrxb = nxhyz/nxh; nrx = nxyz/nxh; v_l = _mm512_set_epi32(15,13,11,9,14,12,10,8,7,5,3,1,6,4,2,0); #pragma omp parallel for \ private(i,j,k,l,n,ns,ns2,nss,km,kmr,k1,k2,jj,j1,j2,nn,joff,at1,at2, \ t1,t2,t3,t4,v_it,v_kmr,v_t1,v_t2,v_t3,v_t4,v_t5) for (n = nzi-1; n < nzt; n++) { nn = nxhyd*n; /* scramble modes kx = 0, nx/2 */ for (k = 1; k < nyh; k++) { joff = nxhd4*k; k1 = nxhd4*ny - joff + nn; joff += nn; /* for (jj = 0; jj < 3; jj++) { */ /* t1 = cimagf(f[jj+k1]) + crealf(f[jj+k1])*_Complex_I; */ v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63), (float *)&f[k1]); v_t1 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,177); /* f[jj+k1] = conjf(f[jj+joff] - t1); */ v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63), (float *)&f[joff]); v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(63),v_t2,v_t1); v_t3 = _mm512_mask_sub_ps(v_t3,_mm512_int2mask(42), v_zero,v_t3); _mm512_mask_store_ps((float *)&f[k1],_mm512_int2mask(63),v_t3); /* f[jj+joff] += t1; */ /* } */ v_t2 = _mm512_mask_add_ps(v_t2,_mm512_int2mask(63),v_t2,v_t1); _mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63), v_t2); } /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { joff = nxhd4*k + nn; k1 = (mixup[k] - 1)/nryb; if (k < k1) { k1 = nxhd4*k1 + nn; /* vector loop over elements in blocks of 2 */ for (i = 0; i < nxhs; i+=2) { /* t1 = f[4*i+k1]; */ /* t2 = f[1+4*i+k1]; */ /* t3 = f[2+4*i+k1]; */ v_t1 = _mm512_load_ps((float *)&f[4*i+k1]); /* f[4*i+k1] = f[4*i+joff]; */ /* f[1+4*i+k1] = f[1+4*i+joff]; */ /* f[2+4*i+k1] = f[2+4*i+joff]; */ v_t2 = 
_mm512_load_ps((float *)&f[4*i+joff]); _mm512_store_ps((float *)&f[4*i+k1],v_t2); /* f[4*i+joff] = t1; */ /* f[1+4*i+joff] = t2; */ /* f[2+4*i+joff] = t3; */ _mm512_store_ps((float *)&f[4*i+joff],v_t1); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t1 = f[4*i+k1]; t2 = f[1+4*i+k1]; t3 = f[2+4*i+k1]; f[4*i+k1] = f[4*i+joff]; f[1+4*i+k1] = f[1+4*i+joff]; f[2+4*i+k1] = f[2+4*i+joff]; f[4*i+joff] = t1; f[1+4*i+joff] = t2; f[2+4*i+joff] = t3; } } } /* then transform in y */ ns = 1; for (l = 0; l < indy; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = nxhd4*(j + k1) + nn; j2 = nxhd4*(j + k2) + nn; t1 = conjf(sct[kmr*j]); v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1), crealf(t1)); /* vector loop over elements in blocks of 2 */ for (i = 0; i < nxhs; i+=2) { /* t2 = t1*f[4*i+j2]; */ /* t3 = t1*f[1+4*i+j2]; */ /* t4 = t1*f[2+4*i+j2]; */ v_t2 = _mm512_load_ps((float *)&f[4*i+j2]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[4*i+j2] = f[4*i+j1] - t2; */ /* f[1+4*i+j2] = f[1+4*i+j1] - t3; */ /* f[2+4*i+j2] = f[2+4*i+j1] - t4; */ v_t3 = _mm512_load_ps((float *)&f[4*i+j1]); v_t4 = _mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*i+j2],v_t4); /* f[4*i+j1] += t2; */ /* f[1+4*i+j1] += t3; */ /* f[2+4*i+j1] += t4; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*i+j1],v_t4); } /* loop over remaining elements */ for (i = nxhs; i < nxh; i++) { t2 = t1*f[4*i+j2]; t3 = t1*f[1+4*i+j2]; t4 = t1*f[2+4*i+j2]; f[4*i+j2] = f[4*i+j1] - t2; f[1+4*i+j2] = f[1+4*i+j1] - t3; f[2+4*i+j2] = f[2+4*i+j1] - t4; f[4*i+j1] += t2; f[1+4*i+j1] += t3; f[2+4*i+j1] += t4; } } } ns 
= ns2; } /* scramble coefficients */ kmr = nxyz/nx; v_kmr = _mm512_set1_epi32(2*kmr); for (k = 0; k < ny; k++) { joff = nxhd4*k + nn; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhhs; j+=2) { /* t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m); v_t3 = _mm512_i32gather_ps(v_it,(float *)sct,4); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,177); /* for (jj = 0; jj < 3; jj++) { */ /* t2 = conjf(f[jj+4*(nxh-j)+joff]); */ v_t2 = _mm512_loadunpacklo_ps(v_t2, (float *)&f[4*(nxh-j-1)+joff]); v_t2 = _mm512_loadunpackhi_ps(v_t2, (float *)&f[4*(nxh-j-1)+joff+8]); /* reverse data */ v_t2 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t2); v_t2 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(43690), v_zero,v_t2); /* t1 = f[jj+4*j+joff] + t2; */ v_t4 = _mm512_load_ps((float *)&f[4*j+joff]); v_t1 = _mm512_add_ps(v_t4,v_t2); /* t2 = (f[jj+4*j+joff] - t2)*t3; */ v_t2 = _mm512_sub_ps(v_t4,v_t2); v_t5 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,160); v_t5 = _mm512_mul_ps(v_t2,v_t5); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t3,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t5,v_t4); /* f[jj+4*j+joff] = t1 + t2; */ v_t3 = _mm512_add_ps(v_t1,v_t2); /* f[jj+4*(nxh-j)+joff] = conjf(t1 - t2); */ /* } */ v_t4 = _mm512_sub_ps(v_t1,v_t2); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(43690), v_zero,v_t4); /* reverse data */ v_t4 = (__m512)_mm512_permutevar_epi32(v_n,(__m512i)v_t4); if (j==0) { _mm512_mask_store_ps((float *)&f[4*j+joff], _mm512_int2mask(65280),v_t3); _mm512_mask_packstorelo_ps((float *)&f[4*(nxh-j-1)+joff], _mm512_int2mask(255),v_t4); _mm512_mask_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8], _mm512_int2mask(255),v_t4); } else { _mm512_store_ps((float *)&f[4*j+joff],v_t3); _mm512_packstorelo_ps((float 
*)&f[4*(nxh-j-1)+joff],v_t4); _mm512_packstorehi_ps((float *)&f[4*(nxh-j-1)+joff+8],v_t4); } } /* loop over remaining elements */ for (j = itn; j < nxhh; j++) { t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; for (jj = 0; jj < 3; jj++) { t2 = conjf(f[jj+4*(nxh-j)+joff]); t1 = f[jj+4*j+joff] + t2; t2 = (f[jj+4*j+joff] - t2)*t3; f[jj+4*j+joff] = t1 + t2; f[jj+4*(nxh-j)+joff] = conjf(t1 - t2); } } } for (k = 0; k < ny; k++) { joff = nxhd4*k + nn; /* for (jj = 0; jj < 3; jj++) { */ /* f[jj+4*nxhh+joff] = 2.0*conjf(f[jj+4*nxhh+joff]); */ v_t1 = _mm512_mask_load_ps(v_t1,_mm512_int2mask(63), (float *)&f[4*nxhh+joff]); v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(42),v_zero, v_t1); v_t1 = _mm512_add_ps(v_t1,v_t1); _mm512_mask_store_ps((float *)&f[4*nxhh+joff], _mm512_int2mask(63),v_t1); /* f[jj+joff] = (crealf(f[jj+joff]) + cimagf(f[jj+joff])) */ /* + (crealf(f[jj+joff]) */ /* - cimagf(f[jj+joff]))*_Complex_I; */ /* } */ v_t2 = _mm512_mask_load_ps(v_t2,_mm512_int2mask(63), (float *)&f[joff]); v_t1 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t3 = _mm512_mask_sub_ps(v_t2,_mm512_int2mask(42),v_t1,v_t2); v_t3 = _mm512_mask_add_ps(v_t3,_mm512_int2mask(21),v_t1,v_t2); _mm512_mask_store_ps((float *)&f[joff],_mm512_int2mask(63), v_t3); } /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { for (i = 0; i < ny; i++) { joff = nxhd4*i + nn; /* t1 = f[4*j1+joff]; */ /* t2 = f[1+4*j1+joff]; */ /* t3 = f[2+4*j1+joff]; */ v_t1 = _mm512_mask_loadunpacklo_ps(v_t1, _mm512_int2mask(255),(float *)&f[4*j1+joff]); v_t1 = _mm512_mask_loadunpackhi_ps(v_t1, _mm512_int2mask(255),(float *)&f[4*j1+joff+8]); /* f[4*j1+joff] = f[4*j+joff]; */ /* f[1+4*j1+joff] = f[1+4*j+joff]; */ /* f[2+4*j1+joff] = f[2+4*j+joff]; */ v_t2 = _mm512_mask_loadunpacklo_ps(v_t2, _mm512_int2mask(255),(float *)&f[4*j+joff]); v_t2 = _mm512_mask_loadunpackhi_ps(v_t2, _mm512_int2mask(255),(float *)&f[4*j+joff+8]); _mm512_mask_packstorelo_ps((float 
*)&f[4*j1+joff], _mm512_int2mask(255),v_t2); _mm512_mask_packstorehi_ps((float *)&f[4*j1+joff+8], _mm512_int2mask(255),v_t2); /* f[4*j+joff] = t1; */ /* f[1+4*j+joff] = t2; */ /* f[2+4*j+joff] = t3; */ _mm512_mask_packstorelo_ps((float *)&f[4*j+joff], _mm512_int2mask(255),v_t1); _mm512_mask_packstorehi_ps((float *)&f[4*j+joff+8], _mm512_int2mask(255),v_t1); } } } /* finally transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; nss = 2*(ns/2); v_kmr = _mm512_set1_epi32(2*kmr); for (k = 0; k < km; k++) { k1 = 4*ns2*k; k2 = k1 + 4*ns; for (i = 0; i < ny; i++) { joff = nxhd4*i + nn; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nss; j+=2) { /* t1 = conjf(sct[kmr*j]); */ v_it = _mm512_add_epi32(_mm512_set1_epi32(j),v_j); v_it = _mm512_fmadd_epi32(v_kmr,v_it,v_m); v_t1 = _mm512_i32gather_ps(v_it,(float *)sct,4); v_t1 = _mm512_mask_sub_ps(v_t1,_mm512_int2mask(43690), v_zero,v_t1); /* t2 = t1*f[4*j+k2+joff]; */ /* t3 = t1*f[1+4*j+k2+joff]; */ /* t4 = t1*f[2+4*j+k2+joff]; */ v_t2 = _mm512_load_ps((float *)&f[4*j+k2+joff]); v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160); v_t3 = _mm512_mul_ps(v_t2,v_t3); v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177); v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245); v_t4 = _mm512_mul_ps(v_t2,v_t4); v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845), v_zero,v_t4); v_t2 = _mm512_add_ps(v_t3,v_t4); /* f[4*j+k2+joff] = f[4*j+k1+joff] - t2; */ /* f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3; */ /* f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4; */ v_t3 = _mm512_load_ps((float *)&f[4*j+k1+joff]); v_t4 = _mm512_sub_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*j+k2+joff],v_t4); /* f[4*j+k1+joff] += t2; */ /* f[1+4*j+k1+joff] += t3; */ /* f[2+4*j+k1+joff] += t4; */ v_t4 = _mm512_add_ps(v_t3,v_t2); _mm512_store_ps((float *)&f[4*j+k1+joff],v_t4); } /* loop over remaining elements */ for (j = nss; j < ns; j++) { t1 = conjf(sct[kmr*j]); t2 = t1*f[4*j+k2+joff]; t3 = 
t1*f[1+4*j+k2+joff];
                  t4 = t1*f[2+4*j+k2+joff];
                  f[4*j+k2+joff] = f[4*j+k1+joff] - t2;
                  f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3;
                  f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4;
                  f[4*j+k1+joff] += t2;
                  f[1+4*j+k1+joff] += t3;
                  f[2+4*j+k1+joff] += t4;
               }
            }
         }
         ns = ns2;
      }
/* swap complex components */
      for (i = 0; i < ny; i++) {
         joff = nxhd4*i + nn;
/* vector loop over elements in blocks of 2 */
         for (j = 0; j < nxhs; j+=2) {
/* f[3+4*j+joff] = cimagf(f[2+4*j+joff])            */
/*              + cimagf(f[3+4*j+joff])*_Complex_I; */
/* at1 = crealf(f[2+4*j+joff]);                     */
/* f[2+4*j+joff] = cimagf(f[4*j+joff])              */
/*              + cimagf(f[1+4*j+joff])*_Complex_I; */
/* at2 = crealf(f[1+4*j+joff]);                     */
/* f[1+4*j+joff] = at1 + 0.0*_Complex_I;            */
/* f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I; */
/* the whole component swap above is done as one 16-lane permute */
            v_t1 = _mm512_load_ps((float *)&f[4*j+joff]);
            v_t1 = (__m512)_mm512_permutevar_epi32(v_l,(__m512i)v_t1);
            _mm512_store_ps((float *)&f[4*j+joff],v_t1);
         }
/* loop over remaining elements */
         for (j = nxhs; j < nxh; j++) {
            f[3+4*j+joff] = cimagf(f[2+4*j+joff])
                            + cimagf(f[3+4*j+joff])*_Complex_I;
            at1 = crealf(f[2+4*j+joff]);
            f[2+4*j+joff] = cimagf(f[4*j+joff])
                            + cimagf(f[1+4*j+joff])*_Complex_I;
            at2 = crealf(f[1+4*j+joff]);
            f[1+4*j+joff] = at1 + 0.0*_Complex_I;
            f[4*j+joff] = crealf(f[4*j+joff]) + at2*_Complex_I;
         }
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void ckncfft3rm3z(float complex f[], int isign, int mixup[],
                  float complex sct[], int indx, int indy, int indz,
                  int nyi, int nyp, int nxhd, int nyd, int nzd,
                  int nxhyzd, int nxyzhd) {
/* this subroutine performs the z part of 3 three dimensional complex
   to real fast fourier transforms and their inverses, for a subset of
   y, using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny*nz
   indx/indy/indz = exponent which determines length in x/y/z
   direction, where nx=2**indx, ny=2**indy, nz=2**indz
   if isign = -1, three inverse fourier transforms in z are performed
   f[l][k][j][0:2] = sum(f[i][k][j][0:2]*exp(-sqrt(-1)*2pi*l*i/nz))
   if isign = 1, three forward fourier transforms in z are performed
   f[i][m][n][0:2] = sum(f[l][m][n][0:2]*exp(sqrt(-1)*2pi*l*i/nz))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nyi = initial y index used
   nyp = number of y indices used
   nxhd = second dimension of f
   nyd,nzd = third and fourth dimensions of f
   nxhyzd = maximum of (nx/2,ny,nz)
   nxyzhd = maximum of (nx,ny,nz)/2
   fourier coefficients are stored as follows:
   f[l][k][j][0:2] = real, imaginary part of mode j,k,l
   where 0 <= j < nx/2, 0 <= k < ny, 0 <= l < nz, except for
   f[l][k][0][0:2], = real, imaginary part of mode nx/2,k,l,
   where ny/2+1 <= k < ny and 0 <= l < nz, and
   f[l][0][0][0:2] = real, imaginary part of mode nx/2,0,l,
   f[l][ny/2][0][0:2] = real, imaginary part mode nx/2,ny/2,l,
   where nz/2+1 <= l < nz, and
   imag(f[0][0][0][0:2]) = real part of mode nx/2,0,0
   imag(f[0][ny/2][0][0:2]) = real part of mode nx/2,ny/2,0
   imag(f[nz/2][0][0][0:2]) = real part of mode nx/2,0,nz/2
   imag(f[nz/2][ny/2][0][0:2]) = real part of mode nx/2,ny/2,nz/2
   using jpl storage convention, as described in:
   E. Huang, P. C. Liewer, V. K. Decyk, and R. D. Ferraro, "Concurrent
   Three-Dimensional Fast Fourier Transform Algorithms for Coarse-
   Grained Distributed Memory Parallel Computers," Caltech CRPC Report
   217-50, December 1993.
   requires KNC, f needs to be 64 byte aligned
   nxhd need to be a multiple of 2
   f needs to have 4 components
   written by viktor k. decyk, ucla
local data */
   int indx1, ndx1yz, nx, nxh, ny, nyh;
   int nz, nzh, nxyz, nxhyz, nyt, nrz, nrzb, nxhd4, nxhyd, ioff;
   int i, j, k, l, n, ll, jj, j1, j2, k1, k2, l1, ns, ns2, km, kmr;
   int i0, i1;
   int nxhs;
   float complex t1, t2, t3, t4;
   __m512 v_zero, v_t1, v_t2, v_t3, v_t4;
   if (isign==0)
      return;
   indx1 = indx - 1;
   ndx1yz = indx1 > indy ? indx1 : indy;
   ndx1yz = ndx1yz > indz ? ndx1yz : indz;
   nx = 1L<<indx;
   nxh = nx/2;
   ny = 1L<<indy;
   nyh = ny/2;
   nz = 1L<<indz;
   nzh = nz/2;
   nxyz = nx > ny ? nx : ny;
   nxyz = nxyz > nz ? nxyz : nz;
   nxhyz = 1L<<ndx1yz;
   nyt = nyi + nyp - 1;
   nxhd4 = 4*nxhd;
   nxhyd = nxhd4*nyd;
/* nxhs = largest even count of x elements, for the 2-at-a-time loops */
   nxhs = 2*(nxh/2);
   v_zero = _mm512_setzero_ps();
   v_t1 = _mm512_setzero_ps();
   v_t2 = _mm512_setzero_ps();
   v_t3 = _mm512_setzero_ps();
   v_t4 = _mm512_setzero_ps();
/* dispatch: isign < 0 falls through to the inverse transform, */
/* isign > 0 jumps to the forward transform at label L110      */
   if (isign > 0)
      goto L110;
/* inverse fourier transform */
   nrzb = nxhyz/nz;
   nrz = nxyz/nz;
/* each iteration owns one y index n, so threads touch disjoint data */
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2,t3, \
t4,v_t1,v_t2,v_t3,v_t4)
   for (n = nyi-1; n < nyt; n++) {
      ioff = nxhd4*n;
/* bit-reverse array elements in z */
      for (l = 0; l < nz; l++) {
         ll = nxhyd*l;
         l1 = (mixup[l] - 1)/nrzb;
         if (l < l1) {
            l1 = nxhyd*l1;
            i0 = ioff + ll;
            i1 = ioff + l1;
/* vector loop over elements in blocks of 2 */
            for (i = 0; i < nxhs; i+=2) {
/* t1 = f[4*i+i1]; */
/* t2 = f[1+4*i+i1]; */
/* t3 = f[2+4*i+i1]; */
               v_t1 = _mm512_load_ps((float *)&f[4*i+i1]);
/* f[4*i+i1] = f[4*i+i0]; */
/* f[1+4*i+i1] = f[1+4*i+i0]; */
/* f[2+4*i+i1] = f[2+4*i+i0]; */
               v_t2 = _mm512_load_ps((float *)&f[4*i+i0]);
               _mm512_store_ps((float *)&f[4*i+i1],v_t2);
/* f[4*i+i0] = t1; */
/* f[1+4*i+i0] = t2; */
/* f[2+4*i+i0] = t3; */
               _mm512_store_ps((float *)&f[4*i+i0],v_t1);
            }
/* loop over remaining elements */
            for (i = nxhs; i < nxh; i++) {
               t1 = f[4*i+i1];
               t2 = f[1+4*i+i1];
               t3 = f[2+4*i+i1];
               f[4*i+i1] = f[4*i+i0];
               f[1+4*i+i1] = f[1+4*i+i0];
               f[2+4*i+i1] = f[2+4*i+i0];
               f[4*i+i0] = t1;
               f[1+4*i+i0] = t2;
               f[2+4*i+i0] = t3;
            }
         }
      }
/* finally transform in z */
      ns = 1;
      for (l = 0; l < indz; l++) {
         ns2 = ns + ns;
         km = nzh/ns;
         kmr = km*nrz;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = nxhyd*(j + k1);
               j2 = nxhyd*(j + k2);
               t1 = sct[kmr*j];
/* broadcast twiddle t1 as repeated (re,im) pairs across all lanes */
               v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
                                     crealf(t1));
               i0 = ioff + j1;
               i1 = ioff + j2;
/* vector loop over elements in blocks of 2 */
               for (i = 0; i < nxhs; i+=2) {
/* t2 = t1*f[4*i+i1]; */
/* t3 = t1*f[1+4*i+i1]; */
/* t4 = t1*f[2+4*i+i1]; */
/* lane-interleaved complex multiply: shuffle 160/245 duplicate the */
/* re/im twiddle parts, shuffle 177 swaps (re,im) of f; mask 0x5555 */
/* (21845) negates the even (real-slot) lanes so im*im is subtracted */
                  v_t2 = _mm512_load_ps((float *)&f[4*i+i1]);
                  v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
                  v_t3 = _mm512_mul_ps(v_t2,v_t3);
                  v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
                  v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
                  v_t4 = _mm512_mul_ps(v_t2,v_t4);
                  v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
                                            v_zero,v_t4);
                  v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[4*i+i1] = f[4*i+i0] - t2; */
/* f[1+4*i+i1] = f[1+4*i+i0] - t3; */
/* f[2+4*i+i1] = f[2+4*i+i0] - t4; */
                  v_t3 = _mm512_load_ps((float *)&f[4*i+i0]);
                  v_t4 = _mm512_sub_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[4*i+i1],v_t4);
/* f[4*i+i0] += t2; */
/* f[1+4*i+i0] += t3; */
/* f[2+4*i+i0] += t4; */
                  v_t4 = _mm512_add_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[4*i+i0],v_t4);
               }
/* loop over remaining elements */
               for (i = nxhs; i < nxh; i++) {
                  t2 = t1*f[4*i+i1];
                  t3 = t1*f[1+4*i+i1];
                  t4 = t1*f[2+4*i+i1];
                  f[4*i+i1] = f[4*i+i0] - t2;
                  f[1+4*i+i1] = f[1+4*i+i0] - t3;
                  f[2+4*i+i1] = f[2+4*i+i0] - t4;
                  f[4*i+i0] += t2;
                  f[1+4*i+i0] += t3;
                  f[2+4*i+i0] += t4;
               }
            }
         }
         ns = ns2;
      }
   }
/* unscramble modes kx = 0, nx/2 */
   if (nyi==1) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         for (jj = 0; jj < 3; jj++) {
            t1 = f[jj+l1];
            f[jj+l1] = 0.5*(cimagf(f[jj+ll] + t1)
                            + crealf(f[jj+ll] - t1)*_Complex_I);
            f[jj+ll] = 0.5*(crealf(f[jj+ll] + t1)
                            + cimagf(f[jj+ll] - t1)*_Complex_I);
         }
      }
   }
   if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         i1 = nxhd4*nyh;
         i0 = i1 + ll;
         i1 += l1;
         for (jj = 0; jj < 3; jj++) {
            t1 = f[jj+i1];
            f[jj+i1] = 0.5*(cimagf(f[jj+i0] + t1)
                            + crealf(f[jj+i0] - t1)*_Complex_I);
            f[jj+i0] = 0.5*(crealf(f[jj+i0] + t1)
                            + cimagf(f[jj+i0] - t1)*_Complex_I);
         }
      }
   }
   return;
/* forward fourier transform */
L110: nrzb = nxhyz/nz;
   nrz = nxyz/nz;
/* scramble modes kx = 0, nx/2 */
   if (nyi==1) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         for (jj = 0; jj < 3; jj++) {
            t1 = cimagf(f[jj+l1]) + crealf(f[jj+l1])*_Complex_I;
            f[jj+l1] = conjf(f[jj+ll] - t1);
            f[jj+ll] += t1;
         }
      }
   }
   if ((nyi <= (nyh+1)) && (nyt >= (nyh+1))) {
      for (n = 1; n < nzh; n++) {
         ll = nxhyd*n;
         l1 = nxhyd*nz - ll;
         i1 = nxhd4*nyh;
         i0 = i1 + ll;
         i1 += l1;
         for (jj = 0; jj < 3; jj++) {
            t1 = cimagf(f[jj+i1]) + crealf(f[jj+i1])*_Complex_I;
            f[jj+i1] = conjf(f[jj+i0] - t1);
            f[jj+i0] += t1;
         }
      }
   }
/* each iteration owns one y index n, so threads touch disjoint data */
#pragma omp parallel for \
private(i,j,k,l,n,ns,ns2,km,kmr,k1,k2,j1,j2,ll,l1,i0,i1,ioff,t1,t2,t3, \
t4,v_t1,v_t2,v_t3,v_t4)
   for (n = nyi-1; n < nyt; n++) {
      ioff = nxhd4*n;
/* bit-reverse array elements in z */
      for (l = 0; l < nz; l++) {
         ll = nxhyd*l;
         l1 = (mixup[l] - 1)/nrzb;
         if (l < l1) {
            l1 = nxhyd*l1;
            i0 = ioff + ll;
            i1 = ioff + l1;
/* vector loop over elements in blocks of 2 */
            for (i = 0; i < nxhs; i+=2) {
/* t1 = f[4*i+i1]; */
/* t2 = f[1+4*i+i1]; */
/* t3 = f[2+4*i+i1]; */
               v_t1 = _mm512_load_ps((float *)&f[4*i+i1]);
/* f[4*i+i1] = f[4*i+i0]; */
/* f[1+4*i+i1] = f[1+4*i+i0]; */
/* f[2+4*i+i1] = f[2+4*i+i0]; */
               v_t2 = _mm512_load_ps((float *)&f[4*i+i0]);
               _mm512_store_ps((float *)&f[4*i+i1],v_t2);
/* f[4*i+i0] = t1; */
/* f[1+4*i+i0] = t2; */
/* f[2+4*i+i0] = t3; */
               _mm512_store_ps((float *)&f[4*i+i0],v_t1);
            }
/* loop over remaining elements */
            for (i = nxhs; i < nxh; i++) {
               t1 = f[4*i+i1];
               t2 = f[1+4*i+i1];
               t3 = f[2+4*i+i1];
               f[4*i+i1] = f[4*i+i0];
               f[1+4*i+i1] = f[1+4*i+i0];
               f[2+4*i+i1] = f[2+4*i+i0];
               f[4*i+i0] = t1;
               f[1+4*i+i0] = t2;
               f[2+4*i+i0] = t3;
            }
         }
      }
/* first transform in z */
      ns = 1;
      for (l = 0; l < indz; l++) {
         ns2 = ns + ns;
         km = nzh/ns;
         kmr = km*nrz;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = nxhyd*(j + k1);
               j2 = nxhyd*(j + k2);
/* forward transform uses the conjugated twiddle */
               t1 = conjf(sct[kmr*j]);
               v_t1 = _mm512_set4_ps(cimagf(t1),crealf(t1),cimagf(t1),
                                     crealf(t1));
               i0 = ioff + j1;
               i1 = ioff + j2;
/* vector loop over elements in blocks of 2 */
               for (i = 0; i < nxhs; i+=2) {
/* t2 = t1*f[4*i+i1]; */
/* t3 = t1*f[1+4*i+i1]; */
/* t4 = t1*f[2+4*i+i1]; */
                  v_t2 = _mm512_load_ps((float *)&f[4*i+i1]);
                  v_t3 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,160);
                  v_t3 = _mm512_mul_ps(v_t2,v_t3);
                  v_t2 = (__m512)_mm512_shuffle_epi32((__m512i)v_t2,177);
                  v_t4 = (__m512)_mm512_shuffle_epi32((__m512i)v_t1,245);
                  v_t4 = _mm512_mul_ps(v_t2,v_t4);
                  v_t4 = _mm512_mask_sub_ps(v_t4,_mm512_int2mask(21845),
                                            v_zero,v_t4);
                  v_t2 = _mm512_add_ps(v_t3,v_t4);
/* f[4*i+i1] = f[4*i+i0] - t2; */
/* f[1+4*i+i1] = f[1+4*i+i0] - t3; */
/* f[2+4*i+i1] = f[2+4*i+i0] - t4; */
                  v_t3 = _mm512_load_ps((float *)&f[4*i+i0]);
                  v_t4 = _mm512_sub_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[4*i+i1],v_t4);
/* f[4*i+i0] += t2; */
/* f[1+4*i+i0] += t3; */
/* f[2+4*i+i0] += t4; */
                  v_t4 = _mm512_add_ps(v_t3,v_t2);
                  _mm512_store_ps((float *)&f[4*i+i0],v_t4);
               }
/* loop over remaining elements */
               for (i = nxhs; i < nxh; i++) {
                  t2 = t1*f[4*i+i1];
                  t3 = t1*f[1+4*i+i1];
                  t4 = t1*f[2+4*i+i1];
                  f[4*i+i1] = f[4*i+i0] - t2;
                  f[1+4*i+i1] = f[1+4*i+i0] - t3;
                  f[2+4*i+i1] = f[2+4*i+i0] - t4;
                  f[4*i+i0] += t2;
                  f[1+4*i+i0] += t3;
                  f[2+4*i+i0] += t4;
               }
            }
         }
         ns = ns2;
      }
   }
   return;
}
/*--------------------------------------------------------------------*/
void ckncwfft3rmx(float complex f[], int isign, int mixup[],
                  float complex sct[], int indx, int indy, int indz,
                  int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) {
/* wrapper function for real to complex fft, with packed data */
/* local data */
   int ny, nz;
   static int nyi = 1, nzi = 1;
/* calculate range of indices */
   ny = 1L<<indy;
   nz = 1L<<indz;
/* inverse fourier transform */
   if (isign < 0) {
/* perform xy fft */
      ckncfft3rmxy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,nzd,
                   nxhyzd,nxyzhd);
/* perform z fft */
      ckncfft3rmz(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd,
                  nxhyzd,nxyzhd);
   }
/* forward fourier transform */
   else if (isign > 0) {
/* perform z fft */
      ckncfft3rmz(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd,
                  nxhyzd,nxyzhd);
/* perform xy fft */
      ckncfft3rmxy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd,nzd,
                   nxhyzd,nxyzhd);
} return; } /*--------------------------------------------------------------------*/ void ckncwfft3rm3(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int indz, int nxhd, int nyd, int nzd, int nxhyzd, int nxyzhd) { /* wrapper function for 3 2d real to complex ffts, with packed data */ /* local data */ int ny, nz; static int nyi = 1, nzi = 1; /* calculate range of indices */ ny = 1L<<indy; nz = 1L<<indz; /* inverse fourier transform */ if (isign < 0) { /* perform xy fft */ ckncfft3rm3xy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd, nzd,nxhyzd,nxyzhd); /* perform z fft */ ckncfft3rm3z(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd, nxhyzd,nxyzhd); } /* forward fourier transform */ else if (isign > 0) { /* perform z fft */ ckncfft3rm3z(f,isign,mixup,sct,indx,indy,indz,nyi,ny,nxhd,nyd,nzd, nxhyzd,nxyzhd); /* perform xy fft */ ckncfft3rm3xy(f,isign,mixup,sct,indx,indy,indz,nzi,nz,nxhd,nyd, nzd,nxhyzd,nxyzhd); } return; } /* Interfaces to Fortran */ /*--------------------------------------------------------------------*/ void ckncgbppush3lt_(float *ppart, float *fxyz, float *bxyz ,int *kpic, float *qbm, float *dt, float *dtc, float *ek, int *idimp, int *nppmx, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ipbc) { ckncgbppush3lt(ppart,fxyz,bxyz,kpic,*qbm,*dt,*dtc,ek,*idimp,*nppmx, *nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1, *mxyz1,*ipbc); return; } /*--------------------------------------------------------------------*/ void ckncgbppushf3lt_(float *ppart, float *fxyz, float *bxyz, int *kpic, int *ncl, int *ihole, float *qbm, float *dt, float *dtc, float *ek, int *idimp, int *nppmx, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ntmax, int *irc) { ckncgbppushf3lt(ppart,fxyz,bxyz,kpic,ncl,ihole,*qbm,*dt,*dtc,ek, *idimp,*nppmx,*nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv, 
*mx1,*my1,*mxyz1,*ntmax,irc); return; } /*--------------------------------------------------------------------*/ void ckncgrbppush3lt_(float *ppart, float *fxyz, float *bxyz, int *kpic, float *qbm, float *dt, float *dtc, float *ci, float *ek, int *idimp, int *nppmx, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ipbc) { ckncgrbppush3lt(ppart,fxyz,bxyz,kpic,*qbm,*dt,*dtc,*ci,ek,*idimp, *nppmx,*nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv,*mx1, *my1,*mxyz1,*ipbc); return; } /*--------------------------------------------------------------------*/ void ckncgrbppushf3lt_(float *ppart, float *fxyz, float *bxyz, int *kpic, int *ncl, int *ihole, float *qbm, float *dt, float *dtc, float *ci, float *ek, int *idimp, int *nppmx, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ntmax, int *irc) { ckncgrbppushf3lt(ppart,fxyz,bxyz,kpic,ncl,ihole,*qbm,*dt,*dtc,*ci,ek, *idimp,*nppmx,*nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv, *nzv,*mx1,*my1,*mxyz1,*ntmax,irc); return; } /*--------------------------------------------------------------------*/ void ckncgppost3lt_(float *ppart, float *q, int *kpic, float *qm, int *nppmx, int *idimp, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1) { ckncgppost3lt(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*mz,*nxv,*nyv, *nzv,*mx1,*my1,*mxyz1); return; } /*--------------------------------------------------------------------*/ void cknc2gppost3lt_(float *ppart, float *q, int *kpic, float *qm, int *nppmx, int *idimp, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1) { cknc2gppost3lt(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*mz,*nxv,*nyv, *nzv,*mx1,*my1,*mxyz1); return; } /*--------------------------------------------------------------------*/ void ckncgjppost3lt_(float *ppart, float *cu, int *kpic, float *qm, float *dt, int *nppmx, int *idimp, int *nx, 
int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ipbc) { ckncgjppost3lt(ppart,cu,kpic,*qm,*dt,*nppmx,*idimp,*nx,*ny,*nz,*mx, *my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1,*ipbc); return; } /*--------------------------------------------------------------------*/ void ckncgjppostf3lt_(float *ppart, float *cu, int *kpic, int *ncl, int *ihole, float *qm, float *dt, int *nppmx, int *idimp, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ntmax, int *irc) { ckncgjppostf3lt(ppart,cu,kpic,ncl,ihole,*qm,*dt,*nppmx,*idimp,*nx, *ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1, *ntmax,irc); return; } /*--------------------------------------------------------------------*/ void ckncgrjppost3lt_(float *ppart, float *cu, int *kpic, float *qm, float *dt, float *ci, int *nppmx, int *idimp, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ipbc) { ckncgrjppost3lt(ppart,cu,kpic,*qm,*dt,*ci,*nppmx,*idimp,*nx,*ny,*nz, *mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1,*ipbc); return; } /*--------------------------------------------------------------------*/ void ckncgrjppostf3lt_(float *ppart, float *cu, int *kpic, int *ncl, int *ihole, float *qm, float *dt, float *ci, int *nppmx, int *idimp, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ntmax, int *irc) { ckncgrjppostf3lt(ppart,cu,kpic,ncl,ihole,*qm,*dt,*ci,*nppmx,*idimp, *nx,*ny,*nz,*mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1, *mxyz1,*ntmax,irc); return; } /*--------------------------------------------------------------------*/ void cknc2gjppost3lt_(float *ppart, float *cu, int *kpic, float *qm, float *dt, int *nppmx, int *idimp, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ipbc) { 
cknc2gjppost3lt(ppart,cu,kpic,*qm,*dt,*nppmx,*idimp,*nx,*ny,*nz,*mx, *my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1,*ipbc); return; } /*--------------------------------------------------------------------*/ void cknc2grjppost3lt_(float *ppart, float *cu, int *kpic, float *qm, float *dt, float *ci, int *nppmx, int *idimp, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *nxv, int *nyv, int *nzv, int *mx1, int *my1, int *mxyz1, int *ipbc) { cknc2grjppost3lt(ppart,cu,kpic,*qm,*dt,*ci,*nppmx,*idimp,*nx,*ny,*nz, *mx,*my,*mz,*nxv,*nyv,*nzv,*mx1,*my1,*mxyz1,*ipbc); return; } /*--------------------------------------------------------------------*/ void ckncpporder3lt_(float *ppart, float *ppbuff, int *kpic, int *ncl, int *ihole, int *idimp, int *nppmx, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *mx1, int *my1, int *mz1, int *npbmx, int *ntmax, int *irc) { ckncpporder3lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*nx,*ny,*nz, *mx,*my,*mz,*mx1,*my1,*mz1,*npbmx,*ntmax,irc); return; } /*--------------------------------------------------------------------*/ void ckncpporderf3lt_(float *ppart, float *ppbuff, int *kpic, int *ncl, int *ihole, int *idimp, int *nppmx, int *mx1, int *my1, int *mz1, int *npbmx, int *ntmax, int *irc) { ckncpporderf3lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*mx1,*my1, *mz1,*npbmx,*ntmax,irc); return; } /*--------------------------------------------------------------------*/ void ckncpp2order3lt_(float *ppart, float *ppbuff, int *kpic, int *ncl, int *ihole, int *idimp, int *nppmx, int *nx, int *ny, int *nz, int *mx, int *my, int *mz, int *mx1, int *my1, int *mz1, int *npbmx, int *ntmax, int *irc) { ckncpp2order3lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*nx,*ny, *nz,*mx,*my,*mz,*mx1,*my1,*mz1,*npbmx,*ntmax,irc); return; } /*--------------------------------------------------------------------*/ void cknccguard3l_(float *fxyz, int *nx, int *ny, int *nz, int *nxe, int *nye, int *nze) { cknccguard3l(fxyz,*nx,*ny,*nz,*nxe,*nye,*nze); 
return; } /*--------------------------------------------------------------------*/ void ckncacguard3l_(float *cu, int *nx, int *ny, int *nz, int *nxe, int *nye, int *nze) { ckncacguard3l(cu,*nx,*ny,*nz,*nxe,*nye,*nze); return; } /*--------------------------------------------------------------------*/ void ckncaguard3l_(float *q, int *nx, int *ny, int *nz, int *nxe, int *nye, int *nze) { ckncaguard3l(q,*nx,*ny,*nz,*nxe,*nye,*nze); return; } /*--------------------------------------------------------------------*/ void ckncmpois33_(float complex *q, float complex *fxyz, int *isign, float complex *ffc, float *ax, float *ay, float *az, float *affp, float *we, int *nx, int *ny, int *nz, int *nxvh, int *nyv, int *nzv, int *nxhd, int *nyhd, int *nzhd) { ckncmpois33(q,fxyz,*isign,ffc,*ax,*ay,*az,*affp,we,*nx,*ny,*nz,*nxvh, *nyv,*nzv,*nxhd,*nyhd,*nzhd); return; } /*--------------------------------------------------------------------*/ void ckncmcuperp3_(float complex *cu, int *nx, int *ny, int *nz, int *nxvh, int *nyv, int *nzv) { ckncmcuperp3(cu,*nx,*ny,*nz,*nxvh,*nyv,*nzv); return; } /*--------------------------------------------------------------------*/ void ckncmibpois33_(float complex *cu, float complex *bxyz, float complex *ffc, float *ci, float *wm, int *nx, int *ny, int *nz, int *nxvh, int *nyv, int *nzv, int *nxhd, int *nyhd, int *nzhd) { ckncmibpois33(cu,bxyz,ffc,*ci,wm,*nx,*ny,*nz,*nxvh,*nyv,*nzv,*nxhd, *nyhd,*nzhd); return; } /*--------------------------------------------------------------------*/ void ckncmmaxwel3_(float complex *exyz, float complex *bxyz, float complex *cu, float complex *ffc, float *ci, float *dt, float *wf, float *wm, int *nx, int *ny, int *nz, int *nxvh, int *nyv, int *nzv, int *nxhd, int *nyhd, int *nzhd) { ckncmmaxwel3(exyz,bxyz,cu,ffc,*ci,*dt,wf,wm,*nx,*ny,*nz,*nxvh,*nyv, *nzv,*nxhd,*nyhd,*nzhd); return; } /*--------------------------------------------------------------------*/ void ckncmemfield3_(float complex *fxyz, float complex 
*exyz, float complex *ffc, int *isign, int *nx, int *ny, int *nz, int *nxvh, int *nyv, int *nzv, int *nxhd, int *nyhd, int *nzhd) { ckncmemfield3(fxyz,exyz,ffc,*isign,*nx,*ny,*nz,*nxvh,*nyv,*nzv,*nxhd, *nyhd,*nzhd); return; } /*--------------------------------------------------------------------*/ void ckncwfft3rmx_(float complex *f, int *isign, int *mixup, float complex *sct, int *indx, int *indy, int *indz, int *nxhd, int *nyd, int *nzd, int *nxhyzd, int *nxyzhd) { ckncwfft3rmx(f,*isign,mixup,sct,*indx,*indy,*indz,*nxhd,*nyd,*nzd, *nxhyzd,*nxyzhd); return; } /*--------------------------------------------------------------------*/ void ckncwfft3rm3_(float complex *f, int *isign, int *mixup, float complex *sct, int *indx, int *indy, int *indz, int *nxhd, int *nyd, int *nzd, int *nxhyzd, int *nxyzhd) { ckncwfft3rm3(f,*isign,mixup,sct,*indx,*indy,*indz,*nxhd,*nyd,*nzd, *nxhyzd,*nxyzhd); return; }
/* ==================== file boundary: gas.h ==================== */
/**
 * Author: Kartik Lakhotia Sourav Pati
 * Email id: klakhoti@usc.edu spati@usc.edu
 * Date: 27-Feb-2018
 *
 * This code implements work optimized propagation blocking with
 * transposed bin graph to reduce cache misses in scatter
 */
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <assert.h>
#include <vector>
//#include <atomic>
#include "../include/partition.h"

using namespace std;

// Concatenate the per-partition frontiers of all currently active scatter
// partitions into the single global array G->frontier.
// A serial prefix sum over the partition frontier sizes gives each partition
// a disjoint output offset, so the copy loop itself can run in parallel.
template<class graph>
void getFrontier(graph* G)
{
    // prefix[i] = starting offset of partition activeScatter[i] in G->frontier
    intV* prefix = new intV [(G->partListPtr)+1]();
    prefix[0] = 0;
    for (intV i=0; i<G->partListPtr; i++)
    {
        prefix[i+1] = prefix[i] + G->TD[G->activeScatter[i]].frontierSize;
    }
    #pragma omp parallel for
    for (intV i=0; i<G->partListPtr; i++)
    {
        for (intV j=0; j<G->TD[G->activeScatter[i]].frontierSize; j++)
            G->frontier[prefix[i]+j] = G->TD[G->activeScatter[i]].frontier[j];
    }
    delete[] prefix;
}

// Clear the frontier-membership flags of every vertex in every active scatter
// partition, zero the per-partition frontier sizes, and reset the global count.
// Parallel over partitions; partitions own disjoint frontier arrays.
template<class graph>
void resetFrontier(graph* G)
{
    #pragma omp parallel for
    for (intV i=0; i<G->partListPtr; i++)
    {
        for (intV j=0; j<G->TD[G->activeScatter[i]].frontierSize; j++)
            G->inFrontier[G->TD[G->activeScatter[i]].frontier[j]] = false;
        G->TD[G->activeScatter[i]].frontierSize = 0;
    }
    G->frontierSize = 0;
}

// Serial version: distribute an initial (user-supplied) frontier into the
// per-partition frontier arrays, accumulate each partition's active edge
// count, and build the active-scatter partition list.
// NOTE(review): the second loop clears G->flag so the flags are left false
// for the next iteration; flags are only used here for first-seen detection.
template<class graph>
void loadFrontier(graph* G, intV* initFrontier, intV initFrontierSize)
{
    partitionData* allTD = G->TD;
    intV vertexId, pId;
    G->partListPtr = 0;
    for (intV i=0; i<initFrontierSize; i++)
    {
        vertexId = initFrontier[i];
        G->inFrontier[vertexId] = true;
        pId = (vertexId >> binOffsetBits); // owning partition of this vertex
        allTD[pId].frontier[allTD[pId].frontierSize++] = vertexId;
        allTD[pId].activeEdges += G->outDeg[vertexId];
        if (!G->flag[pId]) // first vertex seen for this partition -> mark active
        {
            G->flag[pId] = true;
            G->activeScatter[G->partListPtr++] = pId;
        }
    }
    for (intV i=0; i<initFrontierSize; i++)
    {
        pId = (initFrontier[i] >> binOffsetBits);
        if (G->flag[pId])
            G->flag[pId] = false;
    }
    G->frontierSize = initFrontierSize;
}

// Parallel version of loadFrontier(). Per-partition frontier slots and edge
// counters are claimed with __sync_fetch_and_add; the compare-and-swap on
// G->flag[pId] guarantees each partition is appended to activeScatter once.
template<class graph>
void loadFrontierPar(graph* G, intV* initFrontier, intV initFrontierSize)
{
    partitionData* allTD = G->TD;
    G->partListPtr = 0;
    #pragma omp parallel for
    for (intV i=0; i<initFrontierSize; i++)
    {
        intV vertexId = initFrontier[i];
        G->inFrontier[vertexId] = true;
        intV pId = (vertexId >> binOffsetBits);
        // atomically claim a slot in the partition's frontier array
        intV ptr = __sync_fetch_and_add(&allTD[pId].frontierSize, 1);
        allTD[pId].frontier[ptr] = vertexId;
        __sync_fetch_and_add(&allTD[pId].activeEdges, G->outDeg[vertexId]);
        if (__sync_bool_compare_and_swap(&G->flag[pId], false, true))
        {
            ptr = G->partListPtr.fetch_add(1);
            G->activeScatter[ptr] = pId;
        }
    }
    #pragma omp parallel for
    for (intV i=0; i<initFrontierSize; i++)
    {
        intV pId = (initFrontier[i] >> binOffsetBits);
        if (G->flag[pId])
            G->flag[pId] = false;
    }
    G->frontierSize = initFrontierSize;
}

// Re-evaluate frontier membership of a sparse partition via the user's
// initFunc, compact the surviving vertices in place, and (atomically, once)
// register the partition in the active-gather list if anything survived.
template <class graph, class userArg>
void reInitializeSparseFrontier(graph* G, partitionData* TD, userArg UA)
{
    intV trueSize = 0;
    for (intV i=0; i<TD->frontierSize; i++)
    {
        G->inFrontier[TD->frontier[i]] = UA.initFunc(TD->frontier[i]);
        if (G->inFrontier[TD->frontier[i]])
        {
            TD->frontier[trueSize++] = TD->frontier[i];
        }
    }
    if ((trueSize > 0) && (__sync_bool_compare_and_swap(&G->flag[TD->tid], false, true)))
    {
        intV listPtr = G->partListPtr.fetch_add(1);
        G->activeGather[listPtr] = TD->tid;
    }
    TD->frontierSize = trueSize;
}

// Dense variant: run the user's initFunc over every vertex the partition
// owns; no frontier compaction is needed in dense mode.
template <class graph, class userArg>
void reInitializeDenseFrontier(graph* G, partitionData* TD, userArg UA)
{
    for (intV i=TD->startVertex; i<TD->endVertex; i++)
    {
        UA.initFunc(i);
    }
}

// Apply the user's filterFunc to the partition's frontier, compact survivors,
// accumulate the partition's active edge count, and fold the surviving count
// into the global (atomic) frontier size.
template <class graph, class userArg>
void filterFrontier(graph*G, partitionData* TD, userArg UA)
{
    intV trueSize = 0;
    for (intV i=0; i<TD->frontierSize; i++)
    {
        G->inFrontier[TD->frontier[i]] = UA.filterFunc(TD->frontier[i]);
        if (G->inFrontier[TD->frontier[i]])
        {
            TD->activeEdges += G->outDeg[TD->frontier[i]];
            TD->frontier[trueSize++] = TD->frontier[i];
        }
    }
    TD->frontierSize = trueSize;
    if (TD->frontierSize > 0)
        G->frontierSize.fetch_add(TD->frontierSize);
}

// Cost-model heuristic choosing between the dense (partition-centric, PC) and
// sparse (vertex-centric, VC) scatter modes for the next iteration.
// NOTE(review): the constants 28.0/10.5/2.67/4.0 appear to be tuned machine
// cost coefficients — confirm against the accompanying paper before changing.
void densityCheck(partitionData* TD)
{
    // TD->isDense = false;
    TD->isDense = ((28.0 * (float)TD->activeEdges) > ((float)(TD->PNG->numEdges)*10.5 + 2.67*(float)TD->totalEdges + 4.0*(float)NUM_BINS));
}

// Vertex-centric scatter: for every vertex on this partition's frontier, push
// the user's scatter value to the message bins of all destination partitions.
// In the unweighted path, one update per (vertex, destination-bin) pair is
// stored and the MSB of the destination id marks "new update value follows"
// (decoded by gatherPC/gatherDense).
template <class type,class graph, class userArg>
void scatterVC(graph* G, partitionData* TD, type** updateBins, intV** destIdBins, intE* updateBinPointers, intE* destIdBinPointers, userArg UA)
{
    intV destId = 0;
    intV destBin = 0;
    intV vertexId = 0;
    intV cond = 0;
    intV prevBin = 0;
    intV listPtr = 0;
    type userReturn;
#ifdef WEIGHTED
    type weightedVal;
#endif
    for (intV i=0; i<TD->frontierSize; i++)
    {
        vertexId = TD->frontier[i]; //pop an active vertex
        prevBin = NUM_BINS;
        userReturn = UA.scatterFunc(vertexId); //invoke user def func. on the vertex
        for (intE j=G->VI[vertexId]; j<G->VI[vertexId+1]; j++)
        {
            destId = G->EI[j];
            destBin = (destId >> binOffsetBits);
#ifdef WEIGHTED
            weightedVal = UA.applyWeight(userReturn, G->EW[j]); //apply weight to the value
            updateBins[destBin][destIdBinPointers[destBin]] = weightedVal; //store the update in update bins
            destIdBins[destBin][destIdBinPointers[destBin]++] = destId; //store the dest ID in destination bins
#else
            ///////////////////////////////////
            ///// branch avoiding approach ////
            ///////////////////////////////////
            // cond==1 exactly when this edge targets a new bin (CSC ordering
            // groups a vertex's edges by bin), so the update value is written
            // once per bin and the MSB flag tells the gather side to advance.
            cond = (destBin != prevBin);
            updateBins[destBin][updateBinPointers[destBin]] = userReturn;
            updateBinPointers[destBin] += cond;
            destId |= (cond << MSB_ROT);
            destIdBins[destBin][destIdBinPointers[destBin]++] = destId;
            prevBin = destBin;
            ///////////////////////////////////
            //////// branched approach ////////
            ///////////////////////////////////
            //if (destBin!=prevBin)
            //{
            //    updateBins[destBin][updateBinPointers[destBin]++] = userReturn;
            //    destId |= (1 << MSB_ROT);
            //    prevBin = destBin;
            //}
            //destIdBins[destBin][destIdBinPointers[destBin]++] = destId;
#endif
            if (!G->binFlag[TD->tid][destBin]) //if the corresponding message bin is not active yet
            {
                G->binFlag[TD->tid][destBin] = true; //mark the bin as active
                listPtr = G->TD[destBin].binListPtr.fetch_add(1); //atomically increase # active bins for destination partition
                G->activeBins[destBin][listPtr] = TD->tid; //convey the ID of bin to destination partition
                if (__sync_bool_compare_and_swap(&G->flag[destBin], false, true)) //if the destination partition isn't active
                {
                    listPtr = G->partListPtr.fetch_add(1); //atomically claim a slot in the active-gather list
                    G->activeGather[listPtr] = destBin;
                }
            }
        }
    }
}

// Partition-centric scatter (dense mode): stream the partition's transposed
// bin graph (PNG) and write one update per outgoing edge, bin by bin.
// In non-DENSE builds, all NUM_BINS message bins are first marked active for
// every destination partition, since every bin gets written.
template <class type,class graph, class userArg>
void scatterPC(graph*G, partitionData* TD, type** updateBins, userArg UA)
{
    partitionGraph* PNG = TD->PNG;
    intE pointer;
    intV listPtr;
    type userReturn;
#ifndef DENSE
    for (intV i=0; i<NUM_BINS; i++)
    {
        G->binFlag[TD->tid][i] = true;
        listPtr = G->TD[i].binListPtr.fetch_add(1);
        G->activeBins[i][listPtr] = TD->tid;
        if (__sync_bool_compare_and_swap(&G->flag[i], false, true))
        {
            listPtr = G->partListPtr.fetch_add(1);
            G->activeGather[listPtr] = i;
        }
    }
#endif
    for (intV i=0; i<PNG->numVertex; i++)
    {
        pointer = 0;
        for (intE j=PNG->VI[i]; j<PNG->VI[i+1]; j++){
            userReturn = UA.scatterFunc(PNG->EI[j]);
            updateBins[i][pointer++] = userReturn;
        }
    }
}

// Scatter driver: pick PC or VC scatter from the density heuristic, then
// re-initialize this partition's frontier for the next gather phase.
template <class type, class graph, class userArg>
void scatter(graph*G, partitionData* TD, type** updateBins, intV** destIdBins, intE* updateBinPointers, intE* destIdBinPointers, userArg UA)
{
#ifndef DENSE
    if (TD->isDense)
        scatterPC<type>(G, TD, updateBins, UA);
    else
        scatterVC<type>(G, TD, updateBins, destIdBins, updateBinPointers, destIdBinPointers, UA);
    reInitializeSparseFrontier(G, TD, UA);
#else
    scatterPC<type>(G, TD, updateBins, UA);
    reInitializeDenseFrontier(G,TD,UA);
#endif
}

//////////////////////////////////
/////////// PC GATHER ////////////
//////////////////////////////////
// Drain one message bin: decode the MSB-compressed (update, destId) stream,
// apply the user's gatherFunc, and append newly activated vertices to this
// partition's frontier.
// NOTE(review): updateBinPointer starts at MAX_UINT so that the first entry,
// whose MSB flag is set, wraps the pointer to 0 — intentional unsigned wrap.
template <class type, class graph, class userArg>
#ifdef WEIGHTED
void gatherPC(graph* G, partitionData* TD, type* updateBin, intV* destIdBin, unsigned int* weightBin, intE binSize, userArg UA)
#else
void gatherPC(graph* G, partitionData* TD, type* updateBin, intV* destIdBin, intE binSize, userArg UA)
#endif
{
    intV destId = 0;
    intE updateBinPointer = MAX_UINT;
    type updateVal;
    bool cond;
    for (intE j=0; j<binSize; j++)
    {
        destId = destIdBin[j];
        updateBinPointer += (destId >> MSB_ROT); // advance only on MSB-flagged entries
        destId = destId & MAX_POS;               // strip the flag bit
#ifdef WEIGHTED
        updateVal = UA.applyWeight(updateBin[updateBinPointer], weightBin[j]);
#else
        updateVal = updateBin[updateBinPointer];
#endif
        cond = UA.gatherFunc(updateVal, destId);
        if (!G->inFrontier[destId] && cond)
        {
            TD->frontier[TD->frontierSize++] = destId;
            G->inFrontier[destId] = true;
        }
    }
}

///////////////////////////////////
///////// DENSE GATHER ///////////
//////////////////////////////////
// Same decoding as gatherPC but without frontier maintenance — dense mode
// touches every vertex anyway, so gatherFunc's return value is ignored.
template <class type, class graph, class userArg>
#ifdef WEIGHTED
void gatherDense(graph* G, partitionData* TD, type* updateBin, intV* destIdBin, unsigned int* weightBin, intE binSize, userArg UA)
#else
void gatherDense(graph* G, partitionData* TD, type* updateBin, intV* destIdBin, intE binSize, userArg UA)
#endif
{
    intV destId = 0;
    intE updateBinPointer = MAX_UINT;
    type updateVal;
    for (intE j=0; j<binSize; j++)
    {
        destId = destIdBin[j];
        updateBinPointer += (destId >> MSB_ROT);
        destId = destId & MAX_POS;
#ifdef WEIGHTED
        updateVal = UA.applyWeight(updateBin[updateBinPointer], weightBin[j]);
#else
        updateVal = updateBin[updateBinPointer];
#endif
        UA.gatherFunc(updateVal, destId);
    }
}

#ifdef WEIGHTED
///////////////////////////////////
///////// VC GATHER /////////////
//////////////////////////////////
// Weighted sparse gather: updates are stored one-per-edge (no MSB
// compression), so update j pairs directly with destination j.
template <class type, class graph, class userArg>
void gatherVC(graph* G, partitionData* TD, type* updateBin, intV* destIdBin, intE binSize, userArg UA)
{
    intV destId = 0;
    intE updateBinPointer = MAX_UINT;
    bool cond;
    type updateVal;
    for (intE j=0; j<binSize; j++)
    {
        destId = destIdBin[j];
        updateVal = updateBin[j];
        cond = UA.gatherFunc(updateVal, destId);
        if (!G->inFrontier[destId] && cond)
        {
            TD->frontier[TD->frontierSize++] = destId;
            G->inFrontier[destId] = true;
        }
    }
}
#endif

// Gather driver for one destination partition: walk its list of active source
// bins, dispatch to the matching gather kernel (per WEIGHTED/DENSE and the
// source partition's scatter mode), reset the bin pointers, then filter the
// resulting frontier for the next iteration.
template <class type, class graph, class userArg>
void gather(graph* G, partitionData* TD, type*** updateBins, intV*** denseDestIdBins, intV*** sparseDestIdBins, partitionData* allTD, intE** destIdBinAddrSize, intE** destIdBinPointers, intE** updateBinPointers, userArg UA)
{
    TD->activeEdges = 0;
#ifndef DENSE
    G->flag[TD->tid] = false;
#endif
#ifdef WEIGHTED
    unsigned int*** weightBin = G->indWeightBins;
#endif
    for (intV ptr=0; ptr<TD->binListPtr; ptr++)
    {
#ifndef DENSE
        intV i = G->activeBins[TD->tid][ptr];
        if (!G->binFlag[i][TD->tid])
            continue; //already done during scatter-gather mix
        G->binFlag[i][TD->tid] = false;
#else
        intV i = ptr;
#endif
#ifdef WEIGHTED
#ifndef DENSE
        if (allTD[i].isDense)
            gatherPC<type>(G, TD, updateBins[i][TD->tid], denseDestIdBins[i][TD->tid], weightBin[i][TD->tid], destIdBinAddrSize[i][TD->tid], UA);
        else
            gatherVC<type>(G, TD, updateBins[i][TD->tid], sparseDestIdBins[i][TD->tid], destIdBinPointers[i][TD->tid], UA);
#else
        gatherDense(G, TD, updateBins[i][TD->tid], denseDestIdBins[i][TD->tid], weightBin[i][TD->tid], destIdBinAddrSize[i][TD->tid], UA);
#endif
#else
#ifndef DENSE
        // unweighted: both dense and sparse bins use the MSB-compressed
        // format, so gatherPC decodes either; only the id array/size differ
        if (allTD[i].isDense)
            gatherPC<type>(G, TD, updateBins[i][TD->tid], denseDestIdBins[i][TD->tid], destIdBinAddrSize[i][TD->tid], UA);
        else
            gatherPC<type>(G, TD, updateBins[i][TD->tid], sparseDestIdBins[i][TD->tid], destIdBinPointers[i][TD->tid], UA);
#else
        gatherDense(G, TD, updateBins[i][TD->tid], denseDestIdBins[i][TD->tid], destIdBinAddrSize[i][TD->tid], UA);
#endif
#endif
        destIdBinPointers[i][TD->tid] = 0;
        updateBinPointers[i][TD->tid] = 0;
    }
#ifndef DENSE
    TD->binListPtr = 0;
    filterFrontier(G, TD, UA);
#else
    for (intV i=TD->startVertex; i<TD->endVertex; i++)
        UA.filterFunc(i);
#endif
}

// Interleaved variant of gather() used by sgMix(): only consumes bins whose
// producing partition has finished scattering (scatterDone[i]); remaining
// bins are left flagged for a later pass.
template <class type, class graph, class userArg>
void gatherIL(graph* G, partitionData* TD, type*** updateBins, intV*** denseDestIdBins, intV*** sparseDestIdBins, partitionData* allTD, intE** destIdBinAddrSize, intE** destIdBinPointers, intE** updateBinPointers, bool* scatterDone, userArg UA)
{
    TD->activeEdges = 0;
#ifdef WEIGHTED
    unsigned int*** weightBin = G->indWeightBins;
#endif
    for (intV ptr=0; ptr<TD->binListPtr; ptr++)
    {
#ifndef DENSE
        intV i = G->activeBins[TD->tid][ptr];
#else
        intV i = ptr;
#endif
        if (!scatterDone[i])
            continue;
        G->binFlag[i][TD->tid] = false;
#ifdef WEIGHTED
#ifndef DENSE
        if (allTD[i].isDense)
            gatherPC<type>(G, TD, updateBins[i][TD->tid], denseDestIdBins[i][TD->tid], weightBin[i][TD->tid], destIdBinAddrSize[i][TD->tid], UA);
        else
            gatherVC<type>(G, TD, updateBins[i][TD->tid], sparseDestIdBins[i][TD->tid], destIdBinPointers[i][TD->tid], UA);
#else
        gatherDense(G, TD, updateBins[i][TD->tid], denseDestIdBins[i][TD->tid], weightBin[i][TD->tid], destIdBinAddrSize[i][TD->tid], UA);
#endif
#else
#ifndef DENSE
        if (allTD[i].isDense)
            gatherPC<type>(G, TD, updateBins[i][TD->tid], denseDestIdBins[i][TD->tid], destIdBinAddrSize[i][TD->tid], UA);
        else
            gatherPC<type>(G, TD, updateBins[i][TD->tid], sparseDestIdBins[i][TD->tid], destIdBinPointers[i][TD->tid], UA);
#else
        gatherDense(G, TD, updateBins[i][TD->tid], denseDestIdBins[i][TD->tid], destIdBinAddrSize[i][TD->tid], UA);
#endif
#endif
        destIdBinPointers[i][TD->tid] = 0;
        updateBinPointers[i][TD->tid] = 0;
    }
#ifndef DENSE
    filterFrontier(G, TD, UA);
#else
    for (intV i=TD->startVertex; i<TD->endVertex; i++)
        UA.filterFunc(i);
#endif
}

//for intra partition asynch processing
// Process a partition's intra-partition edges (TD->IPG) synchronously within
// the partition: scatter and gather fused, no message bins. Dense mode walks
// all owned vertices; sparse mode walks only the current frontier.
template <class type, class graph, class userArg>
void sgIntra(graph* G, partitionData* TD, userArg UA)
{
    partitionGraph* IGSort = TD->IPG;
    if (TD->isDense)
    {
        for (intV i=0; i<IGSort->numVertex; i++)
        {
            intV vertexId = TD->startVertex + i; // IPG vertex ids are partition-local
            type userReturn = UA.scatterFunc(vertexId);
            for (intE j=IGSort->VI[i]; j<IGSort->VI[i+1]; j++)
            {
                intV destId = IGSort->EI[j];
#ifdef WEIGHTED
                type updateVal = UA.applyWeight(userReturn, IGSort->EW[j]);
#else
                type updateVal = userReturn;
#endif
                bool cond = UA.gatherFunc(updateVal, destId);
                if (!G->inFrontier[destId] && cond)
                {
                    TD->frontier[TD->frontierSize++] = destId;
                    G->inFrontier[destId] = true;
                }
            }
        }
    }
    else
    {
        for (intV i=0; i<TD->frontierSize; i++)
        {
            intV vertexId = TD->frontier[i];
            intV inPartId = vertexId - TD->startVertex;
            type userReturn = UA.scatterFunc(vertexId);
            for (intE j=IGSort->VI[inPartId]; j<IGSort->VI[inPartId+1]; j++)
            {
                intV destId = IGSort->EI[j];
#ifdef WEIGHTED
                type updateVal = UA.applyWeight(userReturn, IGSort->EW[j]);
#else
                type updateVal = userReturn;
#endif
                bool cond = UA.gatherFunc(updateVal, destId);
                if (!G->inFrontier[destId] && cond)
                {
                    TD->frontier[TD->frontierSize++] = destId;
                    G->inFrontier[destId] = true;
                }
            }
        }
    }
}

// Per-partition pipeline for one mixed scatter/gather iteration:
// intra-partition edges, then gather of ready inter-partition bins, then the
// density decision and this partition's own scatter; finally publish
// scatterDone so other partitions may gather from our bins.
template <class type, class graph, class userArg>
void sgMix(graph* G, partitionData* TD, type*** updateBins, intV*** denseDestIdBins, intV*** sparseDestIdBins, partitionData* allTD, intE** destIdBinAddrSize, intE** destIdBinPointers, intE** updateBinPointers, bool* scatterDone, userArg UA)
{
    sgIntra<type>(G, TD, UA);
    gatherIL<type>(G, TD, updateBins, denseDestIdBins, sparseDestIdBins, allTD, destIdBinAddrSize, destIdBinPointers, updateBinPointers, scatterDone, UA);
    densityCheck(TD);
    scatter<type>(G, TD, updateBins[TD->tid], sparseDestIdBins[TD->tid], updateBinPointers[TD->tid], destIdBinPointers[TD->tid], UA);
    scatterDone[TD->tid] = true;
}
GB_unaryop__identity_uint32_uint64.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_uint32_uint64
// op(A') function:  GB_tran__identity_uint32_uint64

// C type:   uint32_t
// A type:   uint64_t
// cast:     uint32_t cij = (uint32_t) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    uint64_t

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint64_t aij = Ax [pA]

// Cx [p] accessor
#define GB_CX(p) Cx [p]

// unary operator (identity: pass the casted value through)
#define GB_OP(z, x) \
    z = x ;

// casting (uint64_t -> uint32_t, value truncated modulo 2^32)
#define GB_CASTING(z, x) \
    uint32_t z = (uint32_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT32 || GxB_NO_UINT64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise apply over the anz entries of A, parallelized statically
// across nthreads; each output entry depends only on the matching input.
GrB_Info GB_unop__identity_uint32_uint64
(
    uint32_t *restrict Cx,
    const uint64_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is generated by textual inclusion of
// GB_unaryop_transpose.c, which expands the GB_* macros defined above.
GrB_Info GB_tran__identity_uint32_uint64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
elemwise_binary_scalar_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file elemwise_binary_scalar_op.h * \brief Function definition of elementwise binary scalar operators */ #ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_ #define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_ #include <mxnet/operator_util.h> #include <vector> #include <utility> #include "../mshadow_op.h" #include "../elemwise_op_common.h" #include "elemwise_unary_op.h" namespace mxnet { namespace op { class BinaryScalarOp : public UnaryOp { /*! 
\brief Tensor-vs-scalar operation producing a dense result from a
         row-sparse (RSP) input.
         Rows absent from the sparse input are filled with OP(0, alpha);
         present rows are computed from their stored values. Contiguous runs
         of dense (absent) and sparse (present) rows are each handled with a
         single kernel launch. */
  template<typename OP, typename DType, typename IType>
  static void ComputeExDenseResultRSP(mshadow::Stream<cpu> *stream,
                                      const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const NDArray &input,
                                      const OpReqType req,
                                      const NDArray &output) {
    const double alpha = nnvm::get<double>(attrs.parsed);  // the scalar operand
    CHECK_EQ(output.shape(), input.shape());
    const int64_t row_count = output.shape()[0];
    const int64_t items_per_row = output.shape().Size() / row_count;
    // value written to every element of a missing (all-zero) row
    const DType result_for_zero = OP::Map(DType(0), DType(alpha));
    mshadow::Tensor<cpu, 1, DType> input_data = input.data().FlatTo1D<cpu, DType>(stream);
    mshadow::Tensor<cpu, 1, DType> output_data = output.data().FlatTo1D<cpu, DType>(stream);
    const int64_t sparse_row_count = input.aux_shape(rowsparse::kIdx).Size();
    if (sparse_row_count != row_count) {
      mshadow::Tensor<cpu, 1, IType> row_indexes = input.aux_data(
        rowsparse::kIdx).FlatTo1D<cpu, IType>(stream);
      int64_t input_iter = 0;   // cursor into the stored (sparse) rows
      int64_t output_row = 0;   // cursor into the dense output rows
      IType next_input_row = 0;
      while (output_row < row_count) {
        // row index of the next stored row, or row_count when exhausted
        next_input_row = input_iter < sparse_row_count ? int64_t(row_indexes[input_iter]) : row_count;
        // Split up into blocks of contiguous data and do those together
        // Do contiguous dense blocks
        const int64_t dense_block_count = next_input_row - output_row;
        if (dense_block_count > 0) {
          MXNET_ASSIGN_REQ_SWITCH(req, Req, {
            mxnet_op::Kernel<OpBase::SetToScalar<Req>, cpu>::Launch(
              stream,
              items_per_row * dense_block_count,
              output_data.dptr_ + items_per_row * output_row,
              result_for_zero);
          });
          output_row += dense_block_count;
          continue;
        }
        // Do contiguous sparse blocks
        int64_t next_non_contiguous_sparse = input_iter;
        while (next_non_contiguous_sparse < sparse_row_count - 1) {
          if (row_indexes[next_non_contiguous_sparse + 1]
              != row_indexes[next_non_contiguous_sparse] + 1) {
            break;
          }
          ++next_non_contiguous_sparse;
        }
        const int64_t sparse_block_count = next_non_contiguous_sparse - input_iter + 1;
        if (sparse_block_count > 0) {
          MXNET_ASSIGN_REQ_SWITCH(req, Req, {
            mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
              stream,
              items_per_row * sparse_block_count,
              &output_data.dptr_[items_per_row * output_row],
              &input_data.dptr_[items_per_row * input_iter],
              DType(alpha));
          });
          output_row += sparse_block_count;
          input_iter += sparse_block_count;
          continue;
        }
      }
    } else {
      // All rows exist (eventually we don't have to do complex
      // things to call GPU kernels because we don't need to access row indices)
      MXNET_ASSIGN_REQ_SWITCH(req, Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, cpu>::Launch(
          stream,
          items_per_row * row_count,
          output_data.dptr_,
          input_data.dptr_,
          DType(alpha));
      });
    }
  }

  /*! \brief Tensor-vs-scalar operation producing a dense result from a CSR
             input. The output is pre-filled with OP(0, alpha), then stored
             entries are overwritten row by row. */
  template<typename OP, typename DType, typename IType, typename CType>
  static void ComputeExDenseResultCSR(mshadow::Stream<cpu> *stream,
                                      const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const NDArray &input,
                                      const OpReqType req,
                                      const NDArray &output) {
    CHECK_EQ(output.shape(), input.shape());
    const double alpha = nnvm::get<double>(attrs.parsed);
    const DType dense_fill_val = OP::Map(DType(0), DType(alpha));
    const TBlob column_indexes = input.aux_data(csr::kIdx);
    const size_t item_count = column_indexes.Size();
    // Pre-fill dense with 0-input/output value
    FillDense<cpu, DType>(stream, output.shape().Size(), dense_fill_val,
                          req, output.data().dptr<DType>());
    mshadow::Tensor<cpu, 2, DType> out = AsRowise2D<DType>(stream, output.data());
    if (item_count) {
      const DType *in = input.data().dptr<DType>();
      const IType *column_indexes_ptr = column_indexes.dptr<IType>();
      const auto row_count = static_cast<size_t>(input.shape()[0]);
      const TBlob row_starts = input.aux_data(csr::kIndPtr);
      const CType *row_starts_ptr = row_starts.dptr<CType>();
      #pragma omp parallel for
      for (int i = 0; i < static_cast<int>(row_count); ++i) {
        const bool last_row = i == static_cast<int>(row_count) - 1;
        // Split up into blocks of contiguous data and do those together
        const size_t row_item_start_iter = row_starts_ptr[i];
        const size_t input_items_this_row = !last_row ?
          static_cast<size_t>(row_starts_ptr[i + 1]) - row_item_start_iter
          : item_count - row_item_start_iter;
        if (input_items_this_row) {
          const IType *this_row_column_indexes = column_indexes_ptr + row_item_start_iter;
          const DType *row_data_start = in + row_item_start_iter;
          DType *output_this_row = out[i].dptr_;
          // More overhead to use OMP for small loops, so don't
          // NOTE(review): this inner pragma nests inside the outer parallel
          // for; with nested parallelism disabled (the OpenMP default) it is
          // effectively serial — confirm intent before relying on it.
          if (input_items_this_row > 1000) {
            #pragma omp parallel for
            for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
              const IType col = this_row_column_indexes[j];
              const DType val = row_data_start[j];
              output_this_row[col] = OP::Map(val, DType(alpha));
            }
          } else {
            for (CType j = 0; j < static_cast<CType>(input_items_this_row); ++j) {
              const IType col = this_row_column_indexes[j];
              const DType val = row_data_start[j];
              output_this_row[col] = OP::Map(val, DType(alpha));
            }
          }
        }
      }
    }
  }

  /*! \brief Dispatch a sparse input with dense output to the RSP or CSR
             implementation based on the input's storage type. */
  template<typename xpu, typename OP, typename DType, typename IType>
  static void ComputeExDenseResult(const nnvm::NodeAttrs &attrs,
                                   const OpContext &ctx,
                                   const NDArray &input,
                                   const OpReqType req,
                                   const NDArray output) {
    mshadow::Stream<xpu> *stream = ctx.get_stream<xpu>();
    CHECK_EQ(output.storage_type(), kDefaultStorage);
    switch (input.storage_type()) {
      case kRowSparseStorage: {
        ComputeExDenseResultRSP<OP, DType, IType>(stream, attrs, ctx, input, req, output);
        break;
      }
      case kCSRStorage: {
        // the CSR indptr may use a different index type than the row indices
        MSHADOW_IDX_TYPE_SWITCH(input.aux_data(csr::kIndPtr).type_flag_, CType, {
          ComputeExDenseResultCSR<OP, DType, IType, CType>(stream, attrs, ctx, input, req, output);
        });
        break;
      }
      default:
        CHECK(false) << "Unsupported sparse storage type";
        break;
    }
  }

 public:
  /*! \brief Dense forward: out[i] = OP(in[i], alpha) for every element. */
  template<typename xpu, typename OP>
  static void Compute(const nnvm::NodeAttrs &attrs,
                      const OpContext &ctx,
                      const std::vector<TBlob> &inputs,
                      const std::vector<OpReqType> &req,
                      const std::vector<TBlob> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    const double alpha = nnvm::get<double>(attrs.parsed);
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        mxnet_op::Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s,
                                                                      inputs[0].Size(),
                                                                      outputs[0].dptr<DType>(),
                                                                      inputs[0].dptr<DType>(),
                                                                      DType(alpha));
      });
    });
  }

  /*! \brief Sparse forward. Sparse-in/sparse-out maps onto the dense kernel
             over stored values; sparse-in/dense-out uses the dense-result
             paths (CPU only — GPU falls back to dense compute). */
  template<typename xpu, typename OP>
  static void ComputeEx(const nnvm::NodeAttrs &attrs,
                        const OpContext &ctx,
                        const std::vector<NDArray> &inputs,
                        const std::vector<OpReqType> &req,
                        const std::vector<NDArray> &outputs) {
    DCHECK_EQ(inputs.size(), 1);
    DCHECK_EQ(outputs.size(), 1);
    CHECK_NE(inputs[0].storage_type(), kDefaultStorage);
    if (outputs[0].storage_type() != kDefaultStorage) {
      CHECK_EQ(outputs[0].storage_type(), inputs[0].storage_type());
      if (req[0] != kNullOp) {
        UnaryOp::MapToFCompute<xpu>(attrs, ctx, inputs, req, outputs, Compute<xpu, OP>);
      }
    } else {
      if (typeid(xpu) == typeid(gpu)) {
        mxnet::op::FCompExFallback<xpu>(attrs, ctx, inputs, req, outputs,
                                        Compute<xpu, OP>, "ComputeEx");
      } else {
        MSHADOW_TYPE_SWITCH(outputs[0].data().type_flag_, DType, {
          MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
            ComputeExDenseResult<xpu, OP, DType, IType>(attrs, ctx, inputs[0], req[0], outputs[0]);
          });
        });
      }
    }
  }

  /*! \brief Backward: igrad = ograd * OP(lhs, alpha), where OP is the
             gradient functor registered for the operator. */
  template<typename xpu, typename OP>
  static void Backward(const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const std::vector<TBlob> &inputs,
                       const std::vector<OpReqType> &req,
                       const std::vector<TBlob> &outputs) {
    using namespace mshadow;
    using namespace mshadow::expr;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    const double alpha = nnvm::get<double>(attrs.parsed);
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      Tensor<xpu, 1, DType> igrad = outputs[0].FlatTo1D<xpu, DType>(s);
      Tensor<xpu, 1, DType> ograd = inputs[0].FlatTo1D<xpu, DType>(s);
      Tensor<xpu, 1, DType> lhs = inputs[1].FlatTo1D<xpu, DType>(s);
      ASSIGN_DISPATCH(igrad, req[0], ograd * F<OP>(lhs, scalar<DType>(DType(alpha))));
    });
  }
};

/*! \brief Common registration boilerplate for binary-scalar operators:
    one tensor input plus a "scalar" attribute parsed into attrs.parsed. */
#define MXNET_OPERATOR_REGISTER_BINARY_SCALAR(name)                 \
  NNVM_REGISTER_OP(name)                                            \
  .set_num_inputs(1)                                                \
  .set_num_outputs(1)                                               \
  .set_attr_parser([](NodeAttrs* attrs) {                           \
      attrs->parsed = std::stod(attrs->dict["scalar"]);             \
    })                                                              \
  .set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<1, 1>)  \
  .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)     \
  .set_attr<FInferStorageType>("FInferStorageType", ElemwiseStorageType<1, 1>) \
  .set_attr<nnvm::FInplaceOption>("FInplaceOption",                 \
    [](const NodeAttrs& attrs){                                     \
      return std::vector<std::pair<int, int> >{{0, 0}};             \
    })                                                              \
  .add_argument("data", "NDArray-or-Symbol", "source input")        \
  .add_argument("scalar", "float", "scalar input")

}  // namespace op
}  // namespace mxnet

#endif  // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_SCALAR_OP_H_
attribute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/identify.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/magick.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/segment.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageBoundingBox() returns the bounding box of an image canvas. % % The format of the GetImageBoundingBox method is: % % RectangleInfo GetImageBoundingBox(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o bounds: Method GetImageBoundingBox returns the bounding box of an % image canvas. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ typedef struct _EdgeInfo { double left, right, top, bottom; } EdgeInfo; static double GetEdgeBackgroundCensus(const Image *image, const CacheView *image_view,const GravityType gravity,const size_t width, const size_t height,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { CacheView *edge_view; const char *artifact; double census; Image *edge_image; PixelInfo background, pixel; RectangleInfo edge_geometry; const Quantum *p; ssize_t y; /* Determine the percent of image background for this edge. 
*/ switch (gravity) { case NorthWestGravity: case NorthGravity: default: { p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); break; } case NorthEastGravity: case EastGravity: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); break; } case SouthEastGravity: case SouthGravity: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1, (ssize_t) image->rows-1,1,1,exception); break; } case SouthWestGravity: case WestGravity: { p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); break; } } GetPixelInfoPixel(image,p,&background); artifact=GetImageArtifact(image,"background"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&background,exception); artifact=GetImageArtifact(image,"trim:background-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&background,exception); edge_geometry.width=width; edge_geometry.height=height; edge_geometry.x=x_offset; edge_geometry.y=y_offset; GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry); edge_image=CropImage(image,&edge_geometry,exception); if (edge_image == (Image *) NULL) return(0.0); census=0.0; edge_view=AcquireVirtualCacheView(edge_image,exception); for (y=0; y < (ssize_t) edge_image->rows; y++) { ssize_t x; p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) edge_image->columns; x++) { GetPixelInfoPixel(edge_image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse) census++; p+=GetPixelChannels(edge_image); } } census/=((double) edge_image->columns*edge_image->rows); edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); return(census); } static inline double GetMinEdgeBackgroundCensus(const EdgeInfo *edge) { double census; census=MagickMin(MagickMin(MagickMin(edge->left,edge->right),edge->top), 
edge->bottom);
  return(census);
}

/*
  GetEdgeBoundingBox() computes the image bounding box by iteratively
  trimming whichever edge currently has the smallest non-background census,
  until the minimum census reaches the "trim:percent-background" threshold.
  Returns the surviving region; emits a GeometryDoesNotContainImage warning
  if the region collapses to zero width or height.
*/
static RectangleInfo GetEdgeBoundingBox(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *edge_view;

  const char
    *artifact;

  double
    background_census,
    percent_background;

  EdgeInfo
    edge,
    vertex;

  Image
    *edge_image;

  RectangleInfo
    bounds;

  /*
    Get the image bounding box.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  SetGeometry(image,&bounds);
  edge_image=CloneImage(image,0,0,MagickTrue,exception);
  if (edge_image == (Image *) NULL)
    return(bounds);
  (void) ParseAbsoluteGeometry("0x0+0+0",&edge_image->page);
  (void) memset(&vertex,0,sizeof(vertex));
  edge_view=AcquireVirtualCacheView(edge_image,exception);
  /* Initial non-background census for each of the four image edges. */
  edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,WestGravity,
    1,0,0,0,exception);
  edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,EastGravity,
    1,0,0,0,exception);
  edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,NorthGravity,
    0,1,0,0,exception);
  edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,SouthGravity,
    0,1,0,0,exception);
  percent_background=1.0;
  artifact=GetImageArtifact(edge_image,"trim:percent-background");
  if (artifact != (const char *) NULL)
    percent_background=StringToDouble(artifact,(char **) NULL)/100.0;
  /* Convert to a non-background fraction threshold, clamped to (0,1]. */
  percent_background=MagickMin(MagickMax(1.0-percent_background,
    MagickEpsilon),1.0);
  background_census=GetMinEdgeBackgroundCensus(&edge);
  for ( ; background_census < percent_background;
          background_census=GetMinEdgeBackgroundCensus(&edge))
  {
    if ((bounds.width == 0) || (bounds.height == 0))
      break;
    if (fabs(edge.left-background_census) < MagickEpsilon)
      {
        /*
          Trim left edge.
        */
        vertex.left++;
        bounds.width--;
        edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,
          SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.bottom,exception);
        continue;
      }
    if (fabs(edge.right-background_census) < MagickEpsilon)
      {
        /*
          Trim right edge.
        */
        vertex.right++;
        bounds.width--;
        edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
          vertex.top,exception);
        edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,
          SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.bottom,exception);
        continue;
      }
    if (fabs(edge.top-background_census) < MagickEpsilon)
      {
        /*
          Trim top edge.
        */
        vertex.top++;
        bounds.height--;
        edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
          vertex.top,exception);
        edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        continue;
      }
    if (fabs(edge.bottom-background_census) < MagickEpsilon)
      {
        /*
          Trim bottom edge.
        */
        vertex.bottom++;
        bounds.height--;
        edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t)
          vertex.top,exception);
        edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,
          NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t)
          vertex.top,exception);
        edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,
          SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t)
          vertex.bottom,exception);
        continue;
      }
  }
  edge_view=DestroyCacheView(edge_view);
  edge_image=DestroyImage(edge_image);
  bounds.x=(ssize_t) vertex.left;
  bounds.y=(ssize_t) vertex.top;
  if ((bounds.width == 0) || (bounds.height == 0))
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
      "GeometryDoesNotContainImage","`%s'",image->filename);
  return(bounds);
}

/*
  GetImageBoundingBox() returns the bounding box of the image canvas:
  the smallest rectangle enclosing pixels that differ from the corner
  "target" colors.  Honors the "trim:percent-background" artifact (delegates
  to GetEdgeBoundingBox) and the "trim:edges" artifact (restricts which
  edges may be trimmed).
*/
MagickExport RectangleInfo GetImageBoundingBox(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  const char
    *artifact;

  MagickBooleanType
    status;

  PixelInfo
    target[4],
    zero;

  RectangleInfo
    bounds;

  const Quantum
    *p;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  artifact=GetImageArtifact(image,"trim:percent-background");
  if (artifact != (const char *) NULL)
    return(GetEdgeBoundingBox(image,exception));
  artifact=GetImageArtifact(image, "trim:edges");
  if (artifact == (const char *) NULL)
    {
      /* Seed the bounds with sentinel values the scan loop will shrink. */
      bounds.width=image->columns == 1 ? 1 : 0;
      bounds.height=image->rows == 1 ?
1 : 0;
      bounds.x=(ssize_t) image->columns;
      bounds.y=(ssize_t) image->rows;
    }
  else
    {
      char
        *edges,
        *q,
        *r;

      /*
        "trim:edges" lists the edges eligible for trimming; each named edge
        resets the corresponding sentinel so that edge can shrink.
      */
      bounds.width=(size_t) image->columns;
      bounds.height=(size_t) image->rows;
      bounds.x=0;
      bounds.y=0;
      edges=AcquireString(artifact);
      r=edges;
      while ((q=StringToken(",",&r)) != (char *) NULL)
      {
        if (LocaleCompare(q,"north") == 0)
          bounds.y=(ssize_t) image->rows;
        if (LocaleCompare(q,"east") == 0)
          bounds.width=0;
        if (LocaleCompare(q,"south") == 0)
          bounds.height=0;
        if (LocaleCompare(q,"west") == 0)
          bounds.x=(ssize_t) image->columns;
      }
      edges=DestroyString(edges);
    }
  /* Sample the four corner pixels as trim target colors. */
  GetPixelInfo(image,&target[0]);
  image_view=AcquireVirtualCacheView(image,exception);
  p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
  if (p == (const Quantum *) NULL)
    {
      image_view=DestroyCacheView(image_view);
      return(bounds);
    }
  GetPixelInfoPixel(image,p,&target[0]);
  GetPixelInfo(image,&target[1]);
  p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1,
    exception);
  if (p != (const Quantum *) NULL)
    GetPixelInfoPixel(image,p,&target[1]);
  GetPixelInfo(image,&target[2]);
  p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
    exception);
  if (p != (const Quantum *) NULL)
    GetPixelInfoPixel(image,p,&target[2]);
  p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,(ssize_t)
    image->rows-1,1,1,exception);
  if (p != (const Quantum *) NULL)
    GetPixelInfoPixel(image,p,&target[3]);
  status=MagickTrue;
  GetPixelInfo(image,&zero);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    RectangleInfo
      bounding_box;

    const Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /* Snapshot the shared bounds under the critical section. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    bounding_box=bounds;
    q=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      GetPixelInfoPixel(image,q,&pixel);
      /* Grow the row-local box for each non-target pixel encountered. */
      if ((x < bounding_box.x) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse))
        bounding_box.x=x;
      if ((x > (ssize_t) bounding_box.width) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[1]) == MagickFalse))
        bounding_box.width=(size_t) x;
      if ((y < bounding_box.y) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse))
        bounding_box.y=y;
      if ((y > (ssize_t) bounding_box.height) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[2]) == MagickFalse))
        bounding_box.height=(size_t) y;
      if ((x < (ssize_t) bounding_box.width) &&
          (y > (ssize_t) bounding_box.height) &&
          (IsFuzzyEquivalencePixelInfo(&pixel,&target[3]) == MagickFalse))
        {
          bounding_box.width=(size_t) x;
          bounding_box.height=(size_t) y;
        }
      q+=GetPixelChannels(image);
    }
    /* Merge this row's box into the shared result. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_GetImageBoundingBox)
#endif
    {
      if (bounding_box.x < bounds.x)
        bounds.x=bounding_box.x;
      if (bounding_box.y < bounds.y)
        bounds.y=bounding_box.y;
      if (bounding_box.width > bounds.width)
        bounds.width=bounding_box.width;
      if (bounding_box.height > bounds.height)
        bounds.height=bounding_box.height;
    }
  }
  image_view=DestroyCacheView(image_view);
  if ((bounds.width == 0) || (bounds.height == 0))
    (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
      "GeometryDoesNotContainImage","`%s'",image->filename);
  else
    {
      /* Convert max coordinates into width/height (inclusive extents). */
      bounds.width-=(bounds.x-1);
      bounds.height-=(bounds.y-1);
    }
  return(bounds);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e C o n v e x H u l l                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageConvexHull() returns the convex hull points of an image canvas.
%
%  The format of the GetImageConvexHull method is:
%
%      PointInfo *GetImageConvexHull(const Image *image,
%        size_t *number_vertices,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o number_vertices: the number of vertices in the convex hull.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  LexicographicalOrder() returns the 2-D cross product of (b-a) and (c-a):
  positive for a counter-clockwise turn, negative for clockwise, zero when
  the three points are collinear.  Used to build the monotone-chain hull.
*/
static double LexicographicalOrder(PointInfo *a,PointInfo *b,PointInfo *c)
{
  /*
    Order by x-coordinate, and in case of a tie, by y-coordinate.
  */
  return((b->x-a->x)*(c->y-a->y)-(b->y-a->y)*(c->x-a->x));
}

/*
  GetEdgeBackgroundColor() returns the edge color with the highest census
  (i.e. the candidate whose strip differs least-often from the others was
  NOT chosen -- the candidate with the largest non-matching count wins).
*/
static PixelInfo GetEdgeBackgroundColor(const Image *image,
  const CacheView *image_view,ExceptionInfo *exception)
{
  const char
    *artifact;

  double
    census[4],
    edge_census;

  PixelInfo
    background[4],
    edge_background;

  ssize_t
    i;

  /*
    Most dominant color of edges/corners is the background color of the
    image.
  */
  memset(&edge_background,0,sizeof(edge_background));
  artifact=GetImageArtifact(image,"convex-hull:background-color");
  if (artifact == (const char *) NULL)
    artifact=GetImageArtifact(image,"background");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static)
#endif
  for (i=0; i < 4; i++)
  {
    CacheView
      *edge_view;

    GravityType
      gravity;

    Image
      *edge_image;

    PixelInfo
      pixel;

    RectangleInfo
      edge_geometry;

    const Quantum
      *p;

    ssize_t
      y;

    census[i]=0.0;
    (void) memset(&edge_geometry,0,sizeof(edge_geometry));
    switch (i)
    {
      case 0:
      default:
      {
        p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1,
          exception);
        gravity=WestGravity;
        edge_geometry.width=1;
        edge_geometry.height=0;
        break;
      }
      case 1:
      {
        p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,
          1,exception);
        gravity=EastGravity;
        edge_geometry.width=1;
        edge_geometry.height=0;
        break;
      }
      case 2:
      {
        p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception);
        gravity=NorthGravity;
        edge_geometry.width=0;
        edge_geometry.height=1;
        break;
      }
      case 3:
      {
        p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,
          (ssize_t) image->rows-1,1,1,exception);
        gravity=SouthGravity;
        edge_geometry.width=0;
        edge_geometry.height=1;
        break;
      }
    }
    GetPixelInfoPixel(image,p,background+i);
    if (artifact != (const char *) NULL)
      (void) QueryColorCompliance(artifact,AllCompliance,background+i,
        exception);
    GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry);
    edge_image=CropImage(image,&edge_geometry,exception);
    if (edge_image == (Image *) NULL)
      continue;
    edge_view=AcquireVirtualCacheView(edge_image,exception);
    for (y=0; y < (ssize_t) edge_image->rows; y++)
    {
      ssize_t
        x;

      p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,
        exception);
      if (p == (const Quantum *) NULL)
        break;
      for (x=0; x < (ssize_t) edge_image->columns; x++)
      {
        GetPixelInfoPixel(edge_image,p,&pixel);
        if (IsFuzzyEquivalencePixelInfo(&pixel,background+i) == MagickFalse)
          census[i]++;
        p+=GetPixelChannels(edge_image);
      }
    }
    edge_view=DestroyCacheView(edge_view);
    edge_image=DestroyImage(edge_image);
  }
  /* Select the candidate with the highest census. */
  edge_census=(-1.0);
  for (i=0; i < 4; i++)
    if (census[i] > edge_census)
      {
        edge_background=background[i];
        edge_census=census[i];
      }
  return(edge_background);
}

/*
  TraceConvexHull() builds the convex hull of the given vertices with
  Andrew's monotone-chain algorithm.  The caller supplies a pre-allocated
  chain of pointers (monotone_chain); on return chain_length holds the
  number of hull vertices.
*/
void TraceConvexHull(PointInfo *vertices,size_t number_vertices,
  PointInfo ***monotone_chain,size_t *chain_length)
{
  PointInfo
    **chain;

  ssize_t
    i;

  size_t
    demark,
    n;

  /*
    Construct the upper and lower hulls: rightmost to leftmost
    counterclockwise.
  */
  chain=(*monotone_chain);
  n=0;
  for (i=0; i < (ssize_t) number_vertices; i++)
  {
    /* Drop points that would create a clockwise (or collinear) turn. */
    while ((n >= 2) &&
           (LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0))
      n--;
    chain[n++]=(&vertices[i]);
  }
  demark=n+1;
  for (i=(ssize_t) number_vertices-2; i >= 0; i--)
  {
    while ((n >= demark) &&
           (LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0))
      n--;
    chain[n++]=(&vertices[i]);
  }
  *chain_length=n;
}

MagickExport PointInfo *GetImageConvexHull(const Image *image,
  size_t *number_vertices,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MemoryInfo
    *monotone_info,
    *vertices_info;

  PixelInfo
    background;

  PointInfo
    *convex_hull,
    **monotone_chain,
    *vertices;

  size_t
    n;

  ssize_t
    y;

  /*
    Identify convex hull vertices of image foreground object(s).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *number_vertices=0;
  vertices_info=AcquireVirtualMemory(image->columns,image->rows*
    sizeof(*vertices));
  monotone_info=AcquireVirtualMemory(2*image->columns,2*
    image->rows*sizeof(*monotone_chain));
  if ((vertices_info == (MemoryInfo *) NULL) ||
      (monotone_info == (MemoryInfo *) NULL))
    {
      if (monotone_info != (MemoryInfo *) NULL)
        monotone_info=(MemoryInfo *) RelinquishVirtualMemory(monotone_info);
      if (vertices_info != (MemoryInfo *) NULL)
        vertices_info=RelinquishVirtualMemory(vertices_info);
      return((PointInfo *) NULL);
    }
  vertices=(PointInfo *) GetVirtualMemoryBlob(vertices_info);
  monotone_chain=(PointInfo **) GetVirtualMemoryBlob(monotone_info);
  image_view=AcquireVirtualCacheView(image,exception);
  background=GetEdgeBackgroundColor(image,image_view,exception);
  status=MagickTrue;
  n=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelInfo
        pixel;

      GetPixelInfoPixel(image,p,&pixel);
      /* Every non-background pixel is a candidate hull vertex. */
      if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse)
        {
          vertices[n].x=(double) x;
          vertices[n].y=(double) y;
          n++;
        }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Return the convex hull of the image foreground object(s).
  */
  TraceConvexHull(vertices,n,&monotone_chain,number_vertices);
  convex_hull=(PointInfo *) AcquireQuantumMemory(*number_vertices,
    sizeof(*convex_hull));
  if (convex_hull != (PointInfo *) NULL)
    for (n=0; n < *number_vertices; n++)
      convex_hull[n]=(*monotone_chain[n]);
  monotone_info=RelinquishVirtualMemory(monotone_info);
  vertices_info=RelinquishVirtualMemory(vertices_info);
  return(convex_hull);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e D e p t h                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDepth() returns the depth of a particular image channel.
%
%  The format of the GetImageDepth method is:
%
%      size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  ssize_t
    i;

  size_t
    *current_depth,
    depth,
    number_threads;

  ssize_t
    y;

  /*
    Compute image depth.
*/
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* One depth accumulator per potential worker thread. */
  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  current_depth=(size_t *) AcquireQuantumMemory(number_threads,
    sizeof(*current_depth));
  if (current_depth == (size_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  status=MagickTrue;
  for (i=0; i < (ssize_t) number_threads; i++)
    current_depth[i]=1;
  if ((image->storage_class == PseudoClass) &&
      (image->alpha_trait == UndefinedPixelTrait))
    {
      /*
        Palette image without alpha: only the colormap entries need to be
        inspected.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        const int
          id = GetOpenMPThreadId();

        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          MagickBooleanType
            atDepth;

          QuantumAny
            range;

          atDepth=MagickTrue;
          range=GetQuantumRange(current_depth[id]);
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].red),range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse) &&
              (GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].green),range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse) &&
              (GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].blue),range) == MagickFalse)
              atDepth=MagickFalse;
          if ((atDepth != MagickFalse))
            break;
          current_depth[id]++;
        }
      }
      depth=current_depth[0];
      for (i=1; i < (ssize_t) number_threads; i++)
        if (depth < current_depth[i])
          depth=current_depth[i];
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
  image_view=AcquireVirtualCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  DisableMSCWarning(4127)
  if ((1UL*QuantumRange) <= MaxMap)
  RestoreMSCWarning
    {
      size_t
        *depth_map;

      /*
        Scale pixels to desired (optimized with depth map).
      */
      depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (size_t *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      /* Precompute the minimal depth that round-trips each quantum value. */
      for (i=0; i <= (ssize_t) MaxMap; i++)
      {
        for (depth=1; depth < (size_t) MAGICKCORE_QUANTUM_DEPTH; depth++)
        {
          Quantum
            pixel;

          QuantumAny
            range;

          range=GetQuantumRange(depth);
          pixel=(Quantum) i;
          if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range))
            break;
        }
        depth_map[i]=depth;
      }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        const Quantum
          *magick_restrict p;

        ssize_t
          x;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
        if (p == (const Quantum *) NULL)
          continue;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          ssize_t
            j;

          for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,j);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            if (depth_map[ScaleQuantumToMap(p[j])] > current_depth[id])
              current_depth[id]=depth_map[ScaleQuantumToMap(p[j])];
          }
          p+=GetPixelChannels(image);
        }
        /* Maximum possible depth reached; let other rows short-circuit. */
        if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      depth=current_depth[0];
      for (i=1; i < (ssize_t) number_threads; i++)
        if (depth < current_depth[i])
          depth=current_depth[i];
      depth_map=(size_t *) RelinquishMagickMemory(depth_map);
      current_depth=(size_t *) RelinquishMagickMemory(current_depth);
      return(depth);
    }
#endif
  /*
    Compute pixel depth.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      continue;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        j;

      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel
          channel;

        PixelTrait
          traits;

        channel=GetPixelChannelChannel(image,j);
        traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* Raise the depth until this sample round-trips losslessly. */
        while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH)
        {
          QuantumAny
            range;

          range=GetQuantumRange(current_depth[id]);
          if (p[j] == ScaleAnyToQuantum(ScaleQuantumToAny(p[j],range),range))
            break;
          current_depth[id]++;
        }
      }
      p+=GetPixelChannels(image);
    }
    if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  depth=current_depth[0];
  for (i=1; i < (ssize_t) number_threads; i++)
    if (depth < current_depth[i])
      depth=current_depth[i];
  current_depth=(size_t *) RelinquishMagickMemory(current_depth);
  return(depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e M i n i m u m B o u n d i n g B o x                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageMinimumBoundingBox() returns the points that form the minimum
%  bounding box around the image foreground objects with the "Rotating
%  Calipers" algorithm.  The method also returns these properties:
%  minimum-bounding-box:area, minimum-bounding-box:width,
%  minimum-bounding-box:height, and minimum-bounding-box:angle.
%
%  The format of the GetImageMinimumBoundingBox method is:
%
%      PointInfo *GetImageMinimumBoundingBox(Image *image,
%        size_t *number_vertices,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o number_vertices: the number of vertices in the bounding box.
%
%    o exception: return any errors or warnings in this structure.
%
*/

typedef struct _CaliperInfo
{
  double
    area,
    width,
    height,
    projection;

  ssize_t
    p,   /* index of first vertex of the supporting hull edge */
    q,   /* index of second vertex of the supporting hull edge */
    v;   /* index of the vertex at maximum Feret diameter */
} CaliperInfo;

/*
  getAngle() returns the angle between line (p,q) and the horizontal axis,
  in degrees.
*/
static inline double getAngle(PointInfo *p,PointInfo *q)
{
  /*
    Get the angle between line (p,q) and horizontal axis, in degrees.
  */
  return(RadiansToDegrees(atan2(q->y-p->y,q->x-p->x)));
}

/* getDistance() returns the SQUARED Euclidean distance between p and q. */
static inline double getDistance(PointInfo *p,PointInfo *q)
{
  double
    distance;

  distance=hypot(p->x-q->x,p->y-q->y);
  return(distance*distance);
}

static inline double getProjection(PointInfo *p,PointInfo *q,PointInfo *v)
{
  double
    distance;

  /*
    Projection of vector (x,y) - p into a line passing through p and q.
  */
  distance=getDistance(p,q);
  if (distance < MagickEpsilon)
    return(INFINITY);
  return((q->x-p->x)*(v->x-p->x)+(v->y-p->y)*(q->y-p->y))/sqrt(distance);
}

static inline double getFeretDiameter(PointInfo *p,PointInfo *q,PointInfo *v)
{
  double
    distance;

  /*
    Distance from a point (x,y) to a line passing through p and q.
  */
  distance=getDistance(p,q);
  if (distance < MagickEpsilon)
    return(INFINITY);
  return((q->x-p->x)*(v->y-p->y)-(v->x-p->x)*(q->y-p->y))/sqrt(distance);
}

MagickExport PointInfo *GetImageMinimumBoundingBox(Image *image,
  size_t *number_vertices,ExceptionInfo *exception)
{
  CaliperInfo
    caliper_info;

  const char
    *artifact;

  double
    angle,
    diameter,
    distance;

  PointInfo
    *bounding_box,
    *vertices;

  ssize_t
    i;

  size_t
    number_hull_vertices;

  /*
    Generate the minimum bounding box with the "Rotating Calipers"
    algorithm.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  *number_vertices=0;
  vertices=GetImageConvexHull(image,&number_hull_vertices,exception);
  if (vertices == (PointInfo *) NULL)
    return((PointInfo *) NULL);
  *number_vertices=4;
  bounding_box=(PointInfo *) AcquireQuantumMemory(*number_vertices,
    sizeof(*bounding_box));
  if (bounding_box == (PointInfo *) NULL)
    {
      vertices=(PointInfo *) RelinquishMagickMemory(vertices);
      return((PointInfo *) NULL);
    }
  /* Seed with values any real candidate box will beat. */
  caliper_info.area=2.0*image->columns*image->rows;
  caliper_info.width=(double) image->columns+image->rows;
  caliper_info.height=0.0;
  caliper_info.projection=0.0;
  caliper_info.p=(-1);
  caliper_info.q=(-1);
  caliper_info.v=(-1);
  for (i=0; i < (ssize_t) number_hull_vertices; i++)
  {
    double
      area = 0.0,
      max_projection = 0.0,
      min_diameter = -1.0,  /* NOTE(review): despite the name, this tracks
                               the MAXIMUM Feret diameter for edge (i,i+1) */
      min_projection = 0.0;

    ssize_t
      j,
      k;

    ssize_t
      p = -1,
      q = -1,
      v = -1;

    for (j=0; j < (ssize_t) number_hull_vertices; j++)
    {
      diameter=fabs(getFeretDiameter(&vertices[i],
        &vertices[(i+1) % number_hull_vertices],&vertices[j]));
      if (min_diameter < diameter)
        {
          min_diameter=diameter;
          p=i;
          q=(i+1) % number_hull_vertices;
          v=j;
        }
    }
    for (k=0; k < (ssize_t) number_hull_vertices; k++)
    {
      double
        projection;

      /*
        Rotating calipers.
      */
      projection=getProjection(&vertices[p],&vertices[q],&vertices[k]);
      min_projection=MagickMin(min_projection,projection);
      max_projection=MagickMax(max_projection,projection);
    }
    area=min_diameter*(max_projection-min_projection);
    /* Keep the orientation with the smallest enclosing-box area. */
    if (caliper_info.area > area)
      {
        caliper_info.area=area;
        caliper_info.width=min_diameter;
        caliper_info.height=max_projection-min_projection;
        caliper_info.projection=max_projection;
        caliper_info.p=p;
        caliper_info.q=q;
        caliper_info.v=v;
      }
  }
  /*
    Initialize minimum bounding box.
  */
  diameter=getFeretDiameter(&vertices[caliper_info.p],
    &vertices[caliper_info.q],&vertices[caliper_info.v]);
  angle=atan2(vertices[caliper_info.q].y-vertices[caliper_info.p].y,
    vertices[caliper_info.q].x-vertices[caliper_info.p].x);
  bounding_box[0].x=vertices[caliper_info.p].x+cos(angle)*
    caliper_info.projection;
  bounding_box[0].y=vertices[caliper_info.p].y+sin(angle)*
    caliper_info.projection;
  bounding_box[1].x=floor(bounding_box[0].x+cos(angle+MagickPI/2.0)*diameter+
    0.5);
  bounding_box[1].y=floor(bounding_box[0].y+sin(angle+MagickPI/2.0)*diameter+
    0.5);
  bounding_box[2].x=floor(bounding_box[1].x+cos(angle)*(-caliper_info.height)+
    0.5);
  bounding_box[2].y=floor(bounding_box[1].y+sin(angle)*(-caliper_info.height)+
    0.5);
  bounding_box[3].x=floor(bounding_box[2].x+cos(angle+MagickPI/2.0)*(-diameter)+
    0.5);
  bounding_box[3].y=floor(bounding_box[2].y+sin(angle+MagickPI/2.0)*(-diameter)+
    0.5);
  /*
    Export minimum bounding box properties.
  */
  (void) FormatImageProperty(image,"minimum-bounding-box:area","%.*g",
    GetMagickPrecision(),caliper_info.area);
  (void) FormatImageProperty(image,"minimum-bounding-box:width","%.*g",
    GetMagickPrecision(),caliper_info.width);
  (void) FormatImageProperty(image,"minimum-bounding-box:height","%.*g",
    GetMagickPrecision(),caliper_info.height);
  (void) FormatImageProperty(image,"minimum-bounding-box:_p","%.*g,%.*g",
    GetMagickPrecision(),vertices[caliper_info.p].x,
    GetMagickPrecision(),vertices[caliper_info.p].y);
  (void) FormatImageProperty(image,"minimum-bounding-box:_q","%.*g,%.*g",
    GetMagickPrecision(),vertices[caliper_info.q].x,
    GetMagickPrecision(),vertices[caliper_info.q].y);
  (void) FormatImageProperty(image,"minimum-bounding-box:_v","%.*g,%.*g",
    GetMagickPrecision(),vertices[caliper_info.v].x,
    GetMagickPrecision(),vertices[caliper_info.v].y);
  /*
    Find smallest angle to origin.
  */
  distance=hypot(bounding_box[0].x,bounding_box[0].y);
  angle=getAngle(&bounding_box[0],&bounding_box[1]);
  for (i=1; i < 4; i++)
  {
    double
      d = hypot(bounding_box[i].x,bounding_box[i].y);

    if (d < distance)
      {
        distance=d;
        angle=getAngle(&bounding_box[i],&bounding_box[(i+1) % 4]);
      }
  }
  artifact=GetImageArtifact(image,"minimum-bounding-box:orientation");
  if (artifact != (const char *) NULL)
    {
      double
        length,
        q_length,
        p_length;

      PointInfo
        delta,
        point;

      /*
        Find smallest perpendicular distance from edge to origin.
      */
      point=bounding_box[0];
      for (i=1; i < 4; i++)
      {
        if (bounding_box[i].x < point.x)
          point.x=bounding_box[i].x;
        if (bounding_box[i].y < point.y)
          point.y=bounding_box[i].y;
      }
      /* Translate the box so its minimum corner sits at the origin. */
      for (i=0; i < 4; i++)
      {
        bounding_box[i].x-=point.x;
        bounding_box[i].y-=point.y;
      }
      for (i=0; i < 4; i++)
      {
        double
          d,
          intercept,
          slope;

        delta.x=bounding_box[(i+1) % 4].x-bounding_box[i].x;
        delta.y=bounding_box[(i+1) % 4].y-bounding_box[i].y;
        slope=delta.y*PerceptibleReciprocal(delta.x);
        intercept=bounding_box[(i+1) % 4].y-slope*bounding_box[i].x;
        d=fabs((slope*bounding_box[i].x-bounding_box[i].y+intercept)*
          PerceptibleReciprocal(sqrt(slope*slope+1.0)));
        if ((i == 0) || (d < distance))
          {
            distance=d;
            point=delta;
          }
      }
      angle=RadiansToDegrees(atan(point.y*PerceptibleReciprocal(point.x)));
      length=hypot(point.x,point.y);
      p_length=fabs((double) MagickMax(caliper_info.width,caliper_info.height)-
        length);
      q_length=fabs(length-(double) MagickMin(caliper_info.width,
        caliper_info.height));
      /* Rotate the reported angle by 90 degrees to honor the requested
         landscape/portrait orientation. */
      if (LocaleCompare(artifact,"landscape") == 0)
        {
          if (p_length > q_length)
            angle+=(angle < 0.0) ? 90.0 : -90.0;
        }
      else
        if (LocaleCompare(artifact,"portrait") == 0)
          {
            if (p_length < q_length)
              angle+=(angle >= 0.0) ? 90.0 : -90.0;
          }
    }
  (void) FormatImageProperty(image,"minimum-bounding-box:angle","%.*g",
    GetMagickPrecision(),angle);
  (void) FormatImageProperty(image,"minimum-bounding-box:unrotate","%.*g",
    GetMagickPrecision(),-angle);
  vertices=(PointInfo *) RelinquishMagickMemory(vertices);
  return(bounding_box);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e Q u a n t u m D e p t h                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageQuantumDepth() returns the depth of the image rounded to a legal
%  quantum depth: 8, 16, or 32.
%
%  The format of the GetImageQuantumDepth method is:
%
%      size_t GetImageQuantumDepth(const Image *image,
%        const MagickBooleanType constrain)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o constrain: A value other than MagickFalse, constrains the depth to
%      a maximum of MAGICKCORE_QUANTUM_DEPTH.
%
*/
MagickExport size_t GetImageQuantumDepth(const Image *image,
  const MagickBooleanType constrain)
{
  size_t
    depth;

  depth=image->depth;
  if (depth <= 8)
    depth=8;
  else
    if (depth <= 16)
      depth=16;
    else
      if (depth <= 32)
        depth=32;
      else
        if (depth <= 64)
          depth=64;
  if (constrain != MagickFalse)
    depth=(size_t) MagickMin((double) depth,(double)
      MAGICKCORE_QUANTUM_DEPTH);
  return(depth);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e T y p e                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageType() returns the type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  The format of the GetImageType method is:
%
%      ImageType GetImageType(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport ImageType GetImageType(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->colorspace == CMYKColorspace)
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        return(ColorSeparationType);
      return(ColorSeparationAlphaType);
    }
  if (IsImageMonochrome(image) != MagickFalse)
    return(BilevelType);
  if (IsImageGray(image) != MagickFalse)
    {
      if (image->alpha_trait != UndefinedPixelTrait)
        return(GrayscaleAlphaType);
      return(GrayscaleType);
    }
  if (IsPaletteImage(image) != MagickFalse)
    {
      if (image->alpha_trait != UndefinedPixelTrait)
        return(PaletteAlphaType);
      return(PaletteType);
    }
  if (image->alpha_trait != UndefinedPixelTrait)
    return(TrueColorAlphaType);
  return(TrueColorType);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I d e n t i f y I m a g e G r a y                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageGray() returns grayscale if all the pixels in the image have
%  the same red, green, and blue intensities, and bi-level if the intensity
%  is either 0 or QuantumRange.  Otherwise undefined is returned.
%
%  The format of the IdentifyImageGray method is:
%
%      ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageGray(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  const Quantum
    *p;

  ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /* Trust a previously classified gray/bilevel type without rescanning. */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleAlphaType))
    return(image->type);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(UndefinedType);
  type=BilevelType;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelGray(image,p) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      /* Any gray pixel that is not pure black/white demotes to grayscale. */
      if ((type == BilevelType) &&
          (IsPixelMonochrome(image,p) == MagickFalse))
        type=GrayscaleType;
      p+=GetPixelChannels(image);
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if ((type == GrayscaleType) && (image->alpha_trait != UndefinedPixelTrait))
    type=GrayscaleAlphaType;
  return(type);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I d e n t i f y I m a g e M o n o c h r o m e                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageMonochrome() returns MagickTrue if all the pixels in the
%  image have the same red, green, and blue intensities and the intensity is
%  either 0 or QuantumRange.
%
%  The format of the IdentifyImageMonochrome method is:
%
%      MagickBooleanType IdentifyImageMonochrome(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    bilevel;

  ssize_t
    x;

  const Quantum
    *p;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->type == BilevelType)
    return(MagickTrue);
  if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
    return(MagickFalse);
  bilevel=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsPixelMonochrome(image,p) == MagickFalse)
        {
          bilevel=MagickFalse;
          break;
        }
      p+=GetPixelChannels(image);
    }
    if (bilevel == MagickFalse)
      break;
  }
  image_view=DestroyCacheView(image_view);
  return(bilevel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I d e n t i f y I m a g e T y p e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IdentifyImageType() returns the potential type of image:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%
%  To ensure the image type matches its potential, use SetImageType():
%
%      (void) SetImageType(image,IdentifyImageType(image,exception),
%        exception);
%
%  The format of the IdentifyImageType method is:
%
%      ImageType IdentifyImageType(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageType IdentifyImageType(const Image *image,
  ExceptionInfo *exception)
{
  ImageType
    type;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->colorspace == CMYKColorspace)
    {
      if (image->alpha_trait == UndefinedPixelTrait)
        return(ColorSeparationType);
      return(ColorSeparationAlphaType);
    }
  type=IdentifyImageGray(image,exception);
  if ((type == BilevelType) || (type == GrayscaleType) ||
      (type == GrayscaleAlphaType))
    return(type);
  if (IdentifyPaletteImage(image,exception) != MagickFalse)
    {
      if (image->alpha_trait != UndefinedPixelTrait)
        return(PaletteAlphaType);
      return(PaletteType);
    }
  if (image->alpha_trait != UndefinedPixelTrait)
    return(TrueColorAlphaType);
  return(TrueColorType);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I s I m a g e G r a y                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageGray() returns MagickTrue if the type of the image is grayscale or
%  bi-level.
%
%  The format of the IsImageGray method is:
%
%      MagickBooleanType IsImageGray(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsImageGray(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleAlphaType))
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   I s I m a g e M o n o c h r o m e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageMonochrome() returns MagickTrue if type of the image is bi-level.
%
%  The format of the IsImageMonochrome method is:
%
%      MagickBooleanType IsImageMonochrome(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport MagickBooleanType IsImageMonochrome(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  return(image->type == BilevelType ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I m a g e O p a q u e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageOpaque() returns MagickTrue if none of the pixels in the image have
%  an alpha value other than OpaqueAlpha (QuantumRange).
%
%  Will return true immediately if the alpha channel is not available.
%
%  The format of the IsImageOpaque method is:
%
%      MagickBooleanType IsImageOpaque(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsImageOpaque(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    opaque;

  const Quantum
    *q;

  ssize_t
    u,
    v;

  /*
    Inspect each pixel's alpha value; stop at the first non-opaque pixel.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->alpha_trait == UndefinedPixelTrait)
    return(MagickTrue);
  opaque=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  for (v=0; (opaque != MagickFalse) && (v < (ssize_t) image->rows); v++)
  {
    q=GetCacheViewVirtualPixels(image_view,0,v,image->columns,1,exception);
    if (q == (const Quantum *) NULL)
      {
        /* a failed pixel read counts as "not proven opaque" */
        opaque=MagickFalse;
        break;
      }
    for (u=0; u < (ssize_t) image->columns; u++)
    {
      if (GetPixelAlpha(image,q) != OpaqueAlpha)
        {
          opaque=MagickFalse;
          break;
        }
      q+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  return(opaque);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e D e p t h                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageDepth() sets the depth of the image.
%
%  The format of the SetImageDepth method is:
%
%      MagickBooleanType SetImageDepth(Image *image,const size_t depth,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel.
%
%    o depth: the image depth.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType SetImageDepth(Image *image,
  const size_t depth,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  MagickBooleanType
    status;

  QuantumAny
    range;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (depth >= MAGICKCORE_QUANTUM_DEPTH)
    {
      /* requested depth meets or exceeds the build's quantum depth: record it
         and return without touching any pixel values */
      image->depth=depth;
      return(MagickTrue);
    }
  range=GetQuantumRange(depth);
  if (image->storage_class == PseudoClass)
    {
      ssize_t
        i;

      /* quantize the colormap entries channel-by-channel before the pixels */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->colors,1)
#endif
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].red),range),range);
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].green),range),range);
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].blue),range),range);
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny(
            ClampPixel(image->colormap[i].alpha),range),range);
      }
    }
  status=MagickTrue;
  image_view=AcquireAuthenticCacheView(image,exception);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  DisableMSCWarning(4127)
  /* non-HDRI fast path: when the whole quantum range fits in the lookup map,
     precompute the depth-reduction table once and apply it per channel */
  if ((1UL*QuantumRange) <= MaxMap)
  RestoreMSCWarning
    {
      Quantum
        *depth_map;

      ssize_t
        i;

      /*
        Scale pixels to desired (optimized with depth map).
      */
      depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map));
      if (depth_map == (Quantum *) NULL)
        ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
      for (i=0; i <= (ssize_t) MaxMap; i++)
        depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range),
          range);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(image,image,image->rows,1)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        ssize_t
          x;

        Quantum
          *magick_restrict q;

        /* a prior row failed: skip remaining work but keep loop shape for
           OpenMP (no break inside a parallel for) */
        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (Quantum *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          ssize_t
            j;

          for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=GetPixelChannelChannel(image,j);
            traits=GetPixelChannelTraits(image,channel);
            if ((traits & UpdatePixelTrait) == 0)
              continue;
            q[j]=depth_map[ScaleQuantumToMap(q[j])];
          }
          q+=GetPixelChannels(image);
        }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          {
            status=MagickFalse;
            continue;
          }
      }
      image_view=DestroyCacheView(image_view);
      depth_map=(Quantum *) RelinquishMagickMemory(depth_map);
      /* only record the new depth if every row was updated successfully */
      if (status != MagickFalse)
        image->depth=depth;
      return(status);
    }
#endif
  /*
    Scale pixels to desired depth.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    ssize_t
      x;

    Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* round-trip through the reduced range to quantize the channel */
        q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel((MagickRealType)
          q[i]),range),range);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      {
        status=MagickFalse;
        continue;
      }
  }
  image_view=DestroyCacheView(image_view);
  if (status != MagickFalse)
    image->depth=depth;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t I m a g e T y p e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageType() sets the type of image.  Choose from these types:
%
%        Bilevel         Grayscale        GrayscaleMatte
%        Palette         PaletteMatte     TrueColor
%        TrueColorMatte  ColorSeparation  ColorSeparationMatte
%        OptimizeType
%
%  The format of the SetImageType method is:
%
%      MagickBooleanType SetImageType(Image *image,const ImageType type,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o type: Image type.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type,
  ExceptionInfo *exception)
{
  const char
    *artifact;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  QuantizeInfo
    *quantize_info;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  status=MagickTrue;
  image_info=AcquireImageInfo();
  /* honor a per-image "dither" artifact when quantizing below */
  image_info->dither=image->dither;
  artifact=GetImageArtifact(image,"dither");
  if (artifact != (const char *) NULL)
    (void) SetImageOption(image_info,"dither",artifact);
  switch (type)
  {
    case BilevelType:
    {
      /* gray colorspace, normalize contrast, then quantize to 2 colors */
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      (void) NormalizeImage(image,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->number_colors=2;
      quantize_info->colorspace=GRAYColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleType:
    {
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case GrayscaleAlphaType:
    {
      status=TransformImageColorspace(image,GRAYColorspace,exception);
      /* guarantee an alpha channel exists (opaque by default) */
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case PaletteType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      /* only quantize when the image does not already fit a 256-color map */
      if ((image->storage_class == DirectClass) || (image->colors > 256))
        {
          quantize_info=AcquireQuantizeInfo(image_info);
          quantize_info->number_colors=256;
          status=QuantizeImage(quantize_info,image,exception);
          quantize_info=DestroyQuantizeInfo(quantize_info);
        }
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case PaletteBilevelAlphaType:
    {
      ChannelType
        channel_mask;

      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      /* threshold only the alpha channel to on/off, then quantize */
      channel_mask=SetImageChannelMask(image,AlphaChannel);
      (void) BilevelImage(image,(double) QuantumRange/2.0,exception);
      (void) SetImageChannelMask(image,channel_mask);
      quantize_info=AcquireQuantizeInfo(image_info);
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case PaletteAlphaType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      quantize_info=AcquireQuantizeInfo(image_info);
      quantize_info->colorspace=TransparentColorspace;
      status=QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      break;
    }
    case TrueColorType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case TrueColorAlphaType:
    {
      status=TransformImageColorspace(image,sRGBColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case ColorSeparationType:
    {
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      image->alpha_trait=UndefinedPixelTrait;
      break;
    }
    case ColorSeparationAlphaType:
    {
      status=TransformImageColorspace(image,CMYKColorspace,exception);
      if (image->storage_class != DirectClass)
        status=SetImageStorageClass(image,DirectClass,exception);
      if (image->alpha_trait == UndefinedPixelTrait)
        status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
      break;
    }
    case OptimizeType:
    case UndefinedType:
      break;
  }
  image_info=DestroyImageInfo(image_info);
  /* on any failure, leave image->type unchanged */
  if (status == MagickFalse)
    return(status);
  image->type=type;
  return(MagickTrue);
}
simulation_context_base.h
// Copyright (c) 2013-2017 Anton Kozhevnikov, Thomas Schulthess // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that // the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the // following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions // and the following disclaimer in the documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /** \file simulation_context_base.h * * \brief Contains definition and implementation of Simulation_context_base class. */ #ifndef __SIMULATION_CONTEXT_BASE_H__ #define __SIMULATION_CONTEXT_BASE_H__ #include <algorithm> #include "version.h" #include "simulation_parameters.h" #include "mpi_grid.hpp" #include "radial_integrals.h" #ifdef __GPU extern "C" void generate_phase_factors_gpu(int num_gvec_loc__, int num_atoms__, int const* gvec__, double const* atom_pos__, double_complex* phase_factors__); #endif namespace sirius { /// Base class for Simulation_context. 
class Simulation_context_base: public Simulation_parameters
{
  private:

    /// Communicator for this simulation.
    Communicator const& comm_;

    /// Auxiliary communicator for the fine-grid FFT transformation.
    Communicator comm_ortho_fft_;

    /// Auxiliary communicator for the coarse-grid FFT transformation.
    Communicator comm_ortho_fft_coarse_;

    /// Communicator orthogonal to the coarse-grid FFT communicator within the band communicator.
    Communicator comm_band_ortho_fft_coarse_;

    /// Unit cell of the simulation.
    Unit_cell unit_cell_;

    /// MPI grid for this simulation.
    std::unique_ptr<MPI_grid> mpi_grid_;

    /// 2D BLACS grid for distributed linear algebra operations.
    std::unique_ptr<BLACS_grid> blacs_grid_;

    /// Fine-grained FFT for density and potential.
    /** This is the FFT driver to transform periodic functions such as density and potential on the fine-grained
     *  FFT grid. The transformation is parallel. */
    std::unique_ptr<FFT3D> fft_;

    /// Coarse-grained FFT for application of local potential and density summation.
    std::unique_ptr<FFT3D> fft_coarse_;

    /// G-vectors within the Gmax cutoff.
    std::unique_ptr<Gvec> gvec_;

    /// Partitioning of the fine G-vector set for the parallel FFT.
    std::unique_ptr<Gvec_partition> gvec_partition_;

    /// G-vectors within the 2 * |Gmax^{WF}| cutoff.
    std::unique_ptr<Gvec> gvec_coarse_;

    /// Partitioning of the coarse G-vector set for the parallel FFT.
    std::unique_ptr<Gvec_partition> gvec_coarse_partition_;

    /// Mapping of G-vectors to G-shells (used for symmetrization).
    std::unique_ptr<remap_gvec_to_shells> remap_gvec_;

    /// Creation time of the parameters.
    timeval start_time_;

    /// Start time formatted as "YYYYMMDD_HHMMSS" (see start()).
    std::string start_time_tag_;

    /// Type of the standard eigen-value solver.
    ev_solver_t std_evp_solver_type_{ev_solver_t::lapack};

    /// Type of the generalized eigen-value solver.
    ev_solver_t gen_evp_solver_type_{ev_solver_t::lapack};

    /// 1D phase factors e^{i G_x r_x} for each dimension, G-component and atom.
    mdarray<double_complex, 3> phase_factors_;

    /// 1D phase factors of the symmetry operation translations.
    mdarray<double_complex, 3> sym_phase_factors_;

    /// Phase factors for atom types.
    mdarray<double_complex, 2> phase_factors_t_;

    /// Lattice coordinates of the local G-vectors (device copy used by the GPU code path).
    mdarray<int, 2> gvec_coord_;

    /// Atom coordinates per atom type (device copy used by the GPU code path).
    std::vector<mdarray<double, 2>> atom_coord_;

    /// Temporary scratch buffer handed out by memory_buffer().
    mdarray<char, 1> memory_buffer_;

    std::unique_ptr<Radial_integrals_beta<false>> beta_ri_;

    std::unique_ptr<Radial_integrals_beta<true>> beta_ri_djl_;

    std::unique_ptr<Radial_integrals_aug<false>> aug_ri_;

    std::unique_ptr<Radial_integrals_atomic_wf> atomic_wf_ri_;

    /// For each atom: list of (local FFT grid point index, distance to the atom) within av_atom_radius_.
    std::vector<std::vector<std::pair<int, double>>> atoms_to_grid_idx_;

    // TODO remove to somewhere
    const double av_atom_radius_{2.0};

    // NOTE(review): time_active_ appears unused in the visible part of this class — confirm before removing.
    double time_active_;

    bool initialized_{false};

    /// Create the dense and coarse FFT drivers and the corresponding G-vector sets.
    inline void init_fft()
    {
        auto rlv = unit_cell_.reciprocal_lattice_vectors();

        if (!(control().fft_mode_ == "serial" || control().fft_mode_ == "parallel")) {
            TERMINATE("wrong FFT mode");
        }

        /* create FFT driver for dense mesh (density and potential) */
        fft_ = std::unique_ptr<FFT3D>(new FFT3D(find_translations(pw_cutoff(), rlv), comm_fft(), processing_unit()));

        /* create FFT driver for coarse mesh */
        auto fft_coarse_grid = FFT3D_grid(find_translations(2 * gk_cutoff(), rlv));
        fft_coarse_ = std::unique_ptr<FFT3D>(new FFT3D(fft_coarse_grid, comm_fft_coarse(), processing_unit()));

        /* create a list of G-vectors for coarse FFT grid */
        gvec_coarse_ = std::unique_ptr<Gvec>(new Gvec(rlv, gk_cutoff() * 2, comm(), control().reduce_gvec_));

        gvec_coarse_partition_ = std::unique_ptr<Gvec_partition>(new Gvec_partition(*gvec_coarse_, comm_fft_coarse(), comm_ortho_fft_coarse()));

        /* create a list of G-vectors for dense FFT grid; G-vectors are divided between all available MPI ranks.*/
        //gvec_ = std::unique_ptr<Gvec>(new Gvec(rlv, pw_cutoff(), comm(), control().reduce_gvec_));
        gvec_ = std::unique_ptr<Gvec>(new Gvec(pw_cutoff(), *gvec_coarse_));

        gvec_partition_ = std::unique_ptr<Gvec_partition>(new Gvec_partition(*gvec_, comm_fft(), comm_ortho_fft()));

        remap_gvec_ = std::unique_ptr<remap_gvec_to_shells>(new remap_gvec_to_shells(comm(), gvec()));

        /* prepare fine-grained FFT driver for the entire simulation */
        fft_->prepare(*gvec_partition_);
    }

    /* copy constructor is forbidden */
    Simulation_context_base(Simulation_context_base const&) = delete;

    /// Record the creation time and its "YYYYMMDD_HHMMSS" tag.
    void start()
    {
        gettimeofday(&start_time_, NULL);
        tm const* ptm = localtime(&start_time_.tv_sec);
        char buf[100];
        strftime(buf, sizeof(buf), "%Y%m%d_%H%M%S", ptm);
        start_time_tag_ = std::string(buf);
    }

    /// For each atom, collect the local FFT grid points lying within av_atom_radius_ of the atom.
    /** Periodic images in the neighboring cells (translations -1..1 in each direction) are included so
     *  that atoms near the cell boundary are treated correctly. */
    void init_atoms_to_grid_idx()
    {
        PROFILE("sirius::Simulation_context_base::init_atoms_to_grid_idx");

        atoms_to_grid_idx_.resize(unit_cell_.num_atoms());

        /* fractional grid spacing in each direction */
        vector3d<double> delta(1.0 / fft_->grid().size(0), 1.0 / fft_->grid().size(1), 1.0 / fft_->grid().size(2));

        /* this rank owns the z-slab [z_off, z_off + local_size_z) of the FFT grid */
        int z_off = fft_->offset_z();
        vector3d<int> grid_beg(0, 0, z_off);
        vector3d<int> grid_end(fft_->grid().size(0), fft_->grid().size(1), z_off + fft_->local_size_z());

        /* approximate atom radius in bohr */
        double R = av_atom_radius_;

        /* corners of the Cartesian cube of half-width R around the origin */
        std::vector<vector3d<double>> verts_cart{{-R,-R,-R},{R,-R,-R},{-R,R,-R},{R,R,-R},{-R,-R,R},{R,-R,R},{-R,R,R},{R,R,R}};

        /* bounding box of grid indices (clamped to the local slab) around a fractional position */
        auto bounds_box = [&](vector3d<double> pos) {
            std::vector<vector3d<double>> verts;
            for (auto v : verts_cart) {
                verts.push_back(pos + unit_cell_.get_fractional_coordinates(v));
            }

            std::pair<vector3d<int>, vector3d<int>> bounds_ind;

            size_t size = verts.size();

            for (int x: {0, 1, 2}) {
                std::sort(verts.begin(), verts.end(), [x](vector3d<double>& a, vector3d<double>& b){return a[x] < b[x];});
                bounds_ind.first[x] = std::max((int)(verts[0][x] / delta[x]) - 1, grid_beg[x]);
                bounds_ind.second[x] = std::min((int)(verts[size - 1][x] / delta[x]) + 1, grid_end[x]);
            }

            return bounds_ind;
        };

        #pragma omp parallel for
        for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) {
            std::vector<std::pair<int, double>> atom_to_inds_map;

            /* loop over the atom and its periodic images in the 26 neighboring cells */
            for (int t0 = -1; t0 <= 1; t0++) {
                for (int t1 = -1; t1 <= 1; t1++) {
                    for (int t2 = -1; t2 <= 1; t2++) {
                        auto position = unit_cell_.atom(ia).position() + vector3d<double>(t0, t1, t2);
                        auto box = bounds_box(position);

                        for (int j0 = box.first[0]; j0 < box.second[0]; j0++) {
                            for (int j1 = box.first[1]; j1 < box.second[1]; j1++) {
                                for (int j2 = box.first[2]; j2 < box.second[2]; j2++) {
                                    auto dist = position - vector3d<double>(delta[0] * j0, delta[1] * j1, delta[2] * j2);
                                    auto r = unit_cell_.get_cartesian_coordinates(dist).length();
                                    auto ir = fft_->grid().index_by_coord(j0, j1, j2 - z_off);
                                    if (r <= R) {
                                        atom_to_inds_map.push_back({ir, r});
                                    }
                                }
                            }
                        }
                    }
                }
            }
            atoms_to_grid_idx_[ia] = std::move(atom_to_inds_map);
        }
    }

  public:
    /// Create an empty simulation context and import parameters from a JSON file.
    Simulation_context_base(std::string const& fname__, Communicator const& comm__)
        : comm_(comm__)
        , unit_cell_(*this, comm_)
    {
        start();
        import(fname__);
        unit_cell_.import(unit_cell_input_);
    }

    /// Create an empty simulation context without importing any parameters.
    Simulation_context_base(Communicator const& comm__)
        : comm_(comm__)
        , unit_cell_(*this, comm_)
    {
        start();
    }

    /// Initialize the simulation (can only be called once).
    void initialize();

    std::vector<std::vector<std::pair<int,double>>> const& atoms_to_grid_idx_map()
    {
        return atoms_to_grid_idx_;
    };

    double av_atom_radius()
    {
        return av_atom_radius_;
    }

    void print_info();

    Unit_cell& unit_cell()
    {
        return unit_cell_;
    }

    Unit_cell const& unit_cell() const
    {
        return unit_cell_;
    }

    inline FFT3D& fft() const
    {
        return *fft_;
    }

    inline FFT3D& fft_coarse() const
    {
        return *fft_coarse_;
    }

    Gvec const& gvec() const
    {
        return *gvec_;
    }

    Gvec_partition const& gvec_partition() const
    {
        return *gvec_partition_;
    }

    Gvec const& gvec_coarse() const
    {
        return *gvec_coarse_;
    }

    Gvec_partition const& gvec_coarse_partition() const
    {
        return *gvec_coarse_partition_;
    }

    remap_gvec_to_shells const& remap_gvec() const
    {
        return *remap_gvec_;
    }

    BLACS_grid const& blacs_grid() const
    {
        return *blacs_grid_;
    }

    /// Total communicator of the simulation.
    Communicator const& comm() const
    {
        return comm_;
    }

    /// Communicator between k-points.
    Communicator const& comm_k() const
    {
        /* 1st dimension of the MPI grid is used for k-point distribution */
        return mpi_grid_->communicator(1 << 0);
    }

    /// Band and BLACS grid communicator.
    Communicator const& comm_band() const
    {
        /* 2nd and 3rd dimensions of the MPI grid are used for parallelization inside k-point */
        return mpi_grid_->communicator(1 << 1 | 1 << 2);
    }

    /// Communicator of the dense FFT grid.
    Communicator const& comm_fft() const
    {
        /* 3rd dimension of MPI grid is used */
        return mpi_grid_->communicator(1 << 2);
    }

    Communicator const& comm_ortho_fft() const
    {
        return comm_ortho_fft_;
    }

    /// Communicator of the coarse FFT grid.
    Communicator const& comm_fft_coarse() const
    {
        /* in "serial" FFT mode each rank transforms its own coarse grid */
        if (control().fft_mode_ == "serial") {
            return mpi_comm_self();
        } else {
            return comm_fft();
        }
    }

    Communicator const& comm_ortho_fft_coarse() const
    {
        return comm_ortho_fft_coarse_;
    }

    Communicator const& comm_band_ortho_fft_coarse() const
    {
        return comm_band_ortho_fft_coarse_;
    }

    /// Create the HDF5 storage file and write the basic simulation parameters (rank 0 only, then barrier).
    void create_storage_file() const
    {
        if (comm_.rank() == 0) {
            /* create new hdf5 file */
            HDF5_tree fout(storage_file_name, hdf5_access_t::truncate);
            fout.create_node("parameters");
            fout.create_node("effective_potential");
            fout.create_node("effective_magnetic_field");
            fout.create_node("density");
            fout.create_node("magnetization");

            for (int j = 0; j < num_mag_dims(); j++) {
                fout["magnetization"].create_node(j);
                fout["effective_magnetic_field"].create_node(j);
            }

            fout["parameters"].write("num_spins", num_spins());
            fout["parameters"].write("num_mag_dims", num_mag_dims());
            fout["parameters"].write("num_bands", num_bands());

            /* store the full list of G-vector lattice coordinates */
            mdarray<int, 2> gv(3, gvec().num_gvec());
            for (int ig = 0; ig < gvec().num_gvec(); ig++) {
                auto G = gvec().gvec(ig);
                for (int x: {0, 1, 2}) {
                    gv(x, ig) = G[x];
                }
            }
            fout["parameters"].write("num_gvec", gvec().num_gvec());
            fout["parameters"].write("gvec", gv);

            fout.create_node("unit_cell");
            fout["unit_cell"].create_node("atoms");
            for (int j = 0; j < unit_cell().num_atoms(); j++) {
                fout["unit_cell"]["atoms"].create_node(j);
                fout["unit_cell"]["atoms"][j].write("mt_basis_size", unit_cell().atom(j).mt_basis_size());
            }
        }
        comm_.barrier();
    }

    /// Start time of the simulation formatted as "YYYYMMDD_HHMMSS".
    inline std::string const& start_time_tag() const
    {
        return start_time_tag_;
    }

    inline ev_solver_t std_evp_solver_type() const
    {
        return std_evp_solver_type_;
    }

    inline ev_solver_t gen_evp_solver_type() const
    {
        return gen_evp_solver_type_;
    }

    /// Create a new instance of the standard eigen-value solver.
    /* NOTE(review): std::move on the returned factory result is redundant and inhibits copy elision. */
    template <typename T>
    inline std::unique_ptr<Eigensolver<T>> std_evp_solver()
    {
        return std::move(Eigensolver_factory<T>(std_evp_solver_type_));
    }

    /// Create a new instance of the generalized eigen-value solver.
    template <typename T>
    inline std::unique_ptr<Eigensolver<T>> gen_evp_solver()
    {
        return std::move(Eigensolver_factory<T>(gen_evp_solver_type_));
    }

    /// Phase factors \f$ e^{i {\bf G} {\bf r}_{\alpha}} \f$
    /** Assembled as the product of the three precomputed 1D factors. */
    inline double_complex gvec_phase_factor(vector3d<int> G__, int ia__) const
    {
        return phase_factors_(0, G__[0], ia__) * phase_factors_(1, G__[1], ia__) * phase_factors_(2, G__[2], ia__);
    }

    /// Phase factors \f$ e^{i {\bf G} {\bf r}_{\alpha}} \f$
    inline double_complex gvec_phase_factor(int ig__, int ia__) const
    {
        return gvec_phase_factor(gvec().gvec(ig__), ia__);
    }

    inline mdarray<int, 2> const& gvec_coord() const
    {
        return gvec_coord_;
    }

    inline mdarray<double, 2> const& atom_coord(int iat__) const
    {
        return atom_coord_[iat__];
    }

    /// Generate phase factors \f$ e^{i {\bf G} {\bf r}_{\alpha}} \f$ for all atoms of a given type.
    inline void generate_phase_factors(int iat__, mdarray<double_complex, 2>& phase_factors__) const
    {
        PROFILE("sirius::Simulation_context_base::generate_phase_factors");
        int na = unit_cell_.atom_type(iat__).num_atoms();
        switch (processing_unit_) {
            case CPU: {
                #pragma omp parallel for
                for (int igloc = 0; igloc < gvec().count(); igloc++) {
                    /* global index of the locally stored G-vector */
                    int ig = gvec().offset() + igloc;
                    for (int i = 0; i < na; i++) {
                        int ia = unit_cell().atom_type(iat__).atom_id(i);
                        phase_factors__(igloc, i) = gvec_phase_factor(ig, ia);
                    }
                }
                break;
            }
            case GPU: {
#ifdef __GPU
                generate_phase_factors_gpu(gvec().count(), na, gvec_coord().at<GPU>(), atom_coord(iat__).at<GPU>(), phase_factors__.at<GPU>());
#else
                TERMINATE_NO_GPU
#endif
                break;
            }
        }
    }

    /// Make periodic function out of form factors.
    /** Return vector of plane-wave coefficients.  The result is the sum over atom types of
     *  form_factors__(iat, |G|) weighted by the conjugated structure factor of that type.
     *  Depending on the index_domain template parameter the returned vector is indexed by
     *  the local or by the global G-vector index (in the latter case it is allgathered). */
    template <index_domain_t index_domain>
    inline std::vector<double_complex> make_periodic_function(std::function<double(int, double)> form_factors__) const
    {
        PROFILE("sirius::Simulation_context_base::make_periodic_function");

        double fourpi_omega = fourpi / unit_cell_.omega();

        int ngv = (index_domain == index_domain_t::local) ? gvec().count() : gvec().num_gvec();
        std::vector<double_complex> f_pw(ngv, double_complex(0, 0));

        #pragma omp parallel for schedule(static)
        for (int igloc = 0; igloc < gvec().count(); igloc++) {
            /* global index of G-vector */
            int ig = gvec().offset() + igloc;
            double g = gvec().gvec_len(ig);

            int j = (index_domain == index_domain_t::local) ? igloc : ig;
            for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) {
                f_pw[j] += fourpi_omega * std::conj(phase_factors_t_(igloc, iat)) * form_factors__(iat, g);
            }
        }

        /* for the global domain every rank needs the full vector */
        if (index_domain == index_domain_t::global) {
            comm_.allgather(&f_pw[0], gvec().offset(), gvec().count());
        }

        /* NOTE(review): std::move on a local return value is redundant and inhibits NRVO. */
        return std::move(f_pw);
    }

    /// Return pointer to already allocated temporary memory buffer.
    /** Buffer can only grow in size. The requested buffer length is in bytes. */
    inline void* memory_buffer(size_t size__)
    {
        /* reallocate if needed */
        if (memory_buffer_.size() < size__) {
            memory_buffer_ = mdarray<char, 1>(size__);
        }
        return memory_buffer_.at<CPU>();
    }

    inline Radial_integrals_beta<false> const& beta_ri() const
    {
        return *beta_ri_;
    }

    inline Radial_integrals_beta<true> const& beta_ri_djl() const
    {
        return *beta_ri_djl_;
    }

    inline Radial_integrals_aug<false> const& aug_ri() const
    {
        return *aug_ri_;
    }

    inline Radial_integrals_atomic_wf const& atomic_wf_ri() const
    {
        return *atomic_wf_ri_;
    }

    /// Find the lambda parameter used in the Ewald summation.
/** lambda parameter scales the erfc function argument: * \f[ * {\rm erf}(\sqrt{\lambda}x) * \f] */ double ewald_lambda() { /* alpha = 1 / (2*sigma^2), selecting alpha here for better convergence */ double lambda{1}; double gmax = pw_cutoff(); double upper_bound{0}; double charge = unit_cell_.num_electrons(); /* iterate to find lambda */ do { lambda += 0.1; upper_bound = charge * charge * std::sqrt(2.0 * lambda / twopi) * gsl_sf_erfc(gmax * std::sqrt(1.0 / (4.0 * lambda))); } while (upper_bound < 1.0e-8); if (lambda < 1.5) { std::stringstream s; s << "Ewald forces error: probably, pw_cutoff is too small."; WARNING(s); } return lambda; } mdarray<double_complex, 3> const& sym_phase_factors() const { return sym_phase_factors_; } }; inline void Simulation_context_base::initialize() { PROFILE("sirius::Simulation_context_base::initialize"); /* can't initialize twice */ if (initialized_) { TERMINATE("Simulation parameters are already initialized."); } /* Gamma-point calculation and non-collinear magnetism are not compatible */ if (num_mag_dims() == 3) { set_gamma_point(false); } set_esm_type(parameters_input().esm_); set_core_relativity(parameters_input().core_relativity_); set_valence_relativity(parameters_input().valence_relativity_); /* get processing unit */ std::string pu = control().processing_unit_; if (pu == "") { #ifdef __GPU pu = "gpu"; #else pu = "cpu"; #endif } set_processing_unit(pu); /* check if we can use a GPU device */ if (processing_unit() == GPU) { #ifndef __GPU TERMINATE_NO_GPU #endif } /* check MPI grid dimensions and set a default grid if needed */ if (!control().mpi_grid_dims_.size()) { set_mpi_grid_dims({1, 1}); } if (control().mpi_grid_dims_.size() != 2) { TERMINATE("wrong MPI grid"); } int npr = control_input_.mpi_grid_dims_[0]; int npc = control_input_.mpi_grid_dims_[1]; int npb = npr * npc; int npk = comm_.size() / npb; if (npk * npb != comm_.size()) { std::stringstream s; s << "Can't divide " << comm_.size() << " ranks into groups of size " << 
npb; TERMINATE(s); } /* setup MPI grid */ mpi_grid_ = std::unique_ptr<MPI_grid>(new MPI_grid({npk, npc, npr}, comm_)); comm_ortho_fft_ = comm().split(comm_fft().rank()); comm_ortho_fft_coarse_ = comm().split(comm_fft_coarse().rank()); comm_band_ortho_fft_coarse_ = comm_band().split(comm_fft_coarse().rank()); /* can't use reduced G-vectors in LAPW code */ if (full_potential()) { control_input_.reduce_gvec_ = false; } if (!iterative_solver_input_.type_.size()) { if (full_potential()) { iterative_solver_input_.type_ = "exact"; } else { iterative_solver_input_.type_ = "davidson"; } } /* initialize variables, related to the unit cell */ unit_cell_.initialize(); /* find the cutoff for G+k vectors (derived from rgkmax (aw_cutoff here) and minimum MT radius) */ if (full_potential()) { set_gk_cutoff(aw_cutoff() / unit_cell_.min_mt_radius()); } if (!full_potential()) { set_lmax_rho(unit_cell_.lmax() * 2); set_lmax_pot(unit_cell_.lmax() * 2); set_lmax_apw(-1); } /* initialize FFT interface */ init_fft(); init_atoms_to_grid_idx(); if (comm_.rank() == 0 && control().print_memory_usage_) { MEMORY_USAGE_INFO(); } if (unit_cell_.num_atoms() != 0 && use_symmetry() && control().verification_ >= 1) { unit_cell_.symmetry().check_gvec_symmetry(gvec(), comm()); if (!full_potential()) { unit_cell_.symmetry().check_gvec_symmetry(gvec_coarse(), comm()); } } auto& fft_grid = fft().grid(); std::pair<int, int> limits(0, 0); for (int x: {0, 1, 2}) { limits.first = std::min(limits.first, fft_grid.limits(x).first); limits.second = std::max(limits.second, fft_grid.limits(x).second); } phase_factors_ = mdarray<double_complex, 3>(3, limits, unit_cell().num_atoms(), memory_t::host, "phase_factors_"); #pragma omp parallel for for (int i = limits.first; i <= limits.second; i++) { for (int ia = 0; ia < unit_cell_.num_atoms(); ia++) { auto pos = unit_cell_.atom(ia).position(); for (int x: {0, 1, 2}) { phase_factors_(x, i, ia) = std::exp(double_complex(0.0, twopi * (i * pos[x]))); } } } phase_factors_t_ 
= mdarray<double_complex, 2>(gvec().count(), unit_cell().num_atom_types()); #pragma omp parallel for schedule(static) for (int igloc = 0; igloc < gvec().count(); igloc++) { /* global index of G-vector */ int ig = gvec().offset() + igloc; for (int iat = 0; iat < unit_cell().num_atom_types(); iat++) { double_complex z(0, 0); for (int ia = 0; ia < unit_cell().atom_type(iat).num_atoms(); ia++) { z += gvec_phase_factor(ig, unit_cell().atom_type(iat).atom_id(ia)); } phase_factors_t_(igloc, iat) = z; } } if (use_symmetry()) { sym_phase_factors_ = mdarray<double_complex, 3>(3, limits, unit_cell().symmetry().num_mag_sym()); #pragma omp parallel for for (int i = limits.first; i <= limits.second; i++) { for (int isym = 0; isym < unit_cell().symmetry().num_mag_sym(); isym++) { auto t = unit_cell().symmetry().magnetic_group_symmetry(isym).spg_op.t; for (int x: {0, 1, 2}) { sym_phase_factors_(x, i, isym) = std::exp(double_complex(0.0, twopi * (i * t[x]))); } } } } int nbnd = static_cast<int>(unit_cell_.num_valence_electrons() / 2.0) + std::max(10, static_cast<int>(0.1 * unit_cell_.num_valence_electrons())); if (full_potential()) { /* take 10% of empty non-magnetic states */ if (num_fv_states() < 0) { num_fv_states(nbnd); } if (num_fv_states() < static_cast<int>(unit_cell_.num_valence_electrons() / 2.0)) { std::stringstream s; s << "not enough first-variational states : " << num_fv_states(); TERMINATE(s); } } else { if (num_mag_dims() == 3) { nbnd *= 2; } if (num_bands() < 0) { num_bands(nbnd); } } std::string evsn[] = {std_evp_solver_name(), gen_evp_solver_name()}; /* deduce the default eigen-value solver */ if (comm_band().size() == 1 || npc == 1 || npr == 1) { if (evsn[0] == "") { #if defined(__GPU) && defined(__MAGMA) evsn[0] = "magma"; #else evsn[0] = "lapack"; #endif } if (evsn[1] == "") { #if defined(__GPU) && defined(__MAGMA) evsn[1] = "magma"; #else evsn[1] = "lapack"; #endif } } else { if (evsn[0] == "") { #ifdef __SCALAPACK evsn[0] = "scalapack"; #endif #ifdef __ELPA 
evsn[0] = "elpa1"; #endif } if (evsn[1] == "") { #ifdef __SCALAPACK evsn[1] = "scalapack"; #endif #ifdef __ELPA evsn[1] = "elpa1"; #endif } } ev_solver_t* evst[] = {&std_evp_solver_type_, &gen_evp_solver_type_}; std::map<std::string, ev_solver_t> str_to_ev_solver_t = { {"lapack", ev_solver_t::lapack}, {"scalapack", ev_solver_t::scalapack}, {"elpa1", ev_solver_t::elpa1}, {"elpa2", ev_solver_t::elpa2}, {"magma", ev_solver_t::magma}, {"plasma", ev_solver_t::plasma} }; for (int i: {0, 1}) { auto name = evsn[i]; if (str_to_ev_solver_t.count(name) == 0) { std::stringstream s; s << "wrong eigen value solver " << name; TERMINATE(s); } *evst[i] = str_to_ev_solver_t[name]; } auto std_solver = std_evp_solver<double>(); auto gen_solver = gen_evp_solver<double>(); if (std_solver->is_parallel() != gen_solver->is_parallel()) { TERMINATE("both solvers must be sequential or parallel"); } /* setup BLACS grid */ if (std_solver->is_parallel()) { blacs_grid_ = std::unique_ptr<BLACS_grid>(new BLACS_grid(comm_band(), npr, npc)); } else { blacs_grid_ = std::unique_ptr<BLACS_grid>(new BLACS_grid(mpi_comm_self(), 1, 1)); } /* setup the cyclic block size */ if (cyclic_block_size() < 0) { double a = std::min(std::log2(double(num_bands()) / blacs_grid_->num_ranks_col()), std::log2(double(num_bands()) / blacs_grid_->num_ranks_row())); if (a < 1) { control_input_.cyclic_block_size_ = 2; } else { control_input_.cyclic_block_size_ = static_cast<int>(std::min(128.0, std::pow(2.0, static_cast<int>(a))) + 1e-12); } } if (processing_unit() == GPU) { gvec_coord_ = mdarray<int, 2>(gvec().count(), 3, memory_t::host | memory_t::device, "gvec_coord_"); for (int igloc = 0; igloc < gvec().count(); igloc++) { int ig = gvec().offset() + igloc; auto G = gvec().gvec(ig); for (int x: {0, 1, 2}) { gvec_coord_(igloc, x) = G[x]; } } gvec_coord_.copy<memory_t::host, memory_t::device>(); for (int iat = 0; iat < unit_cell_.num_atom_types(); iat++) { int nat = unit_cell_.atom_type(iat).num_atoms(); 
atom_coord_.push_back(std::move(mdarray<double, 2>(nat, 3, memory_t::host | memory_t::device))); for (int i = 0; i < nat; i++) { int ia = unit_cell_.atom_type(iat).atom_id(i); for (int x: {0, 1, 2}) { atom_coord_.back()(i, x) = unit_cell_.atom(ia).position()[x]; } } atom_coord_.back().copy<memory_t::host, memory_t::device>(); } } if (!full_potential()) { /* some extra length is added to cutoffs in order to interface with QE which may require ri(q) for q>cutoff */ beta_ri_ = std::unique_ptr<Radial_integrals_beta<false>>(new Radial_integrals_beta<false>(unit_cell(), gk_cutoff() + 1, settings().nprii_beta_)); beta_ri_djl_ = std::unique_ptr<Radial_integrals_beta<true>>(new Radial_integrals_beta<true>(unit_cell(), gk_cutoff() + 1, settings().nprii_beta_)); aug_ri_ = std::unique_ptr<Radial_integrals_aug<false>>(new Radial_integrals_aug<false>(unit_cell(), pw_cutoff() + 1, settings().nprii_aug_)); atomic_wf_ri_ = std::unique_ptr<Radial_integrals_atomic_wf>(new Radial_integrals_atomic_wf(unit_cell(), gk_cutoff(), 20)); } //time_active_ = -runtime::wtime(); if (control().verbosity_ >= 1 && comm().rank() == 0) { print_info(); } if (control().verbosity_ >= 3) { runtime::pstdout pout(comm()); if (comm().rank() == 0) { pout.printf("--- MPI rank placement ---\n"); } pout.printf("rank: %3i, comm_band_rank: %3i, comm_k_rank: %3i, hostname: %s\n", comm().rank(), comm_band().rank(), comm_k().rank(), runtime::hostname().c_str()); } if (comm_.rank() == 0 && control().print_memory_usage_) { MEMORY_USAGE_INFO(); } initialized_ = true; } inline void Simulation_context_base::print_info() { tm const* ptm = localtime(&start_time_.tv_sec); char buf[100]; strftime(buf, sizeof(buf), "%a, %e %b %Y %H:%M:%S", ptm); printf("\n"); printf("SIRIUS version : %2i.%02i\n", major_version, minor_version); printf("git hash : %s\n", git_hash); printf("build date : %s\n", build_date); printf("start time : %s\n", buf); printf("\n"); printf("number of MPI ranks : %i\n", comm_.size()); printf("MPI grid :"); 
for (int i = 0; i < mpi_grid_->num_dimensions(); i++) { printf(" %i", mpi_grid_->dimension_size(i)); } printf("\n"); printf("maximum number of OMP threads : %i\n", omp_get_max_threads()); std::string headers[] = {"FFT context for density and potential", "FFT context for coarse grid"}; double cutoffs[] = {pw_cutoff(), 2 * gk_cutoff()}; Communicator const* comms[] = {&comm_fft(), &comm_fft_coarse()}; FFT3D_grid const* fft_grids[] = {&fft_->grid(), &fft_coarse_->grid()}; Gvec const* gvecs[] = {&gvec(), &gvec_coarse()}; printf("\n"); for (int i = 0; i < 2; i++) { printf("%s\n", headers[i].c_str()); printf("=====================================\n"); printf(" comm size : %i\n", comms[i]->size()); printf(" plane wave cutoff : %f\n", cutoffs[i]); printf(" grid size : %i %i %i total : %i\n", fft_grids[i]->size(0), fft_grids[i]->size(1), fft_grids[i]->size(2), fft_grids[i]->size()); printf(" grid limits : %i %i %i %i %i %i\n", fft_grids[i]->limits(0).first, fft_grids[i]->limits(0).second, fft_grids[i]->limits(1).first, fft_grids[i]->limits(1).second, fft_grids[i]->limits(2).first, fft_grids[i]->limits(2).second); printf(" number of G-vectors within the cutoff : %i\n", gvecs[i]->num_gvec()); printf(" local number of G-vectors : %i\n", gvecs[i]->count()); printf(" number of G-shells : %i\n", gvecs[i]->num_shells()); printf("\n"); } unit_cell_.print_info(control().verbosity_); for (int i = 0; i < unit_cell_.num_atom_types(); i++) { unit_cell_.atom_type(i).print_info(); } printf("\n"); printf("total nuclear charge : %i\n", unit_cell().total_nuclear_charge()); printf("number of core electrons : %f\n", unit_cell().num_core_electrons()); printf("number of valence electrons : %f\n", unit_cell().num_valence_electrons()); printf("total number of electrons : %f\n", unit_cell().num_electrons()); printf("total number of aw basis functions : %i\n", unit_cell().mt_aw_basis_size()); printf("total number of lo basis functions : %i\n", unit_cell().mt_lo_basis_size()); printf("number of 
first-variational states : %i\n", num_fv_states()); printf("number of bands : %i\n", num_bands()); printf("number of spins : %i\n", num_spins()); printf("number of magnetic dimensions : %i\n", num_mag_dims()); printf("lmax_apw : %i\n", lmax_apw()); printf("lmax_rho : %i\n", lmax_rho()); printf("lmax_pot : %i\n", lmax_pot()); printf("lmax_rf : %i\n", unit_cell_.lmax()); printf("smearing width : %f\n", smearing_width()); printf("cyclic block size : %i\n", cyclic_block_size()); printf("|G+k| cutoff : %f\n", gk_cutoff()); std::string reln[] = {"valence relativity : ", "core relativity : "}; relativity_t relt[] = {valence_relativity_, core_relativity_}; for (int i = 0; i < 2; i++) { printf("%s", reln[i].c_str()); switch (relt[i]) { case relativity_t::none: { printf("none\n"); break; } case relativity_t::koelling_harmon: { printf("Koelling-Harmon\n"); break; } case relativity_t::zora: { printf("zora\n"); break; } case relativity_t::iora: { printf("iora\n"); break; } case relativity_t::dirac: { printf("Dirac\n"); break; } } } std::string evsn[] = {"standard eigen-value solver : ", "generalized eigen-value solver : "}; ev_solver_t evst[] = {std_evp_solver_type_, gen_evp_solver_type_}; for (int i = 0; i < 2; i++) { printf("%s", evsn[i].c_str()); switch (evst[i]) { case ev_solver_t::lapack: { printf("LAPACK\n"); break; } #ifdef __SCALAPACK case ev_solver_t::scalapack: { printf("ScaLAPACK\n"); break; } case ev_solver_t::elpa1: { printf("ELPA1\n"); break; } case ev_solver_t::elpa2: { printf("ELPA2\n"); break; } //case ev_rs_gpu: { // printf("RS_gpu\n"); // break; //} //case ev_rs_cpu: { // printf("RS_cpu\n"); // break; //} #endif case ev_solver_t::magma: { printf("MAGMA\n"); break; } case ev_solver_t::plasma: { printf("PLASMA\n"); break; } default: { TERMINATE("wrong eigen-value solver"); } } } printf("processing unit : "); switch (processing_unit()) { case CPU: { printf("CPU\n"); break; } case GPU: { printf("GPU\n"); break; } } if (processing_unit() == GPU) { #ifdef __GPU 
acc::print_device_info(0); #endif } int i{1}; printf("\n"); printf("XC functionals\n"); printf("==============\n"); for (auto& xc_label: xc_functionals()) { XC_functional xc(xc_label, num_spins()); printf("%i) %s: %s\n", i, xc_label.c_str(), xc.name().c_str()); printf("%s\n", xc.refs().c_str()); i++; } } } // namespace #endif
mandel.c
/*
** PROGRAM: Mandelbrot area
**
** PURPOSE: Program to compute the area of a  Mandelbrot set.
**          Correct answer should be around 1.510659.
**
** USAGE:   Program runs without input ... just run the executable
**
** HISTORY: Written:  (Mark Bull, August 2011).
**          Changed "comples" to "d_comples" to avoid collsion with
**          math.h complex type (Tim Mattson, September 2011)
**          Fixed OpenMP data-sharing and race bugs: threadprivate c,
**          private j, shared eps, atomic counter update.
*/

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>

#define NPOINTS 1000
#define MAXITER 1000

void testpoint(void);

struct d_complex {
    double r;
    double i;
};

/* Point currently being tested. testpoint() reads this global, so each
 * thread must own its own copy: a private(c) clause on the parallel loop
 * would NOT redirect the global reference inside testpoint() — only
 * threadprivate storage does. */
struct d_complex c;
#pragma omp threadprivate(c)

/* Count of grid points whose orbit escapes |z| > 2 (outside the set). */
int numoutside = 0;

int main(void)
{
    int i, j;
    double area, error, eps = 1.0e-5;

    /* Loop over grid of points in the complex plane which contains the
     * Mandelbrot set, testing each point to see whether it is inside or
     * outside the set.
     *
     * Data-sharing fixes relative to the original buggy version:
     *  - j is declared outside the loop, so it must be listed private;
     *    otherwise every thread shares one inner-loop counter (race).
     *  - eps is read-only inside the region, so it stays shared; the
     *    original private(eps) left each thread's copy UNINITIALIZED,
     *    producing garbage offsets.
     *  - c is handled by the threadprivate directive above.
     *  - i is the parallel loop index and is private automatically. */
#pragma omp parallel for default(shared) private(j)
    for (i = 0; i < NPOINTS; i++) {
        for (j = 0; j < NPOINTS; j++) {
            c.r = -2.0 + 2.5 * (double)(i) / (double)(NPOINTS) + eps;
            c.i = 1.125 * (double)(j) / (double)(NPOINTS) + eps;
            testpoint();
        }
    }

    /* Calculate area of set and error estimate and output the results.
     * Grid cell area = (2.5 * 1.125) / NPOINTS^2; factor 2.0 accounts for
     * the symmetric half-plane not sampled. */
    area = 2.0 * 2.5 * 1.125 * (double)(NPOINTS * NPOINTS - numoutside) /
           (double)(NPOINTS * NPOINTS);
    error = area / (double)NPOINTS;

    printf("Area of Mandlebrot set = %12.8f +/- %12.8f\n", area, error);
    printf("Correct answer should be around 1.510659\n");

    return 0;
}

/* Does the iteration z = z*z + c until |z| > 2, when the point is known to
 * be outside the set. If the loop count reaches MAXITER, the point is
 * considered to be inside the set. Reads the calling thread's threadprivate
 * copy of c and updates the shared counter numoutside. */
void testpoint(void)
{
    struct d_complex z;
    int iter;
    double temp;

    z = c;
    for (iter = 0; iter < MAXITER; iter++) {
        temp = (z.r * z.r) - (z.i * z.i) + c.r;
        z.i = z.r * z.i * 2 + c.i;
        z.r = temp;
        if ((z.r * z.r + z.i * z.i) > 4.0) {
            /* Many threads can discover escapes concurrently: the
             * read-modify-write on the shared counter must be atomic,
             * otherwise updates are lost and the area is overestimated. */
#pragma omp atomic
            numoutside++;
            break;
        }
    }
}
pfem_2_monolithic_slip_strategy.h
#ifndef KRATOS_PFEM2_MONOLITHIC_SLIP_STRATEGY_H #define KRATOS_PFEM2_MONOLITHIC_SLIP_STRATEGY_H #include "includes/define.h" #include "includes/model_part.h" #include "utilities/openmp_utils.h" #include "processes/process.h" #include "solving_strategies/schemes/scheme.h" #include "solving_strategies/strategies/solving_strategy.h" //#include "custom_elements/fractional_step.h" #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme.h" #include "solving_strategies/schemes/residualbased_incrementalupdate_static_scheme_slip.h" #include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver.h" #include "solving_strategies/builder_and_solvers/residualbased_elimination_builder_and_solver_componentwise.h" #include "solving_strategies/strategies/residualbased_linear_strategy.h" #include "custom_utilities/solver_settings.h" namespace Kratos { ///@addtogroup FluidDynamicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ template<class TSparseSpace, class TDenseSpace, class TLinearSolver > class PFEM2MonolithicSlipStrategy : public SolvingStrategy<TSparseSpace,TDenseSpace,TLinearSolver> { public: ///@name Type Definitions ///@{ /// Counted pointer of FSStrategy typedef boost::shared_ptr< FSStrategy<TSparseSpace, TDenseSpace, TLinearSolver> > Pointer; typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TDataType TDataType; //typedef typename BaseType::DofSetType DofSetType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename SolvingStrategy<TSparseSpace, 
TDenseSpace, TLinearSolver>::Pointer StrategyPointerType; typedef SolverSettings<TSparseSpace,TDenseSpace,TLinearSolver> SolverSettingsType; ///@} ///@name Life Cycle ///@{ PFEM2MonolithicSlipStrategy(ModelPart& rModelPart, SolverSettingsType& rSolverConfig, bool PredictorCorrector): BaseType(rModelPart,false), mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()) { InitializeStrategy(rSolverConfig,PredictorCorrector); } PFEM2MonolithicSlipStrategy(ModelPart& rModelPart, SolverSettingsType& rSolverConfig, bool PredictorCorrector, const Kratos::Variable<int>& PeriodicVar): BaseType(rModelPart,false), mrPeriodicIdVar(PeriodicVar) { InitializeStrategy(rSolverConfig,PredictorCorrector); } SolvingStrategyPython(self.model_part, self.time_scheme, self.monolithic_linear_solver, self.conv_criteria, CalculateReactionFlag, ReformDofSetAtEachStep, MoveMeshFlag) self.monolithic_solver.SetMaximumIterations(self.maximum_nonlin_iterations) PFEM2MonolithicSlipStrategy(ModelPart& rModelPart, /*SolverConfiguration<TSparseSpace, TDenseSpace, TLinearSolver>& rSolverConfig,*/ typename TLinearSolver::Pointer pLinearSolver, bool ReformDofSet = true, double Tol = 0.01, int MaxIterations = 3, unsigned int DomainSize = 2): BaseType(rModelPart,MoveMeshFlag), // Move Mesh flag, pass as input? mVelocityTolerance(VelTol), mPressureTolerance(PresTol), mMaxVelocityIter(MaxVelocityIterations), mMaxPressureIter(MaxPressureIterations), mDomainSize(DomainSize), mTimeOrder(TimeOrder), mPredictorCorrector(PredictorCorrector), mUseSlipConditions(true), ///@todo initialize somehow mReformDofSet(ReformDofSet), mExtraIterationSteps(), mrPeriodicIdVar(Kratos::Variable<int>::StaticObject()) { KRATOS_TRY; BaseType::SetEchoLevel(1); // Check that input parameters are reasonable and sufficient. 
this->Check(); bool CalculateReactions = false; bool CalculateNormDxFlag = true; bool ReformDofAtEachIteration = false; // DofSet modifiaction is managed by the fractional step strategy, auxiliary strategies should not modify the DofSet directly. // Additional Typedefs typedef typename Kratos::VariableComponent<Kratos::VectorComponentAdaptor<Kratos::array_1d<double, 3 > > > VarComponent; typedef typename BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>::Pointer BuilderSolverTypePointer; typedef SolvingStrategy<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; //initializing fractional velocity solution step typedef Scheme< TSparseSpace, TDenseSpace > SchemeType; typename SchemeType::Pointer pScheme; if (mUseSlipConditions) { typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticSchemeSlip< TSparseSpace, TDenseSpace > (mDomainSize,mDomainSize)); pScheme.swap(Temp); } else { typename SchemeType::Pointer Temp = typename SchemeType::Pointer(new ResidualBasedIncrementalUpdateStaticScheme< TSparseSpace, TDenseSpace > ()); pScheme.swap(Temp); } //CONSTRUCTION OF VELOCITY BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver > (pVelocityLinearSolver)); // BuilderSolverTypePointer vel_build = BuilderSolverTypePointer(new ResidualBasedEliminationBuilderAndSolverSlip<TSparseSpace, TDenseSpace, TLinearSolver, VarComponent > (pNewVelocityLinearSolver, this->mDomainSize, VELOCITY_X, VELOCITY_Y, VELOCITY_Z)); this->mpMomentumStrategy = typename BaseType::Pointer(new ResidualBasedLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver > (rModelPart, pScheme, pVelocityLinearSolver, vel_build, CalculateReactions, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpMomentumStrategy->SetEchoLevel( BaseType::GetEchoLevel() ); BuilderSolverTypePointer pressure_build = BuilderSolverTypePointer( //new 
ResidualBasedEliminationBuilderAndSolver<TSparseSpace,TDenseSpace,TLinearSolver>(pPressureLinearSolver)); new ResidualBasedEliminationBuilderAndSolverComponentwise<TSparseSpace, TDenseSpace, TLinearSolver, Variable<double> >(pPressureLinearSolver, PRESSURE)); this->mpPressureStrategy = typename BaseType::Pointer(new ResidualBasedLinearStrategy<TSparseSpace, TDenseSpace, TLinearSolver > (rModelPart, pScheme, pPressureLinearSolver, pressure_build, CalculateReactions, ReformDofAtEachIteration, CalculateNormDxFlag)); this->mpPressureStrategy->SetEchoLevel( BaseType::GetEchoLevel() ); if (mUseSlipConditions) { #pragma omp parallel { ModelPart::ConditionIterator CondBegin; ModelPart::ConditionIterator CondEnd; OpenMPUtils::PartitionedIterators(rModelPart.Conditions(),CondBegin,CondEnd); for (ModelPart::ConditionIterator itCond = CondBegin; itCond != CondEnd; ++itCond) { const double FlagValue = itCond->GetValue(IS_STRUCTURE); itCond->Set(SLIP); if (FlagValue != 0.0) { Condition::GeometryType& rGeom = itCond->GetGeometry(); for (unsigned int i = 0; i < rGeom.PointsNumber(); ++i) { rGeom[i].SetLock(); rGeom[i].SetValue(IS_STRUCTURE,FlagValue); rGeom[i].Set(SLIP); rGeom[i].UnSetLock(); } } } } rModelPart.GetCommunicator().AssembleNonHistoricalData(IS_STRUCTURE); rModelPart.GetCommunicator().SynchronizeOrNodalFlags(SLIP); } KRATOS_CATCH(""); } /// Destructor. virtual ~FSStrategy(){} ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ virtual int Check() { KRATOS_TRY; // Check elements and conditions in the model part int ierr = BaseType::Check(); if (ierr != 0) return ierr; if(DELTA_TIME.Key() == 0) KRATOS_THROW_ERROR(std::runtime_error,"DELTA_TIME Key is 0. Check that the application was correctly registered.",""); if(BDF_COEFFICIENTS.Key() == 0) KRATOS_THROW_ERROR(std::runtime_error,"BDF_COEFFICIENTS Key is 0. 
Check that the application was correctly registered.",""); ModelPart& rModelPart = BaseType::GetModelPart(); if ( mTimeOrder == 2 && rModelPart.GetBufferSize() < 3 ) KRATOS_THROW_ERROR(std::invalid_argument,"Buffer size too small for fractional step strategy (BDF2), needed 3, got ",rModelPart.GetBufferSize()); if ( mTimeOrder == 1 && rModelPart.GetBufferSize() < 2 ) KRATOS_THROW_ERROR(std::invalid_argument,"Buffer size too small for fractional step strategy (Backward Euler), needed 2, got ",rModelPart.GetBufferSize()); const ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo(); for ( ModelPart::ElementIterator itEl = rModelPart.ElementsBegin(); itEl != rModelPart.ElementsEnd(); ++itEl ) { ierr = itEl->Check(rCurrentProcessInfo); if (ierr != 0) break; } for ( ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); ++itCond) { ierr = itCond->Check(rCurrentProcessInfo); if (ierr != 0) break; } return ierr; KRATOS_CATCH(""); } virtual double Solve() { // Initialize BDF2 coefficients ModelPart& rModelPart = BaseType::GetModelPart(); this->SetTimeCoefficients(rModelPart.GetProcessInfo()); double NormDp = 0.0; if (mPredictorCorrector) { bool Converged = false; // Iterative solution for pressure for(unsigned int it = 0; it < mMaxPressureIter; ++it) { if ( BaseType::GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "Pressure iteration " << it << std::endl; NormDp = this->SolveStep(); Converged = this->CheckPressureConvergence(NormDp); if ( Converged ) { if ( BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "Predictor-corrector converged in " << it+1 << " iterations." << std::endl; break; } } if (!Converged && BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "Predictor-correctior iterations did not converge." 
<< std::endl; } else { // Solve for fractional step velocity, then update pressure once NormDp = this->SolveStep(); } if (mReformDofSet) this->Clear(); return NormDp; } virtual void CalculateReactions() { ModelPart& rModelPart = BaseType::GetModelPart(); ProcessInfo& rCurrentProcessInfo = rModelPart.GetProcessInfo(); // Set fractional step index to the momentum equation step int OriginalStep = rCurrentProcessInfo[FRACTIONAL_STEP]; rCurrentProcessInfo.SetValue(FRACTIONAL_STEP,1); #pragma omp parallel { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); const array_1d<double,3> Zero(3,0.0); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { itNode->FastGetSolutionStepValue(REACTION) = Zero; } } #pragma omp parallel { ModelPart::ElementIterator ElemBegin; ModelPart::ElementIterator ElemEnd; OpenMPUtils::PartitionedIterators(rModelPart.Elements(),ElemBegin,ElemEnd); LocalSystemVectorType RHS_Contribution; LocalSystemMatrixType LHS_Contribution; for (ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem) { //itElem->InitializeNonLinearIteration(rCurrentProcessInfo); // Build local system itElem->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, rCurrentProcessInfo); Element::GeometryType& rGeom = itElem->GetGeometry(); unsigned int NumNodes = rGeom.PointsNumber(); unsigned int index = 0; for (unsigned int i = 0; i < NumNodes; i++) { rGeom[i].SetLock(); array_1d<double,3>& rReaction = rGeom[i].FastGetSolutionStepValue(REACTION); for (unsigned int d = 0; d < mDomainSize; ++d) rReaction[d] -= RHS_Contribution[index++]; rGeom[i].UnSetLock(); } } } rModelPart.GetCommunicator().AssembleCurrentData(REACTION); // Reset original fractional step index rCurrentProcessInfo.SetValue(FRACTIONAL_STEP,OriginalStep); } virtual void AddIterationStep(Process::Pointer pNewStep) { mExtraIterationSteps.push_back(pNewStep); } virtual void 
ClearExtraIterationSteps() { mExtraIterationSteps.clear(); } virtual void Clear() { mpMomentumStrategy->Clear(); mpPressureStrategy->Clear(); } ///@} ///@name Access ///@{ virtual void SetEchoLevel(int Level) { BaseType::SetEchoLevel(Level); int StrategyLevel = Level > 0 ? Level - 1 : 0; mpMomentumStrategy->SetEchoLevel(StrategyLevel); mpPressureStrategy->SetEchoLevel(StrategyLevel); } ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. virtual std::string Info() const { std::stringstream buffer; buffer << "FSStrategy" ; return buffer.str(); } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const {rOStream << "FSStrategy";} /// Print object's data. virtual void PrintData(std::ostream& rOStream) const {} ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected Life Cycle ///@{ ///@} ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ /// Calculate the coefficients for time iteration. /** * @param rCurrentProcessInfo ProcessInfo instance from the fluid ModelPart. Must contain DELTA_TIME and BDF_COEFFICIENTS variables. 
*/ void SetTimeCoefficients(ProcessInfo& rCurrentProcessInfo) { KRATOS_TRY; if (mTimeOrder == 2) { //calculate the BDF coefficients double Dt = rCurrentProcessInfo[DELTA_TIME]; double OldDt = rCurrentProcessInfo.GetPreviousTimeStepInfo(1)[DELTA_TIME]; double Rho = OldDt / Dt; double TimeCoeff = 1.0 / (Dt * Rho * Rho + Dt * Rho); Vector& BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS]; BDFcoeffs.resize(3, false); BDFcoeffs[0] = TimeCoeff * (Rho * Rho + 2.0 * Rho); //coefficient for step n+1 (3/2Dt if Dt is constant) BDFcoeffs[1] = -TimeCoeff * (Rho * Rho + 2.0 * Rho + 1.0); //coefficient for step n (-4/2Dt if Dt is constant) BDFcoeffs[2] = TimeCoeff; //coefficient for step n-1 (1/2Dt if Dt is constant) } else if (mTimeOrder == 1) { double Dt = rCurrentProcessInfo[DELTA_TIME]; double TimeCoeff = 1.0 / Dt; Vector& BDFcoeffs = rCurrentProcessInfo[BDF_COEFFICIENTS]; BDFcoeffs.resize(2, false); BDFcoeffs[0] = TimeCoeff; //coefficient for step n+1 (1/Dt) BDFcoeffs[1] = -TimeCoeff; //coefficient for step n (-1/Dt) } KRATOS_CATCH(""); } double SolveStep() { ModelPart& rModelPart = BaseType::GetModelPart(); // 1. 
Fractional step momentum iteration rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,1); bool Converged = false; int Rank = rModelPart.GetCommunicator().MyPID(); for(unsigned int it = 0; it < mMaxVelocityIter; ++it) { if ( BaseType::GetEchoLevel() > 1 && Rank == 0) std::cout << "Momentum iteration " << it << std::endl; // build momentum system and solve for fractional step velocity increment rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,1); double NormDv = mpMomentumStrategy->Solve(); // // Compute projections (for stabilization) // rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,4); // this->ComputeSplitOssProjections(rModelPart); // // Additional steps // Moved to end of step // for (std::vector<Process::Pointer>::iterator iExtraSteps = mExtraIterationSteps.begin(); // iExtraSteps != mExtraIterationSteps.end(); ++iExtraSteps) // (*iExtraSteps)->Execute(); // Check convergence Converged = this->CheckFractionalStepConvergence(NormDv); if (Converged) { if ( BaseType::GetEchoLevel() > 0 && Rank == 0) std::cout << "Fractional velocity converged in " << it+1 << " iterations." << std::endl; break; } } if (!Converged && BaseType::GetEchoLevel() > 0 && Rank == 0) std::cout << "Fractional velocity iterations did not converge." << std::endl; // Compute projections (for stabilization) rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,4); this->ComputeSplitOssProjections(rModelPart); // 2. 
Pressure solution (store pressure variation in PRESSURE_OLD_IT) rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,5); #pragma omp parallel { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { const double OldPress = itNode->FastGetSolutionStepValue(PRESSURE); itNode->FastGetSolutionStepValue(PRESSURE_OLD_IT) = -OldPress; } } if (BaseType::GetEchoLevel() > 0 && Rank == 0) std::cout << "Calculating Pressure." << std::endl; double NormDp = mpPressureStrategy->Solve(); #pragma omp parallel { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) itNode->FastGetSolutionStepValue(PRESSURE_OLD_IT) += itNode->FastGetSolutionStepValue(PRESSURE); } // 3. Compute end-of-step velocity if (BaseType::GetEchoLevel() > 0 && Rank == 0) std::cout << "Updating Velocity." 
<< std::endl; rModelPart.GetProcessInfo().SetValue(FRACTIONAL_STEP,6); this->CalculateEndOfStepVelocity(); // Additional steps for (std::vector<Process::Pointer>::iterator iExtraSteps = mExtraIterationSteps.begin(); iExtraSteps != mExtraIterationSteps.end(); ++iExtraSteps) (*iExtraSteps)->Execute(); return NormDp; } bool CheckFractionalStepConvergence(const double NormDv) { ModelPart& rModelPart = BaseType::GetModelPart(); double NormV = 0.00; #pragma omp parallel reduction(+:NormV) { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodeBegin,NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { const array_1d<double,3> &Vel = itNode->FastGetSolutionStepValue(VELOCITY); for (unsigned int d = 0; d < 3; ++d) NormV += Vel[d] * Vel[d]; } } BaseType::GetModelPart().GetCommunicator().SumAll(NormV); NormV = sqrt(NormV); if (NormV == 0.0) NormV = 1.00; double Ratio = NormDv / NormV; if ( BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "Fractional velocity relative error: " << Ratio << std::endl; if (Ratio < mVelocityTolerance) { return true; } else return false; } bool CheckPressureConvergence(const double NormDp) { ModelPart& rModelPart = BaseType::GetModelPart(); double NormP = 0.00; #pragma omp parallel reduction(+:NormP) { ModelPart::NodeIterator NodeBegin; ModelPart::NodeIterator NodeEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodeBegin,NodeEnd); for (ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode) { const double Pr = itNode->FastGetSolutionStepValue(PRESSURE); NormP += Pr * Pr; } } BaseType::GetModelPart().GetCommunicator().SumAll(NormP); NormP = sqrt(NormP); if (NormP == 0.0) NormP = 1.00; double Ratio = NormDp / NormP; if ( BaseType::GetEchoLevel() > 0 && rModelPart.GetCommunicator().MyPID() == 0) std::cout << "Pressure relative error: " << Ratio << std::endl; if (Ratio < 
mPressureTolerance) { return true; } else return false; } void ComputeSplitOssProjections(ModelPart& rModelPart) { const array_1d<double,3> Zero(3,0.0); array_1d<double,3> Out(3,0.0); #pragma omp parallel { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for ( ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode ) { itNode->FastGetSolutionStepValue(CONV_PROJ) = Zero; itNode->FastGetSolutionStepValue(PRESS_PROJ) = Zero; itNode->FastGetSolutionStepValue(DIVPROJ) = 0.0; itNode->FastGetSolutionStepValue(NODAL_AREA) = 0.0; } } #pragma omp parallel { ModelPart::ElementIterator ElemBegin; ModelPart::ElementIterator ElemEnd; OpenMPUtils::PartitionedIterators(rModelPart.Elements(),ElemBegin,ElemEnd); for ( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem ) { itElem->Calculate(CONV_PROJ,Out,rModelPart.GetProcessInfo()); } } rModelPart.GetCommunicator().AssembleCurrentData(CONV_PROJ); rModelPart.GetCommunicator().AssembleCurrentData(PRESS_PROJ); rModelPart.GetCommunicator().AssembleCurrentData(DIVPROJ); rModelPart.GetCommunicator().AssembleCurrentData(NODAL_AREA); // If there are periodic conditions, add contributions from both sides to the periodic nodes this->PeriodicConditionProjectionCorrection(rModelPart); #pragma omp parallel { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd); for ( ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode ) { const double NodalArea = itNode->FastGetSolutionStepValue(NODAL_AREA); itNode->FastGetSolutionStepValue(CONV_PROJ) /= NodalArea; itNode->FastGetSolutionStepValue(PRESS_PROJ) /= NodalArea; itNode->FastGetSolutionStepValue(DIVPROJ) /= NodalArea; } } } void CalculateEndOfStepVelocity() { ModelPart& rModelPart = BaseType::GetModelPart(); const array_1d<double,3> Zero(3,0.0); 
        array_1d<double,3> Out(3,0.0);
        // Reset the nodal velocity-correction container.
#pragma omp parallel
        {
            ModelPart::NodeIterator NodesBegin;
            ModelPart::NodeIterator NodesEnd;
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);
            for ( ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode )
            {
                itNode->FastGetSolutionStepValue(FRACT_VEL) = Zero;
            }
        }
        // Each element accumulates its velocity-correction contribution into FRACT_VEL.
#pragma omp parallel
        {
            ModelPart::ElementIterator ElemBegin;
            ModelPart::ElementIterator ElemEnd;
            OpenMPUtils::PartitionedIterators(rModelPart.Elements(),ElemBegin,ElemEnd);
            for ( ModelPart::ElementIterator itElem = ElemBegin; itElem != ElemEnd; ++itElem )
            {
                itElem->Calculate(VELOCITY,Out,rModelPart.GetProcessInfo());
            }
        }
        // MPI assembly of interface nodes, then periodic-boundary correction.
        rModelPart.GetCommunicator().AssembleCurrentData(FRACT_VEL);
        this->PeriodicConditionVelocityCorrection(rModelPart);

        // Force the end of step velocity to verify slip conditions in the model
        if (mUseSlipConditions)
            this->EnforceSlipCondition(SLIP);

        // Apply the correction: v += FRACT_VEL / NODAL_AREA, skipping fixed DOFs.
        // The 3D branch updates X/Y/Z; the else branch (2D) updates X/Y only.
        if (mDomainSize > 2)
        {
#pragma omp parallel
            {
                ModelPart::NodeIterator NodesBegin;
                ModelPart::NodeIterator NodesEnd;
                OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);
                for ( ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode )
                {
                    const double NodalArea = itNode->FastGetSolutionStepValue(NODAL_AREA);
                    if ( ! itNode->IsFixed(VELOCITY_X) )
                        itNode->FastGetSolutionStepValue(VELOCITY_X) += itNode->FastGetSolutionStepValue(FRACT_VEL_X) / NodalArea;
                    if ( ! itNode->IsFixed(VELOCITY_Y) )
                        itNode->FastGetSolutionStepValue(VELOCITY_Y) += itNode->FastGetSolutionStepValue(FRACT_VEL_Y) / NodalArea;
                    if ( ! itNode->IsFixed(VELOCITY_Z) )
                        itNode->FastGetSolutionStepValue(VELOCITY_Z) += itNode->FastGetSolutionStepValue(FRACT_VEL_Z) / NodalArea;
                }
            }
        }
        else
        {
#pragma omp parallel
            {
                ModelPart::NodeIterator NodesBegin;
                ModelPart::NodeIterator NodesEnd;
                OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodesBegin,NodesEnd);
                for ( ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode )
                {
                    const double NodalArea = itNode->FastGetSolutionStepValue(NODAL_AREA);
                    if ( ! itNode->IsFixed(VELOCITY_X) )
                        itNode->FastGetSolutionStepValue(VELOCITY_X) += itNode->FastGetSolutionStepValue(FRACT_VEL_X) / NodalArea;
                    if ( ! itNode->IsFixed(VELOCITY_Y) )
                        itNode->FastGetSolutionStepValue(VELOCITY_Y) += itNode->FastGetSolutionStepValue(FRACT_VEL_Y) / NodalArea;
                }
            }
        }
    }

    /**
     * @brief Substract wall-normal component of velocity update to ensure that the final velocity satisfies slip conditions.
     * @param rSlipWallFlag If Node.Is(rSlipWallFlag) the node is in the wall.
     */
    void EnforceSlipCondition(const Kratos::Flags& rSlipWallFlag)
    {
        ModelPart& rModelPart = BaseType::GetModelPart();
#pragma omp parallel
        {
            ModelPart::NodeIterator NodeBegin; // = rModelPart.NodesBegin();
            ModelPart::NodeIterator NodeEnd; // = rModelPart.NodesEnd();
            OpenMPUtils::PartitionedIterators(rModelPart.Nodes(),NodeBegin,NodeEnd);
            for ( ModelPart::NodeIterator itNode = NodeBegin; itNode != NodeEnd; ++itNode )
            {
                if ( itNode->Is(rSlipWallFlag) )
                {
                    // Remove the component of the velocity update along NORMAL:
                    // dv -= (dv . n / |n|^2) n  over the first mDomainSize components.
                    const array_1d<double,3>& rNormal = itNode->FastGetSolutionStepValue(NORMAL);
                    array_1d<double,3>& rDeltaVelocity = itNode->FastGetSolutionStepValue(FRACT_VEL);
                    double Proj = rNormal[0] * rDeltaVelocity[0];
                    double Norm = rNormal[0] * rNormal[0];
                    for (unsigned int d = 1; d < mDomainSize; ++d)
                    {
                        Proj += rNormal[d] * rDeltaVelocity[d];
                        Norm += rNormal[d] * rNormal[d];
                    }
                    Proj /= Norm;
                    rDeltaVelocity -= Proj * rNormal;
                }
            }
        }
    }

    /** On periodic boundaries, the nodal area and the values to project need to take into account contributions from elements on
     * both sides of the
     boundary. This is done using the conditions and the non-historical nodal data containers as follows:\n
     * 1- The partition that owns the PeriodicCondition adds the values on both nodes to their non-historical containers.\n
     * 2- The non-historical containers are added across processes, transmiting the right value from the condition owner to all partitions.\n
     * 3- The value on all periodic nodes is replaced by the one received in step 2.
     */
    void PeriodicConditionProjectionCorrection(ModelPart& rModelPart)
    {
        // Key() == 0 means no periodic variable was configured: nothing to do.
        if (mrPeriodicIdVar.Key() != 0)
        {
            int GlobalNodesNum = rModelPart.GetCommunicator().LocalMesh().Nodes().size();
            rModelPart.GetCommunicator().SumAll(GlobalNodesNum);
            for (typename ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); itCond++ )
            {
                ModelPart::ConditionType::GeometryType& rGeom = itCond->GetGeometry();
                if (rGeom.PointsNumber() == 2)
                {
                    Node<3>& rNode0 = rGeom[0];
                    int Node0Pair = rNode0.FastGetSolutionStepValue(mrPeriodicIdVar);
                    Node<3>& rNode1 = rGeom[1];
                    int Node1Pair = rNode1.FastGetSolutionStepValue(mrPeriodicIdVar);
                    // If the nodes are marked as a periodic pair (this is to avoid acting on two-noded conditions that are not PeriodicCondition)
                    if ( ( static_cast<int>(rNode0.Id()) == Node1Pair ) && (static_cast<int>(rNode1.Id()) == Node0Pair ) )
                    {
                        // Sum both sides and store in the NON-historical container
                        // (step 1 of the procedure described above).
                        double NodalArea = rNode0.FastGetSolutionStepValue(NODAL_AREA) + rNode1.FastGetSolutionStepValue(NODAL_AREA);
                        array_1d<double,3> ConvProj = rNode0.FastGetSolutionStepValue(CONV_PROJ) + rNode1.FastGetSolutionStepValue(CONV_PROJ);
                        array_1d<double,3> PressProj = rNode0.FastGetSolutionStepValue(PRESS_PROJ) + rNode1.FastGetSolutionStepValue(PRESS_PROJ);
                        double DivProj = rNode0.FastGetSolutionStepValue(DIVPROJ) + rNode1.FastGetSolutionStepValue(DIVPROJ);

                        rNode0.GetValue(NODAL_AREA) = NodalArea;
                        rNode0.GetValue(CONV_PROJ) = ConvProj;
                        rNode0.GetValue(PRESS_PROJ) = PressProj;
                        rNode0.GetValue(DIVPROJ) = DivProj;
                        rNode1.GetValue(NODAL_AREA) = NodalArea;
                        rNode1.GetValue(CONV_PROJ) = ConvProj;
                        rNode1.GetValue(PRESS_PROJ) = PressProj;
                        rNode1.GetValue(DIVPROJ) = DivProj;
                    }
                }
                // Four-noded variant; the id > GlobalNodesNum test presumably marks a
                // special "corner" periodic condition — TODO confirm against PeriodicCondition.
                else if (rGeom.PointsNumber() == 4 && rGeom[0].FastGetSolutionStepValue(mrPeriodicIdVar) > GlobalNodesNum)
                {
                    double NodalArea = rGeom[0].FastGetSolutionStepValue(NODAL_AREA);
                    array_1d<double,3> ConvProj = rGeom[0].FastGetSolutionStepValue(CONV_PROJ);
                    array_1d<double,3> PressProj = rGeom[0].FastGetSolutionStepValue(PRESS_PROJ);
                    double DivProj = rGeom[0].FastGetSolutionStepValue(DIVPROJ);
                    for (unsigned int i = 1; i < 4; i++)
                    {
                        NodalArea += rGeom[i].FastGetSolutionStepValue(NODAL_AREA);
                        ConvProj += rGeom[i].FastGetSolutionStepValue(CONV_PROJ);
                        PressProj += rGeom[i].FastGetSolutionStepValue(PRESS_PROJ);
                        DivProj += rGeom[i].FastGetSolutionStepValue(DIVPROJ);
                    }
                    for (unsigned int i = 0; i < 4; i++)
                    {
                        rGeom[i].GetValue(NODAL_AREA) = NodalArea;
                        rGeom[i].GetValue(CONV_PROJ) = ConvProj;
                        rGeom[i].GetValue(PRESS_PROJ) = PressProj;
                        rGeom[i].GetValue(DIVPROJ) = DivProj;
                    }
                }
            }
            // Step 2: combine the non-historical containers across processes.
            rModelPart.GetCommunicator().AssembleNonHistoricalData(NODAL_AREA);
            rModelPart.GetCommunicator().AssembleNonHistoricalData(CONV_PROJ);
            rModelPart.GetCommunicator().AssembleNonHistoricalData(PRESS_PROJ);
            rModelPart.GetCommunicator().AssembleNonHistoricalData(DIVPROJ);
            // Step 3: copy the combined values back to the historical database.
            // GetValue(NODAL_AREA) != 0.0 identifies the periodic nodes touched above.
            for (typename ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++)
            {
                if (itNode->GetValue(NODAL_AREA) != 0.0)
                {
                    itNode->FastGetSolutionStepValue(NODAL_AREA) = itNode->GetValue(NODAL_AREA);
                    itNode->FastGetSolutionStepValue(CONV_PROJ) = itNode->GetValue(CONV_PROJ);
                    itNode->FastGetSolutionStepValue(PRESS_PROJ) = itNode->GetValue(PRESS_PROJ);
                    itNode->FastGetSolutionStepValue(DIVPROJ) = itNode->GetValue(DIVPROJ);

                    // reset for next iteration
                    itNode->GetValue(NODAL_AREA) = 0.0;
                    itNode->GetValue(CONV_PROJ) = array_1d<double,3>(3,0.0);
                    itNode->GetValue(PRESS_PROJ) = array_1d<double,3>(3,0.0);
                    itNode->GetValue(DIVPROJ) = 0.0;
                }
            }
        }
    }

    /// Same periodic-boundary treatment as PeriodicConditionProjectionCorrection,
    /// but applied to the velocity correction FRACT_VEL only.
    void PeriodicConditionVelocityCorrection(ModelPart& rModelPart)
    {
        if (mrPeriodicIdVar.Key() != 0)
        {
            int GlobalNodesNum = rModelPart.GetCommunicator().LocalMesh().Nodes().size();
            rModelPart.GetCommunicator().SumAll(GlobalNodesNum);
            for (typename ModelPart::ConditionIterator itCond = rModelPart.ConditionsBegin(); itCond != rModelPart.ConditionsEnd(); itCond++ )
            {
                ModelPart::ConditionType::GeometryType& rGeom = itCond->GetGeometry();
                if (rGeom.PointsNumber() == 2)
                {
                    Node<3>& rNode0 = rGeom[0];
                    int Node0Pair = rNode0.FastGetSolutionStepValue(mrPeriodicIdVar);
                    Node<3>& rNode1 = rGeom[1];
                    int Node1Pair = rNode1.FastGetSolutionStepValue(mrPeriodicIdVar);
                    // If the nodes are marked as a periodic pair (this is to avoid acting on two-noded conditions that are not PeriodicCondition)
                    if ( ( static_cast<int>(rNode0.Id()) == Node1Pair ) && (static_cast<int>(rNode1.Id()) == Node0Pair ) )
                    {
                        array_1d<double,3> DeltaVel = rNode0.FastGetSolutionStepValue(FRACT_VEL) + rNode1.FastGetSolutionStepValue(FRACT_VEL);
                        rNode0.GetValue(FRACT_VEL) = DeltaVel;
                        rNode1.GetValue(FRACT_VEL) = DeltaVel;
                    }
                }
                else if (rGeom.PointsNumber() == 4 && rGeom[0].FastGetSolutionStepValue(mrPeriodicIdVar) > GlobalNodesNum)
                {
                    array_1d<double,3> DeltaVel = rGeom[0].FastGetSolutionStepValue(FRACT_VEL);
                    for (unsigned int i = 1; i < 4; i++)
                    {
                        DeltaVel += rGeom[i].FastGetSolutionStepValue(FRACT_VEL);
                    }
                    for (unsigned int i = 0; i < 4; i++)
                    {
                        rGeom[i].GetValue(FRACT_VEL) = DeltaVel;
                    }
                }
            }
            rModelPart.GetCommunicator().AssembleNonHistoricalData(FRACT_VEL);
            // A non-zero non-historical FRACT_VEL marks a periodic node to update.
            for (typename ModelPart::NodeIterator itNode = rModelPart.NodesBegin(); itNode != rModelPart.NodesEnd(); itNode++)
            {
                array_1d<double,3>& rDeltaVel = itNode->GetValue(FRACT_VEL);
                if ( rDeltaVel[0]*rDeltaVel[0] + rDeltaVel[1]*rDeltaVel[1] + rDeltaVel[2]*rDeltaVel[2] != 0.0)
                {
                    itNode->FastGetSolutionStepValue(FRACT_VEL) = itNode->GetValue(FRACT_VEL);
                    rDeltaVel = array_1d<double,3>(3,0.0);
                }
            }
        }
    }

    ///@}
    ///@name Protected Access
    ///@{
    ///@}
    ///@name Protected Inquiry
    ///@{
    ///@}
    ///@name Protected LifeCycle
    ///@{
    ///@}
private:
    ///@name Static Member Variables
    ///@{
    ///@}
    ///@name Member Variables
    ///@{

    double mVelocityTolerance;          // relative tolerance for the momentum step
    double mPressureTolerance;          // relative tolerance for the pressure step
    unsigned int mMaxVelocityIter;      // max iterations for the momentum solve
    unsigned int mMaxPressureIter;      // max iterations for the pressure solve
    unsigned int mDomainSize;           // 2 or 3 (spatial dimension)
    unsigned int mTimeOrder;
    bool mPredictorCorrector;
    bool mUseSlipConditions;
    bool mReformDofSet;

    // Fractional step index.
    /*  1 : Momentum step (calculate fractional step velocity)
     * 2-3 : Unused (reserved for componentwise calculation of frac step velocity)
     * 4 : Pressure step
     * 5 : Computation of projections
     * 6 : End of step velocity
     */
    // unsigned int mStepId;

    /// Scheme for the solution of the momentum equation
    StrategyPointerType mpMomentumStrategy;

    /// Scheme for the solution of the mass equation
    StrategyPointerType mpPressureStrategy;

    std::vector< Process::Pointer > mExtraIterationSteps;

    const Kratos::Variable<int>& mrPeriodicIdVar;

    ///@}
    ///@name Private Operators
    ///@{
    ///@}
    ///@name Private Operations
    ///@{

    /// Read the solver configuration and set up the momentum/pressure strategies.
    /** Throws (via KRATOS_THROW_ERROR) when either strategy is missing; also marks
     *  the nodes of SLIP conditions with the SLIP flag when slip BCs are enabled.
     */
    void InitializeStrategy(SolverSettingsType& rSolverConfig, bool PredictorCorrector)
    {
        KRATOS_TRY;

        mTimeOrder = rSolverConfig.GetTimeOrder();

        // Check that input parameters are reasonable and sufficient.
        this->Check();

        ModelPart& rModelPart = this->GetModelPart();

        mDomainSize = rSolverConfig.GetDomainSize();
        mPredictorCorrector = PredictorCorrector;
        mUseSlipConditions = rSolverConfig.UseSlipConditions();
        mReformDofSet = rSolverConfig.GetReformDofSet();
        BaseType::SetEchoLevel(rSolverConfig.GetEchoLevel());

        // Initialize strategies for each step
        bool HaveVelStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Velocity,mpMomentumStrategy);
        if (HaveVelStrategy)
        {
            rSolverConfig.FindTolerance(SolverSettingsType::Velocity,mVelocityTolerance);
            rSolverConfig.FindMaxIter(SolverSettingsType::Velocity,mMaxVelocityIter);
        }
        else
        {
            KRATOS_THROW_ERROR(std::runtime_error,"FS_Strategy error: No Velocity strategy defined in FractionalStepSettings","");
        }
        bool HavePressStrategy = rSolverConfig.FindStrategy(SolverSettingsType::Pressure,mpPressureStrategy);
        if (HavePressStrategy)
        {
            rSolverConfig.FindTolerance(SolverSettingsType::Pressure,mPressureTolerance);
            rSolverConfig.FindMaxIter(SolverSettingsType::Pressure,mMaxPressureIter);
        }
        else
        {
            KRATOS_THROW_ERROR(std::runtime_error,"FS_Strategy error: No Pressure strategy defined in FractionalStepSettings","");
        }
        // Optional turbulence model runs as an extra iteration step.
        Process::Pointer pTurbulenceProcess;
        bool HaveTurbulence = rSolverConfig.GetTurbulenceModel(pTurbulenceProcess);
        if (HaveTurbulence)
            mExtraIterationSteps.push_back(pTurbulenceProcess);

        // Set up nodes to use slip conditions if needed.
        if (mUseSlipConditions)
        {
            // Propagate the SLIP flag from conditions to their nodes; SetLock/UnSetLock
            // guards concurrent Set() calls on nodes shared between conditions.
#pragma omp parallel
            {
                ModelPart::ConditionIterator CondBegin;
                ModelPart::ConditionIterator CondEnd;
                OpenMPUtils::PartitionedIterators(rModelPart.Conditions(),CondBegin,CondEnd);
                for (ModelPart::ConditionIterator itCond = CondBegin; itCond != CondEnd; ++itCond)
                {
                    const bool is_slip = itCond->Is(SLIP);
                    if (is_slip)
                    {
                        Condition::GeometryType& rGeom = itCond->GetGeometry();
                        for (unsigned int i = 0; i < rGeom.PointsNumber(); ++i)
                        {
                            rGeom[i].SetLock();
                            rGeom[i].Set(SLIP);
                            rGeom[i].UnSetLock();
                        }
                    }
                }
            }
            rModelPart.GetCommunicator().SynchronizeOrNodalFlags(SLIP);
        }
        // Check input parameters
        this->Check();

        KRATOS_CATCH("");
    }

    ///@}
    ///@name Private Access
    ///@{
    ///@}
    ///@name Private Inquiry
    ///@{
    ///@}
    ///@name Un accessible methods
    ///@{

    /// Assignment operator.
    // NOTE(review): body is empty but the declared return type is FSStrategy& —
    // calling it would be undefined behavior (missing return). Declared private
    // to forbid copying; consider '= delete' (C++11) or leaving it undefined.
    FSStrategy& operator=(FSStrategy const& rOther){}

    /// Copy constructor.
    FSStrategy(FSStrategy const& rOther){}

    ///@}

}; /// Class FStepStrategy

///@}
///@name Type Definitions
///@{
///@}

///@} // addtogroup

} // namespace Kratos.

#endif // KRATOS_FS_STRATEGY_H
SpatialConvolutionMM.c
#ifndef TH_GENERIC_FILE #define TH_GENERIC_FILE "generic/SpatialConvolutionMM.c" #else /* note: due to write issues, this one cannot be parallelized as well as unfolded_copy */ static void nn_(unfolded_acc)(THTensor *finput, THTensor *input, int kW, int kH, int nInputPlane, int inputWidth, int inputHeight, int outputWidth, int outputHeight) { int nip; real *input_data = THTensor_(data)(input); real *finput_data = THTensor_(data)(finput); #pragma omp parallel for private(nip) for(nip = 0; nip < nInputPlane; nip++) { int kw, kh, y; for(kh = 0; kh < kH; kh++) { for(kw = 0; kw < kW; kw++) { real *src = finput_data + nip*(kH*kW*outputHeight*outputWidth) + kh*(kW*outputHeight*outputWidth) + kw*(outputHeight*outputWidth); real *dst = input_data + nip*(inputHeight*inputWidth) + kh*inputWidth + kw; for(y = 0; y < outputHeight; y++) THVector_(add)(dst+y*inputWidth, src+y*outputWidth, 1, outputWidth); /* note: THVector_add could handle 1 value better */ } } } } static void nn_(unfolded_copy)(THTensor *finput, THTensor *input, int kW, int kH, int nInputPlane, int inputWidth, int inputHeight, int outputWidth, int outputHeight) { long k; real *input_data = THTensor_(data)(input); real *finput_data = THTensor_(data)(finput); #pragma omp parallel for private(k) for(k = 0; k < nInputPlane*kH*kW; k++) { int nip = k / (kH*kW); int rest = k % (kH*kW); int kh = rest / kW; int kw = rest % kW; int y; real *dst = finput_data + nip*(kH*kW*outputHeight*outputWidth) + kh*(kW*outputHeight*outputWidth) + kw*(outputHeight*outputWidth); real *src = input_data + nip*(inputHeight*inputWidth) + kh*inputWidth + kw; for(y = 0; y < outputHeight; y++) memcpy(dst+y*outputWidth, src+y*inputWidth, sizeof(real)*outputWidth); } } static void nn_(SpatialConvolutionMM_updateOutput_frame)(THTensor *input, THTensor *output, THTensor *weight, THTensor *bias, THTensor *finput, int kW, int kH, long nInputPlane, long inputWidth, long inputHeight, long nOutputPlane, long outputWidth, long outputHeight) { long i; 
  nn_(unfolded_copy)(finput, input, kW, kH, nInputPlane, inputWidth, inputHeight, outputWidth, outputHeight);

  /* view output as a (nOutputPlane) x (outputHeight*outputWidth) matrix */
  THTensor *output2d = THTensor_(newWithStorage2d)(output->storage, output->storageOffset, nOutputPlane, -1, outputHeight*outputWidth, -1);

  /* initialize every output plane with its bias value */
  for(i = 0; i < nOutputPlane; i++)
    THVector_(fill)(output->storage->data+output->storageOffset+output->stride[0]*i, THTensor_(get1d)(bias, i), outputHeight*outputWidth);

  /* output2d += weight * finput  (the whole convolution as one GEMM) */
  THTensor_(addmm)(output2d, 1, output2d, 1, weight, finput);

  THTensor_(free)(output2d);
}

/* Lua entry point: module.output = conv(input). Accepts 3D (C,H,W) or
   4D batch (T,C,H,W) input; returns 1 (the output tensor on the Lua stack). */
static int nn_(SpatialConvolutionMM_updateOutput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  int kW = luaT_getfieldcheckint(L, 1, "kW");
  int kH = luaT_getfieldcheckint(L, 1, "kH");
  THTensor *finput = luaT_getfieldcheckudata(L, 1, "finput", torch_Tensor);
  THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor);
  THTensor *bias = luaT_getfieldcheckudata(L, 1, "bias", torch_Tensor);
  THTensor *output = luaT_getfieldcheckudata(L, 1, "output", torch_Tensor);

  luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D(batch mode) tensor expected");

  /* dimension indices shift by one in batch mode */
  int dimf = 0;
  int dimw = 2;
  int dimh = 1;
  if (input->nDimension == 4) {
    dimf++;
    dimw++;
    dimh++;
  }

  long nInputPlane = input->size[dimf];
  long inputWidth = input->size[dimw];
  long inputHeight = input->size[dimh];
  long nOutputPlane = weight->size[0];
  /* "valid" convolution, stride 1: no padding */
  long outputWidth = (inputWidth - kW) + 1;
  long outputHeight = (inputHeight - kH) + 1;

  if(input->nDimension == 3)
  {
    THTensor_(resize2d)(finput, kW*kH*nInputPlane, outputHeight*outputWidth);
    THTensor_(resize3d)(output, nOutputPlane, outputHeight, outputWidth);
    nn_(SpatialConvolutionMM_updateOutput_frame)(input, output, weight, bias, finput, kW, kH, nInputPlane, inputWidth, inputHeight, nOutputPlane, outputWidth, outputHeight);
  }
  else
  {
    long T = input->size[0];
    long t;
    THTensor_(resize3d)(finput, T, kW*kH*nInputPlane, outputHeight*outputWidth);
    THTensor_(resize4d)(output, T, nOutputPlane, outputHeight, outputWidth);

    /* refcount updates on the shared storages are not atomic; the flags are
       cleared so the per-thread newSelect/free below skip refcounting, then
       restored after the parallel region */
    THStorage_(clearFlag)(input->storage, TH_STORAGE_REFCOUNTED);
    THStorage_(clearFlag)(output->storage, TH_STORAGE_REFCOUNTED);
    THStorage_(clearFlag)(finput->storage, TH_STORAGE_REFCOUNTED);

    // mkl_set_num_threads(1);
#pragma omp parallel for private(t)
    for(t = 0; t < T; t++)
    {
      THTensor *input_t = THTensor_(newSelect)(input, 0, t);
      THTensor *output_t = THTensor_(newSelect)(output, 0, t);
      THTensor *finput_t = THTensor_(newSelect)(finput, 0, t);
      nn_(SpatialConvolutionMM_updateOutput_frame)(input_t, output_t, weight, bias, finput_t, kW, kH, nInputPlane, inputWidth, inputHeight, nOutputPlane, outputWidth, outputHeight);
      THTensor_(free)(input_t);
      THTensor_(free)(output_t);
      THTensor_(free)(finput_t);
    }
    THStorage_(setFlag)(input->storage, TH_STORAGE_REFCOUNTED);
    THStorage_(setFlag)(output->storage, TH_STORAGE_REFCOUNTED);
    THStorage_(setFlag)(finput->storage, TH_STORAGE_REFCOUNTED);
  }
  // mkl_set_num_threads(4);
  return 1;
}

/* Backward (input gradient) for one frame:
   fgradInput = weight^T * gradOutput (as GEMM), then fold back into gradInput. */
static void nn_(SpatialConvolutionMM_updateGradInput_frame)(THTensor *gradInput, THTensor *gradOutput, THTensor *weight, THTensor *fgradInput, int kW, int kH)
{
  THTensor *gradOutput2d = THTensor_(newWithStorage2d)(gradOutput->storage, gradOutput->storageOffset, gradOutput->size[0], -1, gradOutput->size[1]*gradOutput->size[2], -1);
  /* beta = 0: overwrite fgradInput with weight * gradOutput2d */
  THTensor_(addmm)(fgradInput, 0, fgradInput, 1, weight, gradOutput2d);
  THTensor_(free)(gradOutput2d);

  THTensor_(zero)(gradInput);
  nn_(unfolded_acc)(fgradInput, gradInput, kW, kH, gradInput->size[0], gradInput->size[2], gradInput->size[1], gradOutput->size[2], gradOutput->size[1]);
}

/* Lua entry point: compute module.gradInput from gradOutput. */
static int nn_(SpatialConvolutionMM_updateGradInput)(lua_State *L)
{
  THTensor *input = luaT_checkudata(L, 2, torch_Tensor);
  THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor);
  int kW = luaT_getfieldcheckint(L, 1, "kW");
  int kH = luaT_getfieldcheckint(L, 1, "kH");
  int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane");
  THTensor *finput = luaT_getfieldcheckudata(L, 1, "finput", torch_Tensor);
  THTensor *fgradInput =
luaT_getfieldcheckudata(L, 1, "fgradInput", torch_Tensor); THTensor *weight = luaT_getfieldcheckudata(L, 1, "weight", torch_Tensor); THTensor *gradInput = luaT_getfieldcheckudata(L, 1, "gradInput", torch_Tensor); THArgCheck( nOutputPlane == gradOutput->size[input->nDimension == 4 ? 1 : 0], 1, "Number of output features is not equal to nOutputPlane" ); THTensor_(resizeAs)(gradInput, input); THTensor_(resizeAs)(fgradInput, finput); THTensor_(transpose)(weight, weight, 0, 1); if(input->nDimension == 3) { nn_(SpatialConvolutionMM_updateGradInput_frame)(gradInput, gradOutput, weight, fgradInput, kW, kH); } else { long T = input->size[0]; long t; THStorage_(clearFlag)(gradInput->storage, TH_STORAGE_REFCOUNTED); THStorage_(clearFlag)(gradOutput->storage, TH_STORAGE_REFCOUNTED); THStorage_(clearFlag)(fgradInput->storage, TH_STORAGE_REFCOUNTED); #pragma omp parallel for private(t) for(t = 0; t < T; t++) { THTensor *gradInput_t = THTensor_(newSelect)(gradInput, 0, t); THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t); THTensor *fgradInput_t = THTensor_(newSelect)(fgradInput, 0, t); nn_(SpatialConvolutionMM_updateGradInput_frame)(gradInput_t, gradOutput_t, weight, fgradInput_t, kW, kH); THTensor_(free)(gradInput_t); THTensor_(free)(gradOutput_t); THTensor_(free)(fgradInput_t); } THStorage_(setFlag)(gradInput->storage, TH_STORAGE_REFCOUNTED); THStorage_(setFlag)(gradOutput->storage, TH_STORAGE_REFCOUNTED); THStorage_(setFlag)(fgradInput->storage, TH_STORAGE_REFCOUNTED); } THTensor_(transpose)(weight, weight, 0, 1); return 1; } static void nn_(SpatialConvolutionMM_accGradParameters_frame)(THTensor *gradOutput, THTensor *gradWeight, THTensor *gradBias, THTensor *finput, real scale) { long i; THTensor *gradOutput2d = THTensor_(newWithStorage2d)(gradOutput->storage, gradOutput->storageOffset, gradOutput->size[0], -1, gradOutput->size[1]*gradOutput->size[2], -1); THTensor_(transpose)(finput, finput, 0, 1); THTensor_(addmm)(gradWeight, 1, gradWeight, scale, 
gradOutput2d, finput); THTensor_(transpose)(finput, finput, 0, 1); THTensor *gradOutputPlane = THTensor_(new)(); for(i = 0; i < gradBias->size[0]; i++) { long k; real sum = 0; real *data = gradOutput2d->storage->data + gradOutput2d->storageOffset + i*gradOutput2d->stride[0]; for(k = 0; k < gradOutput2d->size[1]; k++) sum += data[k]; (gradBias->storage->data + gradBias->storageOffset)[i] += scale*sum; } THTensor_(free)(gradOutputPlane); THTensor_(free)(gradOutput2d); } static int nn_(SpatialConvolutionMM_accGradParameters)(lua_State *L) { THTensor *input = luaT_checkudata(L, 2, torch_Tensor); THTensor *gradOutput = luaT_checkudata(L, 3, torch_Tensor); real scale = luaL_optnumber(L, 4, 1); int nOutputPlane = luaT_getfieldcheckint(L, 1, "nOutputPlane"); THTensor *finput = luaT_getfieldcheckudata(L, 1, "finput", torch_Tensor); THTensor *gradWeight = luaT_getfieldcheckudata(L, 1, "gradWeight", torch_Tensor); THTensor *gradBias = luaT_getfieldcheckudata(L, 1, "gradBias", torch_Tensor); THArgCheck( nOutputPlane == gradOutput->size[input->nDimension == 4 ? 
1 : 0], 1, "Number of output features is not equal to nOutputPlane" ); if(input->nDimension == 3) { nn_(SpatialConvolutionMM_accGradParameters_frame)(gradOutput, gradWeight, gradBias, finput, scale); } else { long T = input->size[0]; long t; for(t = 0; t < T; t++) { THTensor *gradOutput_t = THTensor_(newSelect)(gradOutput, 0, t); THTensor *finput_t = THTensor_(newSelect)(finput, 0, t); nn_(SpatialConvolutionMM_accGradParameters_frame)(gradOutput_t, gradWeight, gradBias, finput_t, scale); THTensor_(free)(gradOutput_t); THTensor_(free)(finput_t); } } return 0; } static const struct luaL_Reg nn_(SpatialConvolutionMM__) [] = { {"SpatialConvolutionMM_updateOutput", nn_(SpatialConvolutionMM_updateOutput)}, {"SpatialConvolutionMM_updateGradInput", nn_(SpatialConvolutionMM_updateGradInput)}, {"SpatialConvolutionMM_accGradParameters", nn_(SpatialConvolutionMM_accGradParameters)}, {NULL, NULL} }; static void nn_(SpatialConvolutionMM_init)(lua_State *L) { luaT_pushmetatable(L, torch_Tensor); luaT_registeratname(L, nn_(SpatialConvolutionMM__), "nn"); lua_pop(L,1); } #endif
convolutiondepthwise_3x3_pack8_int8.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Depthwise 3x3 stride-1 convolution, int8 input, pack8 layout (8 channels per
// "pixel", i.e. every load of 8 signed chars covers the 8 packed channels of one
// spatial position). Products are widened s8*s8 -> s16 (vmull/vmlal) and summed
// into s32 accumulators, which are stored to the int32 top_blob.
// The main loop computes a 2x2 output tile per iteration (rows i,i+1 and
// columns j,j+1), reading 4 input rows r0..r3; tail loops handle the last
// column and the last row.
static void convdw3x3s1_pack8_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    int w = bottom_blob.w;

    int outw = top_blob.w;
    int outh = top_blob.h;

    const int group = bottom_blob.c;

    // depthwise: one independent group (= one channel block) per iteration
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);

        const signed char* k0 = kernel.row<const signed char>(g);

        int* outptr0 = out.row<int>(0);
        int* outptr1 = out.row<int>(1);

        const Mat img0 = bottom_blob.channel(g);

        const signed char* r0 = img0.row<const signed char>(0);
        const signed char* r1 = img0.row<const signed char>(1);
        const signed char* r2 = img0.row<const signed char>(2);
        const signed char* r3 = img0.row<const signed char>(3);

        // 9 kernel taps, 8 lanes (one per packed channel) each
        int8x8_t _k00 = vld1_s8(k0);
        int8x8_t _k01 = vld1_s8(k0 + 8);
        int8x8_t _k02 = vld1_s8(k0 + 16);
        int8x8_t _k10 = vld1_s8(k0 + 24);
        int8x8_t _k11 = vld1_s8(k0 + 32);
        int8x8_t _k12 = vld1_s8(k0 + 40);
        int8x8_t _k20 = vld1_s8(k0 + 48);
        int8x8_t _k21 = vld1_s8(k0 + 56);
        int8x8_t _k22 = vld1_s8(k0 + 64);

        int i = 0;
        // two output rows per iteration
        for (; i + 1 < outh; i += 2)
        {
            int j = 0;
            // two output columns per iteration (2x2 tile)
            for (; j + 1 < outw; j += 2)
            {
                // each q-register holds two adjacent packed pixels of one row
                int8x16_t _r0001 = vld1q_s8(r0);
                int8x16_t _r0203 = vld1q_s8(r0 + 16);
                int8x16_t _r1011 = vld1q_s8(r1);
                int8x16_t _r1213 = vld1q_s8(r1 + 16);
                int8x16_t _r2021 = vld1q_s8(r2);
                int8x16_t _r2223 = vld1q_s8(r2 + 16);
                int8x16_t _r3031 = vld1q_s8(r3);
                int8x16_t _r3233 = vld1q_s8(r3 + 16);

                // _sAB: partial sums for output pixel A of the tile
                // (0 = top-left, 1 = top-right, 2 = bottom-left, 3 = bottom-right)
                int16x8_t _s00 = vmull_s8(vget_low_s8(_r0001), _k00);
                int16x8_t _s01 = vmull_s8(vget_high_s8(_r0001), _k01);
                int16x8_t _s02 = vmull_s8(vget_low_s8(_r0203), _k02);
                int16x8_t _s03 = vmull_s8(vget_low_s8(_r1011), _k10);
                int16x8_t _s10 = vmull_s8(vget_high_s8(_r0001), _k00);
                int16x8_t _s11 = vmull_s8(vget_low_s8(_r0203), _k01);
                int16x8_t _s12 = vmull_s8(vget_high_s8(_r0203), _k02);
                int16x8_t _s13 = vmull_s8(vget_high_s8(_r1011), _k10);
                int16x8_t _s20 = vmull_s8(vget_low_s8(_r1011), _k00);
                int16x8_t _s21 = vmull_s8(vget_high_s8(_r1011), _k01);
                int16x8_t _s22 = vmull_s8(vget_low_s8(_r1213), _k02);
                int16x8_t _s23 = vmull_s8(vget_low_s8(_r2021), _k10);
                int16x8_t _s30 = vmull_s8(vget_high_s8(_r1011), _k00);
                int16x8_t _s31 = vmull_s8(vget_low_s8(_r1213), _k01);
                int16x8_t _s32 = vmull_s8(vget_high_s8(_r1213), _k02);
                int16x8_t _s33 = vmull_s8(vget_high_s8(_r2021), _k10);

                _s00 = vmlal_s8(_s00, vget_high_s8(_r1011), _k11);
                _s01 = vmlal_s8(_s01, vget_low_s8(_r1213), _k12);
                _s02 = vmlal_s8(_s02, vget_low_s8(_r2021), _k20);
                _s03 = vmlal_s8(_s03, vget_high_s8(_r2021), _k21);
                _s10 = vmlal_s8(_s10, vget_low_s8(_r1213), _k11);
                _s11 = vmlal_s8(_s11, vget_high_s8(_r1213), _k12);
                _s12 = vmlal_s8(_s12, vget_high_s8(_r2021), _k20);
                _s13 = vmlal_s8(_s13, vget_low_s8(_r2223), _k21);
                _s20 = vmlal_s8(_s20, vget_high_s8(_r2021), _k11);
                _s21 = vmlal_s8(_s21, vget_low_s8(_r2223), _k12);
                _s22 = vmlal_s8(_s22, vget_low_s8(_r3031), _k20);
                _s23 = vmlal_s8(_s23, vget_high_s8(_r3031), _k21);
                _s30 = vmlal_s8(_s30, vget_low_s8(_r2223), _k11);
                _s31 = vmlal_s8(_s31, vget_high_s8(_r2223), _k12);
                _s32 = vmlal_s8(_s32, vget_high_s8(_r3031), _k20);
                _s33 = vmlal_s8(_s33, vget_low_s8(_r3233), _k21);

                // ninth tap (k22) kept separate to stay within s16 headroom
                int16x8_t _s08 = vmull_s8(vget_low_s8(_r2223), _k22);
                int16x8_t _s18 = vmull_s8(vget_high_s8(_r2223), _k22);
                int16x8_t _s28 = vmull_s8(vget_low_s8(_r3233), _k22);
                int16x8_t _s38 = vmull_s8(vget_high_s8(_r3233), _k22);

                // widen s16 -> s32 and combine the partial sums per output pixel
                int32x4_t _sum00 = vaddl_s16(vget_low_s16(_s00), vget_low_s16(_s01));
                int32x4_t _sum01 = vaddl_s16(vget_high_s16(_s00), vget_high_s16(_s01));
                int32x4_t _sum02 = vaddl_s16(vget_low_s16(_s02), vget_low_s16(_s03));
                int32x4_t _sum03 = vaddl_s16(vget_high_s16(_s02), vget_high_s16(_s03));
                int32x4_t _sum10 = vaddl_s16(vget_low_s16(_s10), vget_low_s16(_s11));
                int32x4_t _sum11 = vaddl_s16(vget_high_s16(_s10), vget_high_s16(_s11));
                int32x4_t _sum12 = vaddl_s16(vget_low_s16(_s12), vget_low_s16(_s13));
                int32x4_t _sum13 = vaddl_s16(vget_high_s16(_s12), vget_high_s16(_s13));
                int32x4_t _sum20 = vaddl_s16(vget_low_s16(_s20), vget_low_s16(_s21));
                int32x4_t _sum21 = vaddl_s16(vget_high_s16(_s20), vget_high_s16(_s21));
                int32x4_t _sum22 = vaddl_s16(vget_low_s16(_s22), vget_low_s16(_s23));
                int32x4_t _sum23 = vaddl_s16(vget_high_s16(_s22), vget_high_s16(_s23));
                int32x4_t _sum30 = vaddl_s16(vget_low_s16(_s30), vget_low_s16(_s31));
                int32x4_t _sum31 = vaddl_s16(vget_high_s16(_s30), vget_high_s16(_s31));
                int32x4_t _sum32 = vaddl_s16(vget_low_s16(_s32), vget_low_s16(_s33));
                int32x4_t _sum33 = vaddl_s16(vget_high_s16(_s32), vget_high_s16(_s33));

                _sum00 = vaddw_s16(_sum00, vget_low_s16(_s08));
                _sum01 = vaddw_s16(_sum01, vget_high_s16(_s08));
                _sum10 = vaddw_s16(_sum10, vget_low_s16(_s18));
                _sum11 = vaddw_s16(_sum11, vget_high_s16(_s18));
                _sum20 = vaddw_s16(_sum20, vget_low_s16(_s28));
                _sum21 = vaddw_s16(_sum21, vget_high_s16(_s28));
                _sum30 = vaddw_s16(_sum30, vget_low_s16(_s38));
                _sum31 = vaddw_s16(_sum31, vget_high_s16(_s38));

                _sum00 = vaddq_s32(_sum00, _sum02);
                _sum01 = vaddq_s32(_sum01, _sum03);
                _sum10 = vaddq_s32(_sum10, _sum12);
                _sum11 = vaddq_s32(_sum11, _sum13);
                _sum20 = vaddq_s32(_sum20, _sum22);
                _sum21 = vaddq_s32(_sum21, _sum23);
                _sum30 = vaddq_s32(_sum30, _sum32);
                _sum31 = vaddq_s32(_sum31, _sum33);

                // store 2 pixels (8 lanes each) per output row
                vst1q_s32(outptr0, _sum00);
                vst1q_s32(outptr0 + 4, _sum01);
                vst1q_s32(outptr0 + 8, _sum10);
                vst1q_s32(outptr0 + 12, _sum11);
                vst1q_s32(outptr1, _sum20);
                vst1q_s32(outptr1 + 4, _sum21);
                vst1q_s32(outptr1 + 8, _sum30);
                vst1q_s32(outptr1 + 12, _sum31);

                r0 += 16;
                r1 += 16;
                r2 += 16;
                r3 += 16;
                outptr0 += 16;
                outptr1 += 16;
            }
            // column tail: one output column, two output rows
            for (; j < outw; j++)
            {
                int8x8_t _r00 = vld1_s8(r0);
                int8x8_t _r01 = vld1_s8(r0 + 8);
                int8x8_t _r02 = vld1_s8(r0 + 16);
                int8x8_t _r10 = vld1_s8(r1);
                int8x8_t _r11 = vld1_s8(r1 + 8);
                int8x8_t _r12 = vld1_s8(r1 + 16);
                int8x8_t _r20 = vld1_s8(r2);
                int8x8_t _r21 = vld1_s8(r2 + 8);
                int8x8_t _r22 = vld1_s8(r2 + 16);
                int8x8_t _r30 = vld1_s8(r3);
                int8x8_t _r31 = vld1_s8(r3 + 8);
                int8x8_t _r32 = vld1_s8(r3 + 16);

                int16x8_t _s00 = vmull_s8(_r00, _k00);
                int16x8_t _s01 = vmull_s8(_r01, _k01);
                int16x8_t _s02 = vmull_s8(_r02, _k02);
                int16x8_t _s03 = vmull_s8(_r10, _k10);
                int16x8_t _s10 = vmull_s8(_r10, _k00);
                int16x8_t _s11 = vmull_s8(_r11, _k01);
                int16x8_t _s12 = vmull_s8(_r12, _k02);
                int16x8_t _s13 = vmull_s8(_r20, _k10);
                _s00 = vmlal_s8(_s00, _r11, _k11);
                _s01 = vmlal_s8(_s01, _r12, _k12);
                _s02 = vmlal_s8(_s02, _r20, _k20);
                _s03 = vmlal_s8(_s03, _r21, _k21);
                _s10 = vmlal_s8(_s10, _r21, _k11);
                _s11 = vmlal_s8(_s11, _r22, _k12);
                _s12 = vmlal_s8(_s12, _r30, _k20);
                _s13 = vmlal_s8(_s13, _r31, _k21);
                int16x8_t _s08 = vmull_s8(_r22, _k22);
                int16x8_t _s18 = vmull_s8(_r32, _k22);

                int32x4_t _sum00 = vaddl_s16(vget_low_s16(_s00), vget_low_s16(_s01));
                int32x4_t _sum01 = vaddl_s16(vget_high_s16(_s00), vget_high_s16(_s01));
                int32x4_t _sum02 = vaddl_s16(vget_low_s16(_s02), vget_low_s16(_s03));
                int32x4_t _sum03 = vaddl_s16(vget_high_s16(_s02), vget_high_s16(_s03));
                int32x4_t _sum10 = vaddl_s16(vget_low_s16(_s10), vget_low_s16(_s11));
                int32x4_t _sum11 = vaddl_s16(vget_high_s16(_s10), vget_high_s16(_s11));
                int32x4_t _sum12 = vaddl_s16(vget_low_s16(_s12), vget_low_s16(_s13));
                int32x4_t _sum13 = vaddl_s16(vget_high_s16(_s12), vget_high_s16(_s13));
                _sum00 = vaddw_s16(_sum00, vget_low_s16(_s08));
                _sum01 = vaddw_s16(_sum01, vget_high_s16(_s08));
                _sum10 = vaddw_s16(_sum10, vget_low_s16(_s18));
                _sum11 = vaddw_s16(_sum11, vget_high_s16(_s18));
                _sum00 = vaddq_s32(_sum00, _sum02);
                _sum01 = vaddq_s32(_sum01, _sum03);
                _sum10 = vaddq_s32(_sum10, _sum12);
                _sum11 = vaddq_s32(_sum11, _sum13);

                vst1q_s32(outptr0, _sum00);
                vst1q_s32(outptr0 + 4, _sum01);
                vst1q_s32(outptr1, _sum10);
                vst1q_s32(outptr1 + 4, _sum11);

                r0 += 8;
                r1 += 8;
                r2 += 8;
                r3 += 8;
                outptr0 += 8;
                outptr1 += 8;
            }
            // advance input pointers past the 2-pixel kernel overhang plus one
            // whole row (two rows were consumed), and skip the already-written
            // second output row
            r0 += 2 * 8 + w * 8;
            r1 += 2 * 8 + w * 8;
            r2 += 2 * 8 + w * 8;
            r3 += 2 * 8 + w * 8;
            outptr0 += outw * 8;
            outptr1 += outw * 8;
        }
        // row tail: single remaining output row (reads r0..r2 only)
        for (; i < outh; i++)
        {
            int j = 0;
            for (; j + 1 < outw; j += 2)
            {
                int8x16_t _r0001 = vld1q_s8(r0);
                int8x16_t _r0203 = vld1q_s8(r0 + 16);
                int8x16_t _r1011 = vld1q_s8(r1);
                int8x16_t _r1213 = vld1q_s8(r1 + 16);
                int8x16_t _r2021 = vld1q_s8(r2);
                int8x16_t _r2223 = vld1q_s8(r2 + 16);

                int16x8_t _s00 = vmull_s8(vget_low_s8(_r0001), _k00);
                int16x8_t _s01 = vmull_s8(vget_high_s8(_r0001), _k01);
                int16x8_t _s02 = vmull_s8(vget_low_s8(_r0203), _k02);
                int16x8_t _s03 = vmull_s8(vget_low_s8(_r1011), _k10);
                int16x8_t _s10 = vmull_s8(vget_high_s8(_r0001), _k00);
                int16x8_t _s11 = vmull_s8(vget_low_s8(_r0203), _k01);
                int16x8_t _s12 = vmull_s8(vget_high_s8(_r0203), _k02);
                int16x8_t _s13 = vmull_s8(vget_high_s8(_r1011), _k10);
                _s00 = vmlal_s8(_s00, vget_high_s8(_r1011), _k11);
                _s01 = vmlal_s8(_s01, vget_low_s8(_r1213), _k12);
                _s02 = vmlal_s8(_s02, vget_low_s8(_r2021), _k20);
                _s03 = vmlal_s8(_s03, vget_high_s8(_r2021), _k21);
                _s10 = vmlal_s8(_s10, vget_low_s8(_r1213), _k11);
                _s11 = vmlal_s8(_s11, vget_high_s8(_r1213), _k12);
                _s12 = vmlal_s8(_s12, vget_high_s8(_r2021), _k20);
                _s13 = vmlal_s8(_s13, vget_low_s8(_r2223), _k21);
                int16x8_t _s08 = vmull_s8(vget_low_s8(_r2223), _k22);
                int16x8_t _s18 = vmull_s8(vget_high_s8(_r2223), _k22);

                int32x4_t _sum00 = vaddl_s16(vget_low_s16(_s00), vget_low_s16(_s01));
                int32x4_t _sum01 = vaddl_s16(vget_high_s16(_s00), vget_high_s16(_s01));
                int32x4_t _sum02 = vaddl_s16(vget_low_s16(_s02), vget_low_s16(_s03));
                int32x4_t _sum03 = vaddl_s16(vget_high_s16(_s02), vget_high_s16(_s03));
                int32x4_t _sum10 = vaddl_s16(vget_low_s16(_s10), vget_low_s16(_s11));
                int32x4_t _sum11 = vaddl_s16(vget_high_s16(_s10), vget_high_s16(_s11));
                int32x4_t _sum12 = vaddl_s16(vget_low_s16(_s12), vget_low_s16(_s13));
                int32x4_t _sum13 = vaddl_s16(vget_high_s16(_s12), vget_high_s16(_s13));
                _sum00 = vaddw_s16(_sum00, vget_low_s16(_s08));
                _sum01 = vaddw_s16(_sum01, vget_high_s16(_s08));
                _sum10 = vaddw_s16(_sum10, vget_low_s16(_s18));
                _sum11 = vaddw_s16(_sum11, vget_high_s16(_s18));
                _sum00 = vaddq_s32(_sum00, _sum02);
                _sum01 = vaddq_s32(_sum01, _sum03);
                _sum10 = vaddq_s32(_sum10, _sum12);
                _sum11 = vaddq_s32(_sum11, _sum13);

                vst1q_s32(outptr0, _sum00);
                vst1q_s32(outptr0 + 4, _sum01);
                vst1q_s32(outptr0 + 8, _sum10);
                vst1q_s32(outptr0 + 12, _sum11);

                r0 += 16;
                r1 += 16;
                r2 += 16;
                outptr0 += 16;
            }
            for (; j < outw; j++)
            {
                int8x8_t _r00 = vld1_s8(r0);
                int8x8_t _r01 = vld1_s8(r0 + 8);
                int8x8_t _r02 = vld1_s8(r0 + 16);
                int8x8_t _r10 = vld1_s8(r1);
                int8x8_t _r11 = vld1_s8(r1 + 8);
                int8x8_t _r12 = vld1_s8(r1 + 16);
                int8x8_t _r20 = vld1_s8(r2);
                int8x8_t _r21 = vld1_s8(r2 + 8);
                int8x8_t _r22 = vld1_s8(r2 + 16);

                int16x8_t _s0 = vmull_s8(_r00, _k00);
                int16x8_t _s1 = vmull_s8(_r01, _k01);
                int16x8_t _s2 = vmull_s8(_r02, _k02);
                int16x8_t _s3 = vmull_s8(_r10, _k10);
                _s0 = vmlal_s8(_s0, _r11, _k11);
                _s1 = vmlal_s8(_s1, _r12, _k12);
                _s2 = vmlal_s8(_s2, _r20, _k20);
                _s3 = vmlal_s8(_s3, _r21, _k21);
                int16x8_t _s4 = vmull_s8(_r22, _k22);

                int32x4_t _sum0 = vaddl_s16(vget_low_s16(_s0), vget_low_s16(_s1));
                int32x4_t _sum1 = vaddl_s16(vget_high_s16(_s0), vget_high_s16(_s1));
                int32x4_t _sum2 = vaddl_s16(vget_low_s16(_s2), vget_low_s16(_s3));
                int32x4_t _sum3 = vaddl_s16(vget_high_s16(_s2), vget_high_s16(_s3));
                _sum0 = vaddw_s16(_sum0, vget_low_s16(_s4));
                _sum1 = vaddw_s16(_sum1, vget_high_s16(_s4));
                _sum0 = vaddq_s32(_sum0, _sum2);
                _sum1 = vaddq_s32(_sum1, _sum3);

                vst1q_s32(outptr0, _sum0);
                vst1q_s32(outptr0 + 4, _sum1);

                r0 += 8;
                r1 += 8;
                r2 += 8;
                outptr0 += 8;
            }
            // skip the 2-pixel kernel overhang at the end of the row
            r0 += 2 * 8;
            r1 += 2 * 8;
            r2 += 2 * 8;
        }
    }
}

static void
convdw3x3s2_pack8_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
    // Depthwise 3x3 stride-2 convolution, int8 input, packed 8 channels per
    // element (pack8). Accumulates into int32 output. One kernel set of nine
    // 8-lane vectors per group; each output "pixel" is 8 channel lanes.
    int w = bottom_blob.w;
    int outw = top_blob.w;
    int outh = top_blob.h;
    const int group = bottom_blob.c;
    // After one output row, the read pointers sit at x = 2*outw; advance to the
    // start of the row two rows down: (w - 2*outw) to finish this row + w to
    // skip the next one, times 8 lanes.
    const int tailstep = (w - 2 * outw + w) * 8;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int g = 0; g < group; g++)
    {
        Mat out = top_blob.channel(g);
        const signed char* k0 = kernel.row<const signed char>(g);
        int* outptr0 = out;
        const Mat img0 = bottom_blob.channel(g);
        // three consecutive input rows feeding one output row
        const signed char* r0 = img0.row<const signed char>(0);
        const signed char* r1 = img0.row<const signed char>(1);
        const signed char* r2 = img0.row<const signed char>(2);
        // 3x3 kernel taps, 8 channel lanes each (9 * 8 = 72 bytes per group)
        int8x8_t _k00 = vld1_s8(k0);
        int8x8_t _k01 = vld1_s8(k0 + 8);
        int8x8_t _k02 = vld1_s8(k0 + 16);
        int8x8_t _k10 = vld1_s8(k0 + 24);
        int8x8_t _k11 = vld1_s8(k0 + 32);
        int8x8_t _k12 = vld1_s8(k0 + 40);
        int8x8_t _k20 = vld1_s8(k0 + 48);
        int8x8_t _k21 = vld1_s8(k0 + 56);
        int8x8_t _k22 = vld1_s8(k0 + 64);
        int i = 0;
        for (; i < outh; i++)
        {
            int j = 0;
            // main loop: two output pixels per iteration (stride 2 -> the two
            // windows start at input x-offsets 0 and 2, i.e. byte offsets 0 and 16)
            for (; j + 1 < outw; j += 2)
            {
                // five consecutive pack8 input positions per row cover both windows
                int8x8_t _r00 = vld1_s8(r0);
                int8x8_t _r01 = vld1_s8(r0 + 8);
                int8x8_t _r02 = vld1_s8(r0 + 16);
                int8x8_t _r03 = vld1_s8(r0 + 24);
                int8x8_t _r04 = vld1_s8(r0 + 32);
                int8x8_t _r10 = vld1_s8(r1);
                int8x8_t _r11 = vld1_s8(r1 + 8);
                int8x8_t _r12 = vld1_s8(r1 + 16);
                int8x8_t _r13 = vld1_s8(r1 + 24);
                int8x8_t _r14 = vld1_s8(r1 + 32);
                int8x8_t _r20 = vld1_s8(r2);
                int8x8_t _r21 = vld1_s8(r2 + 8);
                int8x8_t _r22 = vld1_s8(r2 + 16);
                int8x8_t _r23 = vld1_s8(r2 + 24);
                int8x8_t _r24 = vld1_s8(r2 + 32);
                // _s0x: partial widening products for output pixel 0 (window at r*+0)
                // _s1x: partial widening products for output pixel 1 (window at r*+16)
                int16x8_t _s00 = vmull_s8(_r00, _k00);
                int16x8_t _s01 = vmull_s8(_r01, _k01);
                int16x8_t _s02 = vmull_s8(_r02, _k02);
                int16x8_t _s03 = vmull_s8(_r10, _k10);
                int16x8_t _s10 = vmull_s8(_r02, _k00);
                int16x8_t _s11 = vmull_s8(_r03, _k01);
                int16x8_t _s12 = vmull_s8(_r04, _k02);
                int16x8_t _s13 = vmull_s8(_r12, _k10);
                _s00 = vmlal_s8(_s00, _r11, _k11);
                _s01 = vmlal_s8(_s01, _r12, _k12);
                _s02 = vmlal_s8(_s02, _r20, _k20);
                _s03 = vmlal_s8(_s03, _r21, _k21);
                _s10 = vmlal_s8(_s10, _r13, _k11);
                _s11 = vmlal_s8(_s11, _r14, _k12);
                _s12 = vmlal_s8(_s12, _r22, _k20);
                _s13 = vmlal_s8(_s13, _r23, _k21);
                // ninth tap kept separate so each s-register holds at most two products
                int16x8_t _s08 = vmull_s8(_r22, _k22);
                int16x8_t _s18 = vmull_s8(_r24, _k22);
                // widen the int16 partials to int32 and reduce
                int32x4_t _sum00 = vaddl_s16(vget_low_s16(_s00), vget_low_s16(_s01));
                int32x4_t _sum01 = vaddl_s16(vget_high_s16(_s00), vget_high_s16(_s01));
                int32x4_t _sum02 = vaddl_s16(vget_low_s16(_s02), vget_low_s16(_s03));
                int32x4_t _sum03 = vaddl_s16(vget_high_s16(_s02), vget_high_s16(_s03));
                int32x4_t _sum10 = vaddl_s16(vget_low_s16(_s10), vget_low_s16(_s11));
                int32x4_t _sum11 = vaddl_s16(vget_high_s16(_s10), vget_high_s16(_s11));
                int32x4_t _sum12 = vaddl_s16(vget_low_s16(_s12), vget_low_s16(_s13));
                int32x4_t _sum13 = vaddl_s16(vget_high_s16(_s12), vget_high_s16(_s13));
                _sum00 = vaddw_s16(_sum00, vget_low_s16(_s08));
                _sum01 = vaddw_s16(_sum01, vget_high_s16(_s08));
                _sum10 = vaddw_s16(_sum10, vget_low_s16(_s18));
                _sum11 = vaddw_s16(_sum11, vget_high_s16(_s18));
                _sum00 = vaddq_s32(_sum00, _sum02);
                _sum01 = vaddq_s32(_sum01, _sum03);
                _sum10 = vaddq_s32(_sum10, _sum12);
                _sum11 = vaddq_s32(_sum11, _sum13);
                // 2 pixels * 8 lanes of int32 output
                vst1q_s32(outptr0, _sum00);
                vst1q_s32(outptr0 + 4, _sum01);
                vst1q_s32(outptr0 + 8, _sum10);
                vst1q_s32(outptr0 + 12, _sum11);
                r0 += 32; // stride 2 * 2 pixels * 8 lanes
                r1 += 32;
                r2 += 32;
                outptr0 += 16;
            }
            // remainder: one output pixel at a time
            for (; j < outw; j++)
            {
                int8x8_t _r00 = vld1_s8(r0);
                int8x8_t _r01 = vld1_s8(r0 + 8);
                int8x8_t _r02 = vld1_s8(r0 + 16);
                int8x8_t _r10 = vld1_s8(r1);
                int8x8_t _r11 = vld1_s8(r1 + 8);
                int8x8_t _r12 = vld1_s8(r1 + 16);
                int8x8_t _r20 = vld1_s8(r2);
                int8x8_t _r21 = vld1_s8(r2 + 8);
                int8x8_t _r22 = vld1_s8(r2 + 16);
                int16x8_t _s0 = vmull_s8(_r00, _k00);
                int16x8_t _s1 = vmull_s8(_r01, _k01);
                int16x8_t _s2 = vmull_s8(_r02, _k02);
                int16x8_t _s3 = vmull_s8(_r10, _k10);
                _s0 = vmlal_s8(_s0, _r11, _k11);
                _s1 = vmlal_s8(_s1, _r12, _k12);
                _s2 = vmlal_s8(_s2, _r20, _k20);
                _s3 = vmlal_s8(_s3, _r21, _k21);
                int16x8_t _s4 = vmull_s8(_r22, _k22);
                int32x4_t _sum0 = vaddl_s16(vget_low_s16(_s0), vget_low_s16(_s1));
                int32x4_t _sum1 = vaddl_s16(vget_high_s16(_s0), vget_high_s16(_s1));
                int32x4_t _sum2 = vaddl_s16(vget_low_s16(_s2), vget_low_s16(_s3));
                int32x4_t _sum3 = vaddl_s16(vget_high_s16(_s2), vget_high_s16(_s3));
                _sum0 = vaddw_s16(_sum0, vget_low_s16(_s4));
                _sum1 = vaddw_s16(_sum1, vget_high_s16(_s4));
                _sum0 = vaddq_s32(_sum0, _sum2);
                _sum1 = vaddq_s32(_sum1, _sum3);
                vst1q_s32(outptr0, _sum0);
                vst1q_s32(outptr0 + 4, _sum1);
                r0 += 16; // stride 2 * 8 lanes
                r1 += 16;
                r2 += 16;
                outptr0 += 8;
            }
            // skip to the input row pair for the next output row
            r0 += tailstep;
            r1 += tailstep;
            r2 += tailstep;
        }
    }
}
SPHCalcHydroForceFunctor.h
/**
 * @file SPHCalcHydroForceFunctor.h
 * @author seckler
 * @date 22.01.18
 */
#pragma once

#include "autopas/particles/OwnershipState.h"
#include "autopas/sph/SPHKernels.h"

namespace autopas::sph {
/**
 * Class that defines the hydrodynamic force functor.
 * It is used to calculate the force (acceleration), the energy derivative and
 * the maximal signal velocity based on the given SPH kernels.
 * @tparam Particle
 */
template <class Particle>
class SPHCalcHydroForceFunctor : public Functor<Particle, SPHCalcHydroForceFunctor<Particle>> {
 public:
  /// soa arrays type
  using SoAArraysType = typename Particle::SoAArraysType;

  SPHCalcHydroForceFunctor()
      // the actual cutoff used is dynamic. 0 is used to pass the sanity check.
      : autopas::Functor<Particle, SPHCalcHydroForceFunctor<Particle>>(0.){};

  bool isRelevantForTuning() override { return true; }

  bool allowsNewton3() override { return true; }

  bool allowsNonNewton3() override { return true; }

  /**
   * Calculates the contribution of the interaction of particle i and j to the
   * hydrodynamic force.
   * It is not symmetric, because the smoothing lengths of the two particles can
   * be different.
   * @param i first particle of the interaction
   * @param j second particle of the interaction
   * @param newton3 defines whether or whether not to use newton 3
   */
  void AoSFunctor(Particle &i, Particle &j, bool newton3 = true) override {
    if (i.isDummy() or j.isDummy()) {
      return;
    }
    const std::array<double, 3> dr = utils::ArrayMath::sub(i.getR(), j.getR());
    // const PS::F64vec dr = ep_i[i].pos - ep_j[j].pos;
    // NOTE: the cutoff depends only on particle i's smoothing length, hence the
    // asymmetry mentioned in the doc comment above.
    double cutoff = i.getSmoothingLength() * autopas::sph::SPHKernels::getKernelSupportRadius();

    if (autopas::utils::ArrayMath::dot(dr, dr) >= cutoff * cutoff) {
      return;
    }

    const std::array<double, 3> dv = utils::ArrayMath::sub(i.getV(), j.getV());
    // const PS::F64vec dv = ep_i[i].vel - ep_j[j].vel;

    // relative radial velocity; only attracting (approaching) pairs contribute
    double dvdr = utils::ArrayMath::dot(dv, dr);
    const double w_ij = (dvdr < 0) ? dvdr / utils::ArrayMath::L2Norm(dr) : 0;
    // const PS::F64 w_ij = (dv * dr < 0) ? dv * dr / sqrt(dr * dr) : 0;

    // signal velocity, used for the artificial viscosity and the timestep criterion
    const double v_sig = i.getSoundSpeed() + j.getSoundSpeed() - 3.0 * w_ij;
    // const PS::F64 v_sig = ep_i[i].snds + ep_j[j].snds - 3.0 * w_ij;

    i.checkAndSetVSigMax(v_sig);
    if (newton3) {
      j.checkAndSetVSigMax(v_sig);  // Newton 3
      // v_sig_max = std::max(v_sig_max, v_sig);
    }

    // artificial viscosity (Monaghan-type), normalized by the mean density
    const double AV = -0.5 * v_sig * w_ij / (0.5 * (i.getDensity() + j.getDensity()));
    // const PS::F64 AV = - 0.5 * v_sig * w_ij / (0.5 * (ep_i[i].dens +
    // ep_j[j].dens));

    // symmetrized kernel gradient: mean of the gradients with both smoothing lengths
    const std::array<double, 3> gradW_ij =
        utils::ArrayMath::mulScalar(utils::ArrayMath::add(SPHKernels::gradW(dr, i.getSmoothingLength()),
                                                          SPHKernels::gradW(dr, j.getSmoothingLength())),
                                    0.5);
    // const PS::F64vec gradW_ij = 0.5 * (gradW(dr, ep_i[i].smth) + gradW(dr,
    // ep_j[j].smth));

    // pressure term of the momentum equation plus artificial viscosity
    double scale =
        i.getPressure() / (i.getDensity() * i.getDensity()) + j.getPressure() / (j.getDensity() * j.getDensity()) + AV;
    i.subAcceleration(utils::ArrayMath::mulScalar(gradW_ij, scale * j.getMass()));
    // hydro[i].acc -= ep_j[j].mass * (ep_i[i].pres / (ep_i[i].dens *
    // ep_i[i].dens) + ep_j[j].pres / (ep_j[j].dens * ep_j[j].dens) + AV) *
    // gradW_ij;
    if (newton3) {
      j.addAcceleration(utils::ArrayMath::mulScalar(gradW_ij, scale * i.getMass()));
      // Newton3, gradW_ij = -gradW_ji
    }

    // energy equation: only the own pressure term plus half the viscosity
    double scale2i = j.getMass() * (i.getPressure() / (i.getDensity() * i.getDensity()) + 0.5 * AV);
    i.addEngDot(utils::ArrayMath::dot(gradW_ij, dv) * scale2i);
    // hydro[i].eng_dot += ep_j[j].mass * (ep_i[i].pres / (ep_i[i].dens *
    // ep_i[i].dens) + 0.5 * AV) * dv * gradW_ij;
    if (newton3) {
      double scale2j = i.getMass() * (j.getPressure() / (j.getDensity() * j.getDensity()) + 0.5 * AV);
      j.addEngDot(utils::ArrayMath::dot(gradW_ij, dv) * scale2j);  // Newton 3
    }
  }

  /**
   * @copydoc Functor::SoAFunctorSingle(SoAView<SoAArraysType>, bool)
   * This functor ignores the newton3 value, as we do not expect any benefit from disabling newton3.
   */
  void SoAFunctorSingle(SoAView<SoAArraysType> soa, bool newton3) override {
    if (soa.getNumParticles() == 0) return;

    double *const __restrict massptr = soa.template begin<Particle::AttributeNames::mass>();
    double *const __restrict densityptr = soa.template begin<Particle::AttributeNames::density>();
    double *const __restrict smthptr = soa.template begin<Particle::AttributeNames::smth>();
    double *const __restrict soundSpeedptr = soa.template begin<Particle::AttributeNames::soundSpeed>();
    double *const __restrict pressureptr = soa.template begin<Particle::AttributeNames::pressure>();
    double *const __restrict vsigmaxptr = soa.template begin<Particle::AttributeNames::vsigmax>();
    double *const __restrict engDotptr = soa.template begin<Particle::AttributeNames::engDot>();

    double *const __restrict xptr = soa.template begin<Particle::AttributeNames::posX>();
    double *const __restrict yptr = soa.template begin<Particle::AttributeNames::posY>();
    double *const __restrict zptr = soa.template begin<Particle::AttributeNames::posZ>();

    double *const __restrict velXptr = soa.template begin<Particle::AttributeNames::velX>();
    double *const __restrict velYptr = soa.template begin<Particle::AttributeNames::velY>();
    double *const __restrict velZptr = soa.template begin<Particle::AttributeNames::velZ>();

    double *const __restrict accXptr = soa.template begin<Particle::AttributeNames::accX>();
    double *const __restrict accYptr = soa.template begin<Particle::AttributeNames::accY>();
    double *const __restrict accZptr = soa.template begin<Particle::AttributeNames::accZ>();

    const auto *const __restrict ownedStatePtr = soa.template begin<Particle::AttributeNames::ownershipState>();

    for (unsigned int indexFirst = 0; indexFirst < soa.getNumParticles(); ++indexFirst) {
      // checks whether particle i is owned.
      if (ownedStatePtr[indexFirst] == OwnershipState::dummy) {
        continue;
      }

      // per-i accumulators; written back to the SoA only after the inner loop
      double localvsigmax = 0.;
      double localengdotsum = 0.;
      double localAccX = 0.;
      double localAccY = 0.;
      double localAccZ = 0.;

      // icpc vectorizes this.
      // g++ only with -ffast-math or -funsafe-math-optimizations
      //#pragma omp simd reduction(+ : localengdotsum, localAccX, localAccY, localAccZ), reduction(max : localvsigmax)
      for (unsigned int j = indexFirst + 1; j < soa.getNumParticles(); ++j) {
        const double drx = xptr[indexFirst] - xptr[j];
        const double dry = yptr[indexFirst] - yptr[j];
        const double drz = zptr[indexFirst] - zptr[j];

        const double drx2 = drx * drx;
        const double dry2 = dry * dry;
        const double drz2 = drz * drz;

        const double dr2 = drx2 + dry2 + drz2;

        double cutoff = smthptr[indexFirst] * autopas::sph::SPHKernels::getKernelSupportRadius();

        if (dr2 >= cutoff * cutoff or ownedStatePtr[j] == OwnershipState::dummy) continue;

        const double dvX = velXptr[indexFirst] - velXptr[j];
        const double dvY = velYptr[indexFirst] - velYptr[j];
        const double dvZ = velZptr[indexFirst] - velZptr[j];
        // const PS::F64vec dv = ep_i[i].vel - ep_j[j].vel;

        double dvdr = dvX * drx + dvY * dry + dvZ * drz;
        const double w_ij = (dvdr < 0) ? dvdr / sqrt(dr2) : 0;
        // const PS::F64 w_ij = (dv * dr < 0) ? dv * dr / sqrt(dr * dr) : 0;

        const double v_sig = soundSpeedptr[indexFirst] + soundSpeedptr[j] - 3.0 * w_ij;
        // const PS::F64 v_sig = ep_i[i].snds + ep_j[j].snds - 3.0 * w_ij;

        localvsigmax = std::max(localvsigmax, v_sig);
        // newton3 contribution applied unconditionally (see doc comment: the
        // newton3 flag is ignored here). Ternary instead of std::max —
        // presumably to keep the loop auto-vectorizable; confirm before changing.
        // vsigmaxptr[j] = std::max(vsigmaxptr[j], v_sig);  // Newton 3
        vsigmaxptr[j] = vsigmaxptr[j] > v_sig ? vsigmaxptr[j] : v_sig;  // Newton 3
        // v_sig_max = std::max(v_sig_max, v_sig);

        const double AV = -0.5 * v_sig * w_ij / (0.5 * (densityptr[indexFirst] + densityptr[j]));
        // const PS::F64 AV = - 0.5 * v_sig * w_ij / (0.5 * (ep_i[i].dens +
        // ep_j[j].dens));

        const std::array<double, 3> gradW_ij =
            utils::ArrayMath::mulScalar(utils::ArrayMath::add(SPHKernels::gradW({drx, dry, drz}, smthptr[indexFirst]),
                                                              SPHKernels::gradW({drx, dry, drz}, smthptr[j])),
                                        0.5);
        // const PS::F64vec gradW_ij = 0.5 * (gradW(dr, ep_i[i].smth) + gradW(dr,
        // ep_j[j].smth));

        double scale = pressureptr[indexFirst] / (densityptr[indexFirst] * densityptr[indexFirst]) +
                       pressureptr[j] / (densityptr[j] * densityptr[j]) + AV;
        const double massscale = scale * massptr[j];
        localAccX -= gradW_ij[0] * massscale;
        localAccY -= gradW_ij[1] * massscale;
        localAccZ -= gradW_ij[2] * massscale;
        // hydro[i].acc -= ep_j[j].mass * (ep_i[i].pres / (ep_i[i].dens *
        // ep_i[i].dens) + ep_j[j].pres / (ep_j[j].dens * ep_j[j].dens) + AV) *
        // gradW_ij;
        const double massscale2 = scale * massptr[indexFirst];
        accXptr[j] += gradW_ij[0] * massscale2;
        accYptr[j] += gradW_ij[1] * massscale2;
        accZptr[j] += gradW_ij[2] * massscale2;
        // Newton3, gradW_ij = -gradW_ji

        double scale2i = massptr[j] * (pressureptr[indexFirst] / (densityptr[indexFirst] * densityptr[indexFirst]) + 0.5 * AV);
        localengdotsum += (gradW_ij[0] * dvX + gradW_ij[1] * dvY + gradW_ij[2] * dvZ) * scale2i;
        // hydro[i].eng_dot += ep_j[j].mass * (ep_i[i].pres / (ep_i[i].dens *
        // ep_i[i].dens) + 0.5 * AV) * dv * gradW_ij;

        double scale2j = massptr[indexFirst] * (pressureptr[j] / (densityptr[j] * densityptr[j]) + 0.5 * AV);
        engDotptr[j] += (gradW_ij[0] * dvX + gradW_ij[1] * dvY + gradW_ij[2] * dvZ) * scale2j;  // Newton 3
      }

      engDotptr[indexFirst] += localengdotsum;
      accXptr[indexFirst] += localAccX;
      accYptr[indexFirst] += localAccY;
      accZptr[indexFirst] += localAccZ;
      vsigmaxptr[indexFirst] = std::max(localvsigmax, vsigmaxptr[indexFirst]);
    }
  }

  /**
   * @copydoc Functor::SoAFunctorPair(SoAView<SoAArraysType>, SoAView<SoAArraysType>, bool)
   */
  void SoAFunctorPair(SoAView<SoAArraysType> soa1, SoAView<SoAArraysType> soa2, bool newton3) override {
    if (soa1.getNumParticles() == 0 || soa2.getNumParticles() == 0) return;

    double *const __restrict massptr1 = soa1.template begin<Particle::AttributeNames::mass>();
    double *const __restrict densityptr1 = soa1.template begin<Particle::AttributeNames::density>();
    double *const __restrict smthptr1 = soa1.template begin<Particle::AttributeNames::smth>();
    double *const __restrict soundSpeedptr1 = soa1.template begin<Particle::AttributeNames::soundSpeed>();
    double *const __restrict pressureptr1 = soa1.template begin<Particle::AttributeNames::pressure>();
    double *const __restrict vsigmaxptr1 = soa1.template begin<Particle::AttributeNames::vsigmax>();
    double *const __restrict engDotptr1 = soa1.template begin<Particle::AttributeNames::engDot>();

    double *const __restrict xptr1 = soa1.template begin<Particle::AttributeNames::posX>();
    double *const __restrict yptr1 = soa1.template begin<Particle::AttributeNames::posY>();
    double *const __restrict zptr1 = soa1.template begin<Particle::AttributeNames::posZ>();

    double *const __restrict velXptr1 = soa1.template begin<Particle::AttributeNames::velX>();
    double *const __restrict velYptr1 = soa1.template begin<Particle::AttributeNames::velY>();
    double *const __restrict velZptr1 = soa1.template begin<Particle::AttributeNames::velZ>();

    double *const __restrict accXptr1 = soa1.template begin<Particle::AttributeNames::accX>();
    double *const __restrict accYptr1 = soa1.template begin<Particle::AttributeNames::accY>();
    double *const __restrict accZptr1 = soa1.template begin<Particle::AttributeNames::accZ>();

    double *const __restrict massptr2 = soa2.template begin<Particle::AttributeNames::mass>();
    double *const __restrict densityptr2 = soa2.template begin<Particle::AttributeNames::density>();
    double *const __restrict smthptr2 = soa2.template begin<Particle::AttributeNames::smth>();
    double *const __restrict soundSpeedptr2 = soa2.template begin<Particle::AttributeNames::soundSpeed>();
    double *const __restrict pressureptr2 = soa2.template begin<Particle::AttributeNames::pressure>();
    double *const __restrict vsigmaxptr2 = soa2.template begin<Particle::AttributeNames::vsigmax>();
    double *const __restrict engDotptr2 = soa2.template begin<Particle::AttributeNames::engDot>();

    double *const __restrict xptr2 = soa2.template begin<Particle::AttributeNames::posX>();
    double *const __restrict yptr2 = soa2.template begin<Particle::AttributeNames::posY>();
    double *const __restrict zptr2 = soa2.template begin<Particle::AttributeNames::posZ>();

    double *const __restrict velXptr2 = soa2.template begin<Particle::AttributeNames::velX>();
    double *const __restrict velYptr2 = soa2.template begin<Particle::AttributeNames::velY>();
    double *const __restrict velZptr2 = soa2.template begin<Particle::AttributeNames::velZ>();

    double *const __restrict accXptr2 = soa2.template begin<Particle::AttributeNames::accX>();
    double *const __restrict accYptr2 = soa2.template begin<Particle::AttributeNames::accY>();
    double *const __restrict accZptr2 = soa2.template begin<Particle::AttributeNames::accZ>();

    const auto *const __restrict ownedStatePtr1 = soa1.template begin<Particle::AttributeNames::ownershipState>();
    const auto *const __restrict ownedStatePtr2 = soa2.template begin<Particle::AttributeNames::ownershipState>();

    for (unsigned int indexFirst = 0; indexFirst < soa1.getNumParticles(); ++indexFirst) {
      // checks whether particle i is owned.
      if (ownedStatePtr1[indexFirst] == OwnershipState::dummy) {
        continue;
      }

      // per-i accumulators; written back to soa1 only after the inner loop
      double localvsigmax = 0.;
      double localengdotsum = 0.;
      double localAccX = 0.;
      double localAccY = 0.;
      double localAccZ = 0.;

      // icpc vectorizes this.
      // g++ only with -ffast-math or -funsafe-math-optimizations
      //#pragma omp simd reduction(+ : localengdotsum, localAccX, localAccY, localAccZ), reduction(max : localvsigmax)
      for (unsigned int j = 0; j < soa2.getNumParticles(); ++j) {
        const double drx = xptr1[indexFirst] - xptr2[j];
        const double dry = yptr1[indexFirst] - yptr2[j];
        const double drz = zptr1[indexFirst] - zptr2[j];

        const double drx2 = drx * drx;
        const double dry2 = dry * dry;
        const double drz2 = drz * drz;

        const double dr2 = drx2 + dry2 + drz2;

        double cutoff = smthptr1[indexFirst] * autopas::sph::SPHKernels::getKernelSupportRadius();

        if (dr2 >= cutoff * cutoff or ownedStatePtr2[j] == OwnershipState::dummy) continue;

        const double dvX = velXptr1[indexFirst] - velXptr2[j];
        const double dvY = velYptr1[indexFirst] - velYptr2[j];
        const double dvZ = velZptr1[indexFirst] - velZptr2[j];
        // const PS::F64vec dv = ep_i[i].vel - ep_j[j].vel;

        double dvdr = dvX * drx + dvY * dry + dvZ * drz;
        const double w_ij = (dvdr < 0) ? dvdr / sqrt(dr2) : 0;
        // const PS::F64 w_ij = (dv * dr < 0) ? dv * dr / sqrt(dr * dr) : 0;

        const double v_sig = soundSpeedptr1[indexFirst] + soundSpeedptr2[j] - 3.0 * w_ij;
        // const PS::F64 v_sig = ep_i[i].snds + ep_j[j].snds - 3.0 * w_ij;

        localvsigmax = std::max(localvsigmax, v_sig);
        if (newton3) {
          // ternary instead of std::max — presumably for vectorization; confirm.
          // vsigmaxptr2[j] = std::max(vsigmaxptr2[j], v_sig);  // Newton 3
          vsigmaxptr2[j] = vsigmaxptr2[j] > v_sig ? vsigmaxptr2[j] : v_sig;  // Newton 3
          // v_sig_max = std::max(v_sig_max, v_sig);
        }

        const double AV = -0.5 * v_sig * w_ij / (0.5 * (densityptr1[indexFirst] + densityptr2[j]));
        // const PS::F64 AV = - 0.5 * v_sig * w_ij / (0.5 * (ep_i[i].dens +
        // ep_j[j].dens));

        const std::array<double, 3> gradW_ij =
            utils::ArrayMath::mulScalar(utils::ArrayMath::add(SPHKernels::gradW({drx, dry, drz}, smthptr1[indexFirst]),
                                                              SPHKernels::gradW({drx, dry, drz}, smthptr2[j])),
                                        0.5);
        // const PS::F64vec gradW_ij = 0.5 * (gradW(dr, ep_i[i].smth) + gradW(dr,
        // ep_j[j].smth));

        double scale = pressureptr1[indexFirst] / (densityptr1[indexFirst] * densityptr1[indexFirst]) +
                       pressureptr2[j] / (densityptr2[j] * densityptr2[j]) + AV;
        const double massscale = scale * massptr2[j];
        localAccX -= gradW_ij[0] * massscale;
        localAccY -= gradW_ij[1] * massscale;
        localAccZ -= gradW_ij[2] * massscale;
        // hydro[i].acc -= ep_j[j].mass * (ep_i[i].pres / (ep_i[i].dens *
        // ep_i[i].dens) + ep_j[j].pres / (ep_j[j].dens * ep_j[j].dens) + AV) *
        // gradW_ij;
        if (newton3) {
          const double massscale = scale * massptr1[indexFirst];
          accXptr2[j] += gradW_ij[0] * massscale;
          accYptr2[j] += gradW_ij[1] * massscale;
          accZptr2[j] += gradW_ij[2] * massscale;
          // Newton3, gradW_ij = -gradW_ji
        }

        double scale2i =
            massptr2[j] * (pressureptr1[indexFirst] / (densityptr1[indexFirst] * densityptr1[indexFirst]) + 0.5 * AV);
        localengdotsum += (gradW_ij[0] * dvX + gradW_ij[1] * dvY + gradW_ij[2] * dvZ) * scale2i;
        // hydro[i].eng_dot += ep_j[j].mass * (ep_i[i].pres / (ep_i[i].dens *
        // ep_i[i].dens) + 0.5 * AV) * dv * gradW_ij;

        if (newton3) {
          double scale2j = massptr1[indexFirst] * (pressureptr2[j] / (densityptr2[j] * densityptr2[j]) + 0.5 * AV);
          engDotptr2[j] += (gradW_ij[0] * dvX + gradW_ij[1] * dvY + gradW_ij[2] * dvZ) * scale2j;  // Newton 3
        }
      }

      engDotptr1[indexFirst] += localengdotsum;
      accXptr1[indexFirst] += localAccX;
      accYptr1[indexFirst] += localAccY;
      accZptr1[indexFirst] += localAccZ;
      vsigmaxptr1[indexFirst] = std::max(localvsigmax, vsigmaxptr1[indexFirst]);
    }
  }

  // clang-format off
  /**
   * @copydoc Functor::SoAFunctorVerlet(SoAView<SoAArraysType> soa, const size_t indexFirst, const std::vector<size_t, autopas::AlignedAllocator<size_t>> &neighborList, bool newton3)
   */
  // clang-format on
  void SoAFunctorVerlet(SoAView<SoAArraysType> soa, const size_t indexFirst,
                        const std::vector<size_t, autopas::AlignedAllocator<size_t>> &neighborList,
                        bool newton3) override {
    if (soa.getNumParticles() == 0) return;

    const auto *const __restrict ownedStatePtr = soa.template begin<Particle::AttributeNames::ownershipState>();

    // checks whether particle i is owned.
    if (ownedStatePtr[indexFirst] == OwnershipState::dummy) {
      return;
    }

    double *const __restrict massptr = soa.template begin<Particle::AttributeNames::mass>();
    double *const __restrict densityptr = soa.template begin<Particle::AttributeNames::density>();
    double *const __restrict smthptr = soa.template begin<Particle::AttributeNames::smth>();
    double *const __restrict soundSpeedptr = soa.template begin<Particle::AttributeNames::soundSpeed>();
    double *const __restrict pressureptr = soa.template begin<Particle::AttributeNames::pressure>();
    double *const __restrict vsigmaxptr = soa.template begin<Particle::AttributeNames::vsigmax>();
    double *const __restrict engDotptr = soa.template begin<Particle::AttributeNames::engDot>();

    double *const __restrict xptr = soa.template begin<Particle::AttributeNames::posX>();
    double *const __restrict yptr = soa.template begin<Particle::AttributeNames::posY>();
    double *const __restrict zptr = soa.template begin<Particle::AttributeNames::posZ>();

    double *const __restrict velXptr = soa.template begin<Particle::AttributeNames::velX>();
    double *const __restrict velYptr = soa.template begin<Particle::AttributeNames::velY>();
    double *const __restrict velZptr = soa.template begin<Particle::AttributeNames::velZ>();

    double *const __restrict accXptr = soa.template begin<Particle::AttributeNames::accX>();
    double *const __restrict accYptr = soa.template begin<Particle::AttributeNames::accY>();
    double *const __restrict accZptr = soa.template begin<Particle::AttributeNames::accZ>();

    // accumulators for particle indexFirst; written back after the loop
    double localvsigmax = 0.;
    double localengdotsum = 0.;
    double localAccX = 0.;
    double localAccY = 0.;
    double localAccZ = 0.;

    const auto &currentList = neighborList;
    size_t listSize = currentList.size();

    // icpc vectorizes this.
    // g++ only with -ffast-math or -funsafe-math-optimizations
    //#pragma omp simd reduction(+ : localengdotsum, localAccX, localAccY, localAccZ), reduction(max : localvsigmax)
    for (unsigned int j = 0; j < listSize; ++j) {
      const double drx = xptr[indexFirst] - xptr[currentList[j]];
      const double dry = yptr[indexFirst] - yptr[currentList[j]];
      const double drz = zptr[indexFirst] - zptr[currentList[j]];

      const double drx2 = drx * drx;
      const double dry2 = dry * dry;
      const double drz2 = drz * drz;

      const double dr2 = drx2 + dry2 + drz2;

      double cutoff = smthptr[indexFirst] * autopas::sph::SPHKernels::getKernelSupportRadius();

      if (dr2 >= cutoff * cutoff or ownedStatePtr[currentList[j]] == OwnershipState::dummy) continue;

      const double dvX = velXptr[indexFirst] - velXptr[currentList[j]];
      const double dvY = velYptr[indexFirst] - velYptr[currentList[j]];
      const double dvZ = velZptr[indexFirst] - velZptr[currentList[j]];
      // const PS::F64vec dv = ep_i[i].vel - ep_j[currentList[j]].vel;

      double dvdr = dvX * drx + dvY * dry + dvZ * drz;
      const double w_ij = (dvdr < 0) ? dvdr / sqrt(dr2) : 0;
      // const PS::F64 w_ij = (dv * dr < 0) ? dv * dr / sqrt(dr * dr) : 0;

      const double v_sig = soundSpeedptr[indexFirst] + soundSpeedptr[currentList[j]] - 3.0 * w_ij;
      // const PS::F64 v_sig = ep_i[i].snds + ep_j[currentList[j]].snds - 3.0 * w_ij;

      localvsigmax = std::max(localvsigmax, v_sig);
      if (newton3) {
        // ternary instead of std::max — presumably for vectorization; confirm.
        // vsigmaxptr[currentList[j]] = std::max(vsigmaxptr[currentList[j]], v_sig);  // Newton 3
        vsigmaxptr[currentList[j]] = vsigmaxptr[currentList[j]] > v_sig ? vsigmaxptr[currentList[j]] : v_sig;  // Newton 3
        // v_sig_max = std::max(v_sig_max, v_sig);
      }

      const double AV = -0.5 * v_sig * w_ij / (0.5 * (densityptr[indexFirst] + densityptr[currentList[j]]));
      // const PS::F64 AV = - 0.5 * v_sig * w_ij / (0.5 * (ep_i[i].dens +
      // ep_j[currentList[j]].dens));

      const std::array<double, 3> gradW_ij = utils::ArrayMath::mulScalar(
          utils::ArrayMath::add(SPHKernels::gradW({drx, dry, drz}, smthptr[indexFirst]),
                                SPHKernels::gradW({drx, dry, drz}, smthptr[currentList[j]])),
          0.5);
      // const PS::F64vec gradW_ij = 0.5 * (gradW(dr, ep_i[i].smth) + gradW(dr,
      // ep_j[currentList[j]].smth));

      double scale = pressureptr[indexFirst] / (densityptr[indexFirst] * densityptr[indexFirst]) +
                     pressureptr[currentList[j]] / (densityptr[currentList[j]] * densityptr[currentList[j]]) + AV;
      const double massscale = scale * massptr[currentList[j]];
      localAccX -= gradW_ij[0] * massscale;
      localAccY -= gradW_ij[1] * massscale;
      localAccZ -= gradW_ij[2] * massscale;
      // hydro[i].acc -= ep_j[currentList[j]].mass * (ep_i[i].pres / (ep_i[i].dens *
      // ep_i[i].dens) + ep_j[currentList[j]].pres / (ep_j[currentList[j]].dens * ep_j[currentList[j]].dens) + AV) *
      // gradW_ij;
      if (newton3) {
        const double massscale = scale * massptr[indexFirst];
        accXptr[currentList[j]] += gradW_ij[0] * massscale;
        accYptr[currentList[j]] += gradW_ij[1] * massscale;
        accZptr[currentList[j]] += gradW_ij[2] * massscale;
        // Newton3, gradW_ij = -gradW_ji
      }

      double scale2i =
          massptr[currentList[j]] * (pressureptr[indexFirst] / (densityptr[indexFirst] * densityptr[indexFirst]) + 0.5 * AV);
      localengdotsum += (gradW_ij[0] * dvX + gradW_ij[1] * dvY + gradW_ij[2] * dvZ) * scale2i;
      // hydro[i].eng_dot += ep_j[currentList[j]].mass * (ep_i[i].pres / (ep_i[i].dens *
      // ep_i[i].dens) + 0.5 * AV) * dv * gradW_ij;

      if (newton3) {
        double scale2j = massptr[indexFirst] *
                         (pressureptr[currentList[j]] / (densityptr[currentList[j]] * densityptr[currentList[j]]) + 0.5 * AV);
        engDotptr[currentList[j]] += (gradW_ij[0] * dvX + gradW_ij[1] * dvY + gradW_ij[2] * dvZ) * scale2j;  // Newton 3
      }
    }

    engDotptr[indexFirst] += localengdotsum;
    accXptr[indexFirst] += localAccX;
    accYptr[indexFirst] += localAccY;
    accZptr[indexFirst] += localAccZ;
    vsigmaxptr[indexFirst] = std::max(localvsigmax, vsigmaxptr[indexFirst]);
  }

  /**
   * @copydoc Functor::getNeededAttr()
   */
  constexpr static auto getNeededAttr() {
    return std::array<typename Particle::AttributeNames, 17>{
        Particle::AttributeNames::mass,  Particle::AttributeNames::density, Particle::AttributeNames::smth,
        Particle::AttributeNames::soundSpeed, Particle::AttributeNames::pressure, Particle::AttributeNames::vsigmax,
        Particle::AttributeNames::engDot, Particle::AttributeNames::posX, Particle::AttributeNames::posY,
        Particle::AttributeNames::posZ, Particle::AttributeNames::velX, Particle::AttributeNames::velY,
        Particle::AttributeNames::velZ, Particle::AttributeNames::accX, Particle::AttributeNames::accY,
        Particle::AttributeNames::accZ, Particle::AttributeNames::ownershipState};
  }

  /**
   * @copydoc Functor::getNeededAttr(std::false_type)
   */
  constexpr static auto getNeededAttr(std::false_type) {
    return std::array<typename Particle::AttributeNames, 12>{
        Particle::AttributeNames::mass, Particle::AttributeNames::density, Particle::AttributeNames::smth,
        Particle::AttributeNames::soundSpeed, Particle::AttributeNames::pressure, Particle::AttributeNames::posX,
        Particle::AttributeNames::posY, Particle::AttributeNames::posZ, Particle::AttributeNames::velX,
        Particle::AttributeNames::velY, Particle::AttributeNames::velZ, Particle::AttributeNames::ownershipState};
  }

  /**
   * @copydoc Functor::getComputedAttr()
   */
  constexpr static auto getComputedAttr() {
    return std::array<typename Particle::AttributeNames, 6>{
        Particle::AttributeNames::vsigmax, Particle::AttributeNames::engDot, Particle::AttributeNames::accX,
        Particle::AttributeNames::accY, Particle::AttributeNames::accZ, Particle::AttributeNames::ownershipState};
  }

  /**
   * Get the number of floating point operations used in one full kernel call
   * @return the number of floating point operations
   */
  static uint64_t getNumFlopsPerKernelCall() {
    ///@todo return correct flopcount
    return 1ul;
  }
};
}  // namespace autopas::sph
forest.h
#pragma once #include "tree.h" template <typename SplitFunctionT, typename LeafFunctionT> class Forest { public: Forest() {} virtual ~Forest() {} std::shared_ptr<LeafFunctionT> inferencest(const SamplePtr& sample) const { int n_trees = trees_.size(); std::vector<std::shared_ptr<LeafFunctionT>> fcns; //inference of individual trees for(int tree_idx = 0; tree_idx < n_trees; ++tree_idx) { std::shared_ptr<LeafFunctionT> tree_fcn = trees_[tree_idx]->inference(sample); fcns.push_back(tree_fcn); } //combine tree fcns/results and collect all results return fcns[0]->Reduce(fcns); } std::vector<std::shared_ptr<LeafFunctionT>> inferencemt(const std::vector<SamplePtr>& samples, int n_threads) const { std::vector<std::shared_ptr<LeafFunctionT>> targets(samples.size()); omp_set_num_threads(n_threads); #pragma omp parallel for for(size_t sample_idx = 0; sample_idx < samples.size(); ++sample_idx) { targets[sample_idx] = inferencest(samples[sample_idx]); } return targets; } std::vector<std::shared_ptr<LeafFunctionT>> inferencemt(const std::vector<TrainDatum>& samples, int n_threads) const { std::vector<std::shared_ptr<LeafFunctionT>> targets(samples.size()); omp_set_num_threads(n_threads); #pragma omp parallel for for(size_t sample_idx = 0; sample_idx < samples.size(); ++sample_idx) { targets[sample_idx] = inferencest(samples[sample_idx].sample); } return targets; } void AddTree(std::shared_ptr<Tree<SplitFunctionT, LeafFunctionT>> tree) { trees_.push_back(tree); } size_t trees_size() const { return trees_.size(); } // TreePtr trees(int idx) const { return trees_[idx]; } virtual void Save(SerializationOut& ar) const { size_t n_trees = trees_.size(); std::cout << "[DEBUG] write " << n_trees << " trees" << std::endl; ar << n_trees; if(true) std::cout << "[Forest][write] write number of trees " << n_trees << std::endl; for(size_t tree_idx = 0; tree_idx < trees_.size(); ++tree_idx) { if(true) std::cout << "[Forest][write] write tree nb. 
" << tree_idx << std::endl; trees_[tree_idx]->Save(ar); } } virtual void Load(SerializationIn& ar) { size_t n_trees; ar >> n_trees; if(true) std::cout << "[Forest][read] nTrees: " << n_trees << std::endl; trees_.clear(); for(size_t i = 0; i < n_trees; ++i) { if(true) std::cout << "[Forest][read] read tree " << (i+1) << " of " << n_trees << " - " << std::endl; auto tree = std::make_shared<Tree<SplitFunctionT, LeafFunctionT>>(); tree->Load(ar); trees_.push_back(tree); if(true) std::cout << "[Forest][read] finished read tree " << (i+1) << " of " << n_trees << std::endl; } } private: std::vector<std::shared_ptr<Tree<SplitFunctionT, LeafFunctionT>>> trees_; };
z_solve.c
//-------------------------------------------------------------------------//
//                                                                         //
//  This benchmark is a serial C version of the NPB BT code. This C        //
//  version is developed by the Center for Manycore Programming at Seoul   //
//  National University and derived from the serial Fortran versions in    //
//  "NPB3.3-SER" developed by NAS.                                         //
//                                                                         //
//  Permission to use, copy, distribute and modify this software for any   //
//  purpose with or without fee is hereby granted. This software is        //
//  provided "as is" without express or implied warranty.                  //
//                                                                         //
//  Information on NPB 3.3, including the technical report, the original   //
//  specifications, source code, results and information on how to submit  //
//  new results, is available at:                                          //
//                                                                         //
//           http://www.nas.nasa.gov/Software/NPB/                         //
//                                                                         //
//  Send comments or suggestions for this C version to cmp@aces.snu.ac.kr  //
//                                                                         //
//          Center for Manycore Programming                                //
//          School of Computer Science and Engineering                     //
//          Seoul National University                                      //
//          Seoul 151-744, Korea                                           //
//                                                                         //
//          E-mail:  cmp@aces.snu.ac.kr                                    //
//                                                                         //
//-------------------------------------------------------------------------//

//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo,    //
//          and Jaejin Lee                                                 //
//-------------------------------------------------------------------------//

#include "header.h"
#include "work_lhs.h"
//#include "timers.h"

//---------------------------------------------------------------------
// Performs line solves in Z direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix,
// and then performing back substitution to solve for the unknown
// vectors of each line.
//
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
//--------------------------------------------------------------------- void z_solve() { int i, j, k, m, n, ksize, z; double pivot, coeff; int gp12, gp02; double fjacZ[5][5][PROBLEM_SIZE+1][IMAXP-1][JMAXP-1]; double njacZ[5][5][PROBLEM_SIZE+1][IMAXP-1][JMAXP-1]; double lhsZ[5][5][3][PROBLEM_SIZE][IMAXP-1][JMAXP-1]; double temp1, temp2, temp3; gp12 = grid_points[1]-2; gp02 = grid_points[0]-2; //--------------------------------------------------------------------- // This function computes the left hand side for the three z-factors //--------------------------------------------------------------------- ksize = grid_points[2]-1; //--------------------------------------------------------------------- // Compute the indices for storing the block-diagonal matrix; // determine c (labeled f) and s jacobians //--------------------------------------------------------------------- #pragma omp target data map(alloc:lhsZ[:][:][:][:][:][:],fjacZ[:][:][:][:][:],njacZ[:][:][:][:][:]) //present(rho_i,u,qs,rhs,square) { #pragma omp target teams distribute parallel for collapse(2) private(i,j,k,temp1,temp2,temp3) for (k = 0; k <= ksize; k++) { for (i = 1; i <= gp02; i++) { for (j = 1; j <= gp12; j++) { temp1 = 1.0 / u[k][j][i][0]; temp2 = temp1 * temp1; temp3 = temp1 * temp2; fjacZ[0][0][k][i][j] = 0.0; fjacZ[0][1][k][i][j] = 0.0; fjacZ[0][2][k][i][j] = 0.0; fjacZ[0][3][k][i][j] = 1.0; fjacZ[0][4][k][i][j] = 0.0; fjacZ[1][0][k][i][j] = - ( u[k][j][i][1]*u[k][j][i][3] ) * temp2; fjacZ[1][1][k][i][j] = u[k][j][i][3] * temp1; fjacZ[1][2][k][i][j] = 0.0; fjacZ[1][3][k][i][j] = u[k][j][i][1] * temp1; fjacZ[1][4][k][i][j] = 0.0; fjacZ[2][0][k][i][j] = - ( u[k][j][i][2]*u[k][j][i][3] ) * temp2; fjacZ[2][1][k][i][j] = 0.0; fjacZ[2][2][k][i][j] = u[k][j][i][3] * temp1; fjacZ[2][3][k][i][j] = u[k][j][i][2] * temp1; fjacZ[2][4][k][i][j] = 0.0; fjacZ[3][0][k][i][j] = - (u[k][j][i][3]*u[k][j][i][3] * temp2 ) + c2 * qs[k][j][i]; fjacZ[3][1][k][i][j] = - c2 * u[k][j][i][1] * temp1; 
fjacZ[3][2][k][i][j] = - c2 * u[k][j][i][2] * temp1; fjacZ[3][3][k][i][j] = ( 2.0 - c2 ) * u[k][j][i][3] * temp1; fjacZ[3][4][k][i][j] = c2; fjacZ[4][0][k][i][j] = ( c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4] ) * u[k][j][i][3] * temp2; fjacZ[4][1][k][i][j] = - c2 * ( u[k][j][i][1]*u[k][j][i][3] ) * temp2; fjacZ[4][2][k][i][j] = - c2 * ( u[k][j][i][2]*u[k][j][i][3] ) * temp2; fjacZ[4][3][k][i][j] = c1 * ( u[k][j][i][4] * temp1 ) - c2 * ( qs[k][j][i] + u[k][j][i][3]*u[k][j][i][3] * temp2 ); fjacZ[4][4][k][i][j] = c1 * u[k][j][i][3] * temp1; njacZ[0][0][k][i][j] = 0.0; njacZ[0][1][k][i][j] = 0.0; njacZ[0][2][k][i][j] = 0.0; njacZ[0][3][k][i][j] = 0.0; njacZ[0][4][k][i][j] = 0.0; njacZ[1][0][k][i][j] = - c3c4 * temp2 * u[k][j][i][1]; njacZ[1][1][k][i][j] = c3c4 * temp1; njacZ[1][2][k][i][j] = 0.0; njacZ[1][3][k][i][j] = 0.0; njacZ[1][4][k][i][j] = 0.0; njacZ[2][0][k][i][j] = - c3c4 * temp2 * u[k][j][i][2]; njacZ[2][1][k][i][j] = 0.0; njacZ[2][2][k][i][j] = c3c4 * temp1; njacZ[2][3][k][i][j] = 0.0; njacZ[2][4][k][i][j] = 0.0; njacZ[3][0][k][i][j] = - con43 * c3c4 * temp2 * u[k][j][i][3]; njacZ[3][1][k][i][j] = 0.0; njacZ[3][2][k][i][j] = 0.0; njacZ[3][3][k][i][j] = con43 * c3c4 * temp1; njacZ[3][4][k][i][j] = 0.0; njacZ[4][0][k][i][j] = - ( c3c4 - c1345 ) * temp3 * (u[k][j][i][1]*u[k][j][i][1]) - ( c3c4 - c1345 ) * temp3 * (u[k][j][i][2]*u[k][j][i][2]) - ( con43 * c3c4 - c1345 ) * temp3 * (u[k][j][i][3]*u[k][j][i][3]) - c1345 * temp2 * u[k][j][i][4]; njacZ[4][1][k][i][j] = ( c3c4 - c1345 ) * temp2 * u[k][j][i][1]; njacZ[4][2][k][i][j] = ( c3c4 - c1345 ) * temp2 * u[k][j][i][2]; njacZ[4][3][k][i][j] = ( con43 * c3c4 - c1345 ) * temp2 * u[k][j][i][3]; njacZ[4][4][k][i][j] = ( c1345 )* temp1; } } } //--------------------------------------------------------------------- // now jacobians set, so form left hand side in z direction //--------------------------------------------------------------------- //lhsZ[j][i]init(lhsZ[j][i], ksize); // zero the whole left hand side 
for starters #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j) collapse(3) #else #pragma omp target teams distribute parallel for simd collapse(4) #endif for (m = 0; m < 5; m++) { for (n = 0; n < 5; n++) { for (i = 1; i <= gp02; i++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (j = 1; j <= gp12; j++) { lhsZ[m][n][0][0][i][j] = 0.0; lhsZ[m][n][1][0][i][j] = 0.0; lhsZ[m][n][2][0][i][j] = 0.0; lhsZ[m][n][0][ksize][i][j] = 0.0; lhsZ[m][n][1][ksize][i][j] = 0.0; lhsZ[m][n][2][ksize][i][j] = 0.0; } } } } // next, set all diagonal values to 1. This is overkill, but convenient #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j) collapse(2) #else #pragma omp target teams distribute parallel for simd collapse(3) #endif for (m = 0; m < 5; m++){ for (i = 1; i <= gp02; i++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (j = 1; j <= gp12; j++) { lhsZ[m][m][1][0][i][j] = 1.0; lhsZ[m][m][1][ksize][i][j] = 1.0; } } } #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for collapse(2) private(i,j,k) #else #pragma omp target teams distribute parallel for simd collapse(3) #endif for (k = 1; k <= ksize-1; k++) { for (i = 1; i <= gp02; i++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (j = 1; j <= gp12; j++) { lhsZ[0][0][AA][k][i][j] = - dttz2 * fjacZ[0][0][k-1][i][j] - dttz1 * njacZ[0][0][k-1][i][j] - dttz1 * dz1; lhsZ[0][1][AA][k][i][j] = - dttz2 * fjacZ[0][1][k-1][i][j] - dttz1 * njacZ[0][1][k-1][i][j]; lhsZ[0][2][AA][k][i][j] = - dttz2 * fjacZ[0][2][k-1][i][j] - dttz1 * njacZ[0][2][k-1][i][j]; lhsZ[0][3][AA][k][i][j] = - dttz2 * fjacZ[0][3][k-1][i][j] - dttz1 * njacZ[0][3][k-1][i][j]; lhsZ[0][4][AA][k][i][j] = - dttz2 * fjacZ[0][4][k-1][i][j] - dttz1 * njacZ[0][4][k-1][i][j]; lhsZ[1][0][AA][k][i][j] = - dttz2 * fjacZ[1][0][k-1][i][j] - dttz1 * njacZ[1][0][k-1][i][j]; lhsZ[1][1][AA][k][i][j] = - dttz2 * fjacZ[1][1][k-1][i][j] - dttz1 * 
njacZ[1][1][k-1][i][j] - dttz1 * dz2; lhsZ[1][2][AA][k][i][j] = - dttz2 * fjacZ[1][2][k-1][i][j] - dttz1 * njacZ[1][2][k-1][i][j]; lhsZ[1][3][AA][k][i][j] = - dttz2 * fjacZ[1][3][k-1][i][j] - dttz1 * njacZ[1][3][k-1][i][j]; lhsZ[1][4][AA][k][i][j] = - dttz2 * fjacZ[1][4][k-1][i][j] - dttz1 * njacZ[1][4][k-1][i][j]; lhsZ[2][0][AA][k][i][j] = - dttz2 * fjacZ[2][0][k-1][i][j] - dttz1 * njacZ[2][0][k-1][i][j]; lhsZ[2][1][AA][k][i][j] = - dttz2 * fjacZ[2][1][k-1][i][j] - dttz1 * njacZ[2][1][k-1][i][j]; lhsZ[2][2][AA][k][i][j] = - dttz2 * fjacZ[2][2][k-1][i][j] - dttz1 * njacZ[2][2][k-1][i][j] - dttz1 * dz3; lhsZ[2][3][AA][k][i][j] = - dttz2 * fjacZ[2][3][k-1][i][j] - dttz1 * njacZ[2][3][k-1][i][j]; lhsZ[2][4][AA][k][i][j] = - dttz2 * fjacZ[2][4][k-1][i][j] - dttz1 * njacZ[2][4][k-1][i][j]; lhsZ[3][0][AA][k][i][j] = - dttz2 * fjacZ[3][0][k-1][i][j] - dttz1 * njacZ[3][0][k-1][i][j]; lhsZ[3][1][AA][k][i][j] = - dttz2 * fjacZ[3][1][k-1][i][j] - dttz1 * njacZ[3][1][k-1][i][j]; lhsZ[3][2][AA][k][i][j] = - dttz2 * fjacZ[3][2][k-1][i][j] - dttz1 * njacZ[3][2][k-1][i][j]; lhsZ[3][3][AA][k][i][j] = - dttz2 * fjacZ[3][3][k-1][i][j] - dttz1 * njacZ[3][3][k-1][i][j] - dttz1 * dz4; lhsZ[3][4][AA][k][i][j] = - dttz2 * fjacZ[3][4][k-1][i][j] - dttz1 * njacZ[3][4][k-1][i][j]; lhsZ[4][0][AA][k][i][j] = - dttz2 * fjacZ[4][0][k-1][i][j] - dttz1 * njacZ[4][0][k-1][i][j]; lhsZ[4][1][AA][k][i][j] = - dttz2 * fjacZ[4][1][k-1][i][j] - dttz1 * njacZ[4][1][k-1][i][j]; lhsZ[4][2][AA][k][i][j] = - dttz2 * fjacZ[4][2][k-1][i][j] - dttz1 * njacZ[4][2][k-1][i][j]; lhsZ[4][3][AA][k][i][j] = - dttz2 * fjacZ[4][3][k-1][i][j] - dttz1 * njacZ[4][3][k-1][i][j]; lhsZ[4][4][AA][k][i][j] = - dttz2 * fjacZ[4][4][k-1][i][j] - dttz1 * njacZ[4][4][k-1][i][j] - dttz1 * dz5; lhsZ[0][0][BB][k][i][j] = 1.0 + dttz1 * 2.0 * njacZ[0][0][k][i][j] + dttz1 * 2.0 * dz1; lhsZ[0][1][BB][k][i][j] = dttz1 * 2.0 * njacZ[0][1][k][i][j]; lhsZ[0][2][BB][k][i][j] = dttz1 * 2.0 * njacZ[0][2][k][i][j]; lhsZ[0][3][BB][k][i][j] = dttz1 * 
2.0 * njacZ[0][3][k][i][j]; lhsZ[0][4][BB][k][i][j] = dttz1 * 2.0 * njacZ[0][4][k][i][j]; lhsZ[1][0][BB][k][i][j] = dttz1 * 2.0 * njacZ[1][0][k][i][j]; lhsZ[1][1][BB][k][i][j] = 1.0 + dttz1 * 2.0 * njacZ[1][1][k][i][j] + dttz1 * 2.0 * dz2; lhsZ[1][2][BB][k][i][j] = dttz1 * 2.0 * njacZ[1][2][k][i][j]; lhsZ[1][3][BB][k][i][j] = dttz1 * 2.0 * njacZ[1][3][k][i][j]; lhsZ[1][4][BB][k][i][j] = dttz1 * 2.0 * njacZ[1][4][k][i][j]; lhsZ[2][0][BB][k][i][j] = dttz1 * 2.0 * njacZ[2][0][k][i][j]; lhsZ[2][1][BB][k][i][j] = dttz1 * 2.0 * njacZ[2][1][k][i][j]; lhsZ[2][2][BB][k][i][j] = 1.0 + dttz1 * 2.0 * njacZ[2][2][k][i][j] + dttz1 * 2.0 * dz3; lhsZ[2][3][BB][k][i][j] = dttz1 * 2.0 * njacZ[2][3][k][i][j]; lhsZ[2][4][BB][k][i][j] = dttz1 * 2.0 * njacZ[2][4][k][i][j]; lhsZ[3][0][BB][k][i][j] = dttz1 * 2.0 * njacZ[3][0][k][i][j]; lhsZ[3][1][BB][k][i][j] = dttz1 * 2.0 * njacZ[3][1][k][i][j]; lhsZ[3][2][BB][k][i][j] = dttz1 * 2.0 * njacZ[3][2][k][i][j]; lhsZ[3][3][BB][k][i][j] = 1.0 + dttz1 * 2.0 * njacZ[3][3][k][i][j] + dttz1 * 2.0 * dz4; lhsZ[3][4][BB][k][i][j] = dttz1 * 2.0 * njacZ[3][4][k][i][j]; lhsZ[4][0][BB][k][i][j] = dttz1 * 2.0 * njacZ[4][0][k][i][j]; lhsZ[4][1][BB][k][i][j] = dttz1 * 2.0 * njacZ[4][1][k][i][j]; lhsZ[4][2][BB][k][i][j] = dttz1 * 2.0 * njacZ[4][2][k][i][j]; lhsZ[4][3][BB][k][i][j] = dttz1 * 2.0 * njacZ[4][3][k][i][j]; lhsZ[4][4][BB][k][i][j] = 1.0 + dttz1 * 2.0 * njacZ[4][4][k][i][j] + dttz1 * 2.0 * dz5; lhsZ[0][0][CC][k][i][j] = dttz2 * fjacZ[0][0][k+1][i][j] - dttz1 * njacZ[0][0][k+1][i][j] - dttz1 * dz1; lhsZ[0][1][CC][k][i][j] = dttz2 * fjacZ[0][1][k+1][i][j] - dttz1 * njacZ[0][1][k+1][i][j]; lhsZ[0][2][CC][k][i][j] = dttz2 * fjacZ[0][2][k+1][i][j] - dttz1 * njacZ[0][2][k+1][i][j]; lhsZ[0][3][CC][k][i][j] = dttz2 * fjacZ[0][3][k+1][i][j] - dttz1 * njacZ[0][3][k+1][i][j]; lhsZ[0][4][CC][k][i][j] = dttz2 * fjacZ[0][4][k+1][i][j] - dttz1 * njacZ[0][4][k+1][i][j]; lhsZ[1][0][CC][k][i][j] = dttz2 * fjacZ[1][0][k+1][i][j] - dttz1 * njacZ[1][0][k+1][i][j]; 
lhsZ[1][1][CC][k][i][j] = dttz2 * fjacZ[1][1][k+1][i][j] - dttz1 * njacZ[1][1][k+1][i][j] - dttz1 * dz2; lhsZ[1][2][CC][k][i][j] = dttz2 * fjacZ[1][2][k+1][i][j] - dttz1 * njacZ[1][2][k+1][i][j]; lhsZ[1][3][CC][k][i][j] = dttz2 * fjacZ[1][3][k+1][i][j] - dttz1 * njacZ[1][3][k+1][i][j]; lhsZ[1][4][CC][k][i][j] = dttz2 * fjacZ[1][4][k+1][i][j] - dttz1 * njacZ[1][4][k+1][i][j]; lhsZ[2][0][CC][k][i][j] = dttz2 * fjacZ[2][0][k+1][i][j] - dttz1 * njacZ[2][0][k+1][i][j]; lhsZ[2][1][CC][k][i][j] = dttz2 * fjacZ[2][1][k+1][i][j] - dttz1 * njacZ[2][1][k+1][i][j]; lhsZ[2][2][CC][k][i][j] = dttz2 * fjacZ[2][2][k+1][i][j] - dttz1 * njacZ[2][2][k+1][i][j] - dttz1 * dz3; lhsZ[2][3][CC][k][i][j] = dttz2 * fjacZ[2][3][k+1][i][j] - dttz1 * njacZ[2][3][k+1][i][j]; lhsZ[2][4][CC][k][i][j] = dttz2 * fjacZ[2][4][k+1][i][j] - dttz1 * njacZ[2][4][k+1][i][j]; lhsZ[3][0][CC][k][i][j] = dttz2 * fjacZ[3][0][k+1][i][j] - dttz1 * njacZ[3][0][k+1][i][j]; lhsZ[3][1][CC][k][i][j] = dttz2 * fjacZ[3][1][k+1][i][j] - dttz1 * njacZ[3][1][k+1][i][j]; lhsZ[3][2][CC][k][i][j] = dttz2 * fjacZ[3][2][k+1][i][j] - dttz1 * njacZ[3][2][k+1][i][j]; lhsZ[3][3][CC][k][i][j] = dttz2 * fjacZ[3][3][k+1][i][j] - dttz1 * njacZ[3][3][k+1][i][j] - dttz1 * dz4; lhsZ[3][4][CC][k][i][j] = dttz2 * fjacZ[3][4][k+1][i][j] - dttz1 * njacZ[3][4][k+1][i][j]; lhsZ[4][0][CC][k][i][j] = dttz2 * fjacZ[4][0][k+1][i][j] - dttz1 * njacZ[4][0][k+1][i][j]; lhsZ[4][1][CC][k][i][j] = dttz2 * fjacZ[4][1][k+1][i][j] - dttz1 * njacZ[4][1][k+1][i][j]; lhsZ[4][2][CC][k][i][j] = dttz2 * fjacZ[4][2][k+1][i][j] - dttz1 * njacZ[4][2][k+1][i][j]; lhsZ[4][3][CC][k][i][j] = dttz2 * fjacZ[4][3][k+1][i][j] - dttz1 * njacZ[4][3][k+1][i][j]; lhsZ[4][4][CC][k][i][j] = dttz2 * fjacZ[4][4][k+1][i][j] - dttz1 * njacZ[4][4][k+1][i][j] - dttz1 * dz5; } } } //--------------------------------------------------------------------- //--------------------------------------------------------------------- 
//--------------------------------------------------------------------- // performs guaussian elimination on this cell. // // assumes that unpacking routines for non-first cells // preload C' and rhs' from previous cell. // // assumed send happens outside this routine, but that // c'(KMAX) and rhs'(KMAX) will be sent to next cell. //--------------------------------------------------------------------- //--------------------------------------------------------------------- // outer most do loops - sweeping in i direction //--------------------------------------------------------------------- //--------------------------------------------------------------------- // multiply c[0][j][i] by b_inverse and copy back to c // multiply rhs(0) by b_inverse(0) and copy to rhs //--------------------------------------------------------------------- //binvcrhs( lhsZ[0][i][BB], lhsZ[j][0][i][j][CC], rhs[0][j][i] ); #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,pivot, coeff) #else #pragma omp target teams distribute parallel for simd private(pivot, coeff) collapse(2) #endif for (i = 1; i <= gp02; i++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(pivot, coeff) #endif for (j = 1; j <= gp12; j++) { /* for(m = 0; m < 5; m++){ pivot = 1.00/lhsZ[m][m][BB][0][i][j]; for(n = m+1; n < 5; n++){ lhsZ[m][n][BB][0][i][j] = lhsZ[m][n][BB][0][i][j]*pivot; } lhsZ[m][0][CC][0][i][j] = lhsZ[m][0][CC][0][i][j]*pivot; lhsZ[m][1][CC][0][i][j] = lhsZ[m][1][CC][0][i][j]*pivot; lhsZ[m][2][CC][0][i][j] = lhsZ[m][2][CC][0][i][j]*pivot; lhsZ[m][3][CC][0][i][j] = lhsZ[m][3][CC][0][i][j]*pivot; lhsZ[m][4][CC][0][i][j] = lhsZ[m][4][CC][0][i][j]*pivot; rhs[0][j][i][m] = rhs[0][j][i][m]*pivot; for(n = 0; n < 5; n++){ if(n != m){ coeff = lhsZ[n][m][BB][0][i][j]; for(z = m+1; z < 5; z++){ lhsZ[n][z][BB][0][i][j] = lhsZ[n][z][BB][0][i][j] - coeff*lhsZ[m][z][BB][0][i][j]; } lhsZ[n][0][CC][0][i][j] = lhsZ[n][0][CC][0][i][j] - coeff*lhsZ[m][0][CC][0][i][j]; 
lhsZ[n][1][CC][0][i][j] = lhsZ[n][1][CC][0][i][j] - coeff*lhsZ[m][1][CC][0][i][j]; lhsZ[n][2][CC][0][i][j] = lhsZ[n][2][CC][0][i][j] - coeff*lhsZ[m][2][CC][0][i][j]; lhsZ[n][3][CC][0][i][j] = lhsZ[n][3][CC][0][i][j] - coeff*lhsZ[m][3][CC][0][i][j]; lhsZ[n][4][CC][0][i][j] = lhsZ[n][4][CC][0][i][j] - coeff*lhsZ[m][4][CC][0][i][j]; rhs[0][j][i][n] = rhs[0][j][i][n] - coeff*rhs[0][j][i][m]; } } } */ pivot = 1.00/lhsZ[0][0][BB][0][i][j]; lhsZ[0][1][BB][0][i][j] = lhsZ[0][1][BB][0][i][j]*pivot; lhsZ[0][2][BB][0][i][j] = lhsZ[0][2][BB][0][i][j]*pivot; lhsZ[0][3][BB][0][i][j] = lhsZ[0][3][BB][0][i][j]*pivot; lhsZ[0][4][BB][0][i][j] = lhsZ[0][4][BB][0][i][j]*pivot; lhsZ[0][0][CC][0][i][j] = lhsZ[0][0][CC][0][i][j]*pivot; lhsZ[0][1][CC][0][i][j] = lhsZ[0][1][CC][0][i][j]*pivot; lhsZ[0][2][CC][0][i][j] = lhsZ[0][2][CC][0][i][j]*pivot; lhsZ[0][3][CC][0][i][j] = lhsZ[0][3][CC][0][i][j]*pivot; lhsZ[0][4][CC][0][i][j] = lhsZ[0][4][CC][0][i][j]*pivot; rhs[0][j][i][0] = rhs[0][j][i][0] *pivot; coeff = lhsZ[1][0][BB][0][i][j]; lhsZ[1][1][BB][0][i][j]= lhsZ[1][1][BB][0][i][j] - coeff*lhsZ[0][1][BB][0][i][j]; lhsZ[1][2][BB][0][i][j]= lhsZ[1][2][BB][0][i][j] - coeff*lhsZ[0][2][BB][0][i][j]; lhsZ[1][3][BB][0][i][j]= lhsZ[1][3][BB][0][i][j] - coeff*lhsZ[0][3][BB][0][i][j]; lhsZ[1][4][BB][0][i][j]= lhsZ[1][4][BB][0][i][j] - coeff*lhsZ[0][4][BB][0][i][j]; lhsZ[1][0][CC][0][i][j] = lhsZ[1][0][CC][0][i][j] - coeff*lhsZ[0][0][CC][0][i][j]; lhsZ[1][1][CC][0][i][j] = lhsZ[1][1][CC][0][i][j] - coeff*lhsZ[0][1][CC][0][i][j]; lhsZ[1][2][CC][0][i][j] = lhsZ[1][2][CC][0][i][j] - coeff*lhsZ[0][2][CC][0][i][j]; lhsZ[1][3][CC][0][i][j] = lhsZ[1][3][CC][0][i][j] - coeff*lhsZ[0][3][CC][0][i][j]; lhsZ[1][4][CC][0][i][j] = lhsZ[1][4][CC][0][i][j] - coeff*lhsZ[0][4][CC][0][i][j]; rhs[0][j][i][1] = rhs[0][j][i][1] - coeff*rhs[0][j][i][0]; coeff = lhsZ[2][0][BB][0][i][j]; lhsZ[2][1][BB][0][i][j]= lhsZ[2][1][BB][0][i][j] - coeff*lhsZ[0][1][BB][0][i][j]; lhsZ[2][2][BB][0][i][j]= lhsZ[2][2][BB][0][i][j] - 
coeff*lhsZ[0][2][BB][0][i][j]; lhsZ[2][3][BB][0][i][j]= lhsZ[2][3][BB][0][i][j] - coeff*lhsZ[0][3][BB][0][i][j]; lhsZ[2][4][BB][0][i][j]= lhsZ[2][4][BB][0][i][j] - coeff*lhsZ[0][4][BB][0][i][j]; lhsZ[2][0][CC][0][i][j] = lhsZ[2][0][CC][0][i][j] - coeff*lhsZ[0][0][CC][0][i][j]; lhsZ[2][1][CC][0][i][j] = lhsZ[2][1][CC][0][i][j] - coeff*lhsZ[0][1][CC][0][i][j]; lhsZ[2][2][CC][0][i][j] = lhsZ[2][2][CC][0][i][j] - coeff*lhsZ[0][2][CC][0][i][j]; lhsZ[2][3][CC][0][i][j] = lhsZ[2][3][CC][0][i][j] - coeff*lhsZ[0][3][CC][0][i][j]; lhsZ[2][4][CC][0][i][j] = lhsZ[2][4][CC][0][i][j] - coeff*lhsZ[0][4][CC][0][i][j]; rhs[0][j][i][2] = rhs[0][j][i][2] - coeff*rhs[0][j][i][0]; coeff = lhsZ[3][0][BB][0][i][j]; lhsZ[3][1][BB][0][i][j]= lhsZ[3][1][BB][0][i][j] - coeff*lhsZ[0][1][BB][0][i][j]; lhsZ[3][2][BB][0][i][j]= lhsZ[3][2][BB][0][i][j] - coeff*lhsZ[0][2][BB][0][i][j]; lhsZ[3][3][BB][0][i][j]= lhsZ[3][3][BB][0][i][j] - coeff*lhsZ[0][3][BB][0][i][j]; lhsZ[3][4][BB][0][i][j]= lhsZ[3][4][BB][0][i][j] - coeff*lhsZ[0][4][BB][0][i][j]; lhsZ[3][0][CC][0][i][j] = lhsZ[3][0][CC][0][i][j] - coeff*lhsZ[0][0][CC][0][i][j]; lhsZ[3][1][CC][0][i][j] = lhsZ[3][1][CC][0][i][j] - coeff*lhsZ[0][1][CC][0][i][j]; lhsZ[3][2][CC][0][i][j] = lhsZ[3][2][CC][0][i][j] - coeff*lhsZ[0][2][CC][0][i][j]; lhsZ[3][3][CC][0][i][j] = lhsZ[3][3][CC][0][i][j] - coeff*lhsZ[0][3][CC][0][i][j]; lhsZ[3][4][CC][0][i][j] = lhsZ[3][4][CC][0][i][j] - coeff*lhsZ[0][4][CC][0][i][j]; rhs[0][j][i][3] = rhs[0][j][i][3] - coeff*rhs[0][j][i][0]; coeff = lhsZ[4][0][BB][0][i][j]; lhsZ[4][1][BB][0][i][j]= lhsZ[4][1][BB][0][i][j] - coeff*lhsZ[0][1][BB][0][i][j]; lhsZ[4][2][BB][0][i][j]= lhsZ[4][2][BB][0][i][j] - coeff*lhsZ[0][2][BB][0][i][j]; lhsZ[4][3][BB][0][i][j]= lhsZ[4][3][BB][0][i][j] - coeff*lhsZ[0][3][BB][0][i][j]; lhsZ[4][4][BB][0][i][j]= lhsZ[4][4][BB][0][i][j] - coeff*lhsZ[0][4][BB][0][i][j]; lhsZ[4][0][CC][0][i][j] = lhsZ[4][0][CC][0][i][j] - coeff*lhsZ[0][0][CC][0][i][j]; lhsZ[4][1][CC][0][i][j] = lhsZ[4][1][CC][0][i][j] - 
coeff*lhsZ[0][1][CC][0][i][j]; lhsZ[4][2][CC][0][i][j] = lhsZ[4][2][CC][0][i][j] - coeff*lhsZ[0][2][CC][0][i][j]; lhsZ[4][3][CC][0][i][j] = lhsZ[4][3][CC][0][i][j] - coeff*lhsZ[0][3][CC][0][i][j]; lhsZ[4][4][CC][0][i][j] = lhsZ[4][4][CC][0][i][j] - coeff*lhsZ[0][4][CC][0][i][j]; rhs[0][j][i][4] = rhs[0][j][i][4] - coeff*rhs[0][j][i][0]; pivot = 1.00/lhsZ[1][1][BB][0][i][j]; lhsZ[1][2][BB][0][i][j] = lhsZ[1][2][BB][0][i][j]*pivot; lhsZ[1][3][BB][0][i][j] = lhsZ[1][3][BB][0][i][j]*pivot; lhsZ[1][4][BB][0][i][j] = lhsZ[1][4][BB][0][i][j]*pivot; lhsZ[1][0][CC][0][i][j] = lhsZ[1][0][CC][0][i][j]*pivot; lhsZ[1][1][CC][0][i][j] = lhsZ[1][1][CC][0][i][j]*pivot; lhsZ[1][2][CC][0][i][j] = lhsZ[1][2][CC][0][i][j]*pivot; lhsZ[1][3][CC][0][i][j] = lhsZ[1][3][CC][0][i][j]*pivot; lhsZ[1][4][CC][0][i][j] = lhsZ[1][4][CC][0][i][j]*pivot; rhs[0][j][i][1] = rhs[0][j][i][1] *pivot; coeff = lhsZ[0][1][BB][0][i][j]; lhsZ[0][2][BB][0][i][j]= lhsZ[0][2][BB][0][i][j] - coeff*lhsZ[1][2][BB][0][i][j]; lhsZ[0][3][BB][0][i][j]= lhsZ[0][3][BB][0][i][j] - coeff*lhsZ[1][3][BB][0][i][j]; lhsZ[0][4][BB][0][i][j]= lhsZ[0][4][BB][0][i][j] - coeff*lhsZ[1][4][BB][0][i][j]; lhsZ[0][0][CC][0][i][j] = lhsZ[0][0][CC][0][i][j] - coeff*lhsZ[1][0][CC][0][i][j]; lhsZ[0][1][CC][0][i][j] = lhsZ[0][1][CC][0][i][j] - coeff*lhsZ[1][1][CC][0][i][j]; lhsZ[0][2][CC][0][i][j] = lhsZ[0][2][CC][0][i][j] - coeff*lhsZ[1][2][CC][0][i][j]; lhsZ[0][3][CC][0][i][j] = lhsZ[0][3][CC][0][i][j] - coeff*lhsZ[1][3][CC][0][i][j]; lhsZ[0][4][CC][0][i][j] = lhsZ[0][4][CC][0][i][j] - coeff*lhsZ[1][4][CC][0][i][j]; rhs[0][j][i][0] = rhs[0][j][i][0] - coeff*rhs[0][j][i][1]; coeff = lhsZ[2][1][BB][0][i][j]; lhsZ[2][2][BB][0][i][j]= lhsZ[2][2][BB][0][i][j] - coeff*lhsZ[1][2][BB][0][i][j]; lhsZ[2][3][BB][0][i][j]= lhsZ[2][3][BB][0][i][j] - coeff*lhsZ[1][3][BB][0][i][j]; lhsZ[2][4][BB][0][i][j]= lhsZ[2][4][BB][0][i][j] - coeff*lhsZ[1][4][BB][0][i][j]; lhsZ[2][0][CC][0][i][j] = lhsZ[2][0][CC][0][i][j] - coeff*lhsZ[1][0][CC][0][i][j]; 
lhsZ[2][1][CC][0][i][j] = lhsZ[2][1][CC][0][i][j] - coeff*lhsZ[1][1][CC][0][i][j]; lhsZ[2][2][CC][0][i][j] = lhsZ[2][2][CC][0][i][j] - coeff*lhsZ[1][2][CC][0][i][j]; lhsZ[2][3][CC][0][i][j] = lhsZ[2][3][CC][0][i][j] - coeff*lhsZ[1][3][CC][0][i][j]; lhsZ[2][4][CC][0][i][j] = lhsZ[2][4][CC][0][i][j] - coeff*lhsZ[1][4][CC][0][i][j]; rhs[0][j][i][2] = rhs[0][j][i][2] - coeff*rhs[0][j][i][1]; coeff = lhsZ[3][1][BB][0][i][j]; lhsZ[3][2][BB][0][i][j]= lhsZ[3][2][BB][0][i][j] - coeff*lhsZ[1][2][BB][0][i][j]; lhsZ[3][3][BB][0][i][j]= lhsZ[3][3][BB][0][i][j] - coeff*lhsZ[1][3][BB][0][i][j]; lhsZ[3][4][BB][0][i][j]= lhsZ[3][4][BB][0][i][j] - coeff*lhsZ[1][4][BB][0][i][j]; lhsZ[3][0][CC][0][i][j] = lhsZ[3][0][CC][0][i][j] - coeff*lhsZ[1][0][CC][0][i][j]; lhsZ[3][1][CC][0][i][j] = lhsZ[3][1][CC][0][i][j] - coeff*lhsZ[1][1][CC][0][i][j]; lhsZ[3][2][CC][0][i][j] = lhsZ[3][2][CC][0][i][j] - coeff*lhsZ[1][2][CC][0][i][j]; lhsZ[3][3][CC][0][i][j] = lhsZ[3][3][CC][0][i][j] - coeff*lhsZ[1][3][CC][0][i][j]; lhsZ[3][4][CC][0][i][j] = lhsZ[3][4][CC][0][i][j] - coeff*lhsZ[1][4][CC][0][i][j]; rhs[0][j][i][3] = rhs[0][j][i][3] - coeff*rhs[0][j][i][1]; coeff = lhsZ[4][1][BB][0][i][j]; lhsZ[4][2][BB][0][i][j]= lhsZ[4][2][BB][0][i][j] - coeff*lhsZ[1][2][BB][0][i][j]; lhsZ[4][3][BB][0][i][j]= lhsZ[4][3][BB][0][i][j] - coeff*lhsZ[1][3][BB][0][i][j]; lhsZ[4][4][BB][0][i][j]= lhsZ[4][4][BB][0][i][j] - coeff*lhsZ[1][4][BB][0][i][j]; lhsZ[4][0][CC][0][i][j] = lhsZ[4][0][CC][0][i][j] - coeff*lhsZ[1][0][CC][0][i][j]; lhsZ[4][1][CC][0][i][j] = lhsZ[4][1][CC][0][i][j] - coeff*lhsZ[1][1][CC][0][i][j]; lhsZ[4][2][CC][0][i][j] = lhsZ[4][2][CC][0][i][j] - coeff*lhsZ[1][2][CC][0][i][j]; lhsZ[4][3][CC][0][i][j] = lhsZ[4][3][CC][0][i][j] - coeff*lhsZ[1][3][CC][0][i][j]; lhsZ[4][4][CC][0][i][j] = lhsZ[4][4][CC][0][i][j] - coeff*lhsZ[1][4][CC][0][i][j]; rhs[0][j][i][4] = rhs[0][j][i][4] - coeff*rhs[0][j][i][1]; pivot = 1.00/lhsZ[2][2][BB][0][i][j]; lhsZ[2][3][BB][0][i][j] = lhsZ[2][3][BB][0][i][j]*pivot; 
lhsZ[2][4][BB][0][i][j] = lhsZ[2][4][BB][0][i][j]*pivot; lhsZ[2][0][CC][0][i][j] = lhsZ[2][0][CC][0][i][j]*pivot; lhsZ[2][1][CC][0][i][j] = lhsZ[2][1][CC][0][i][j]*pivot; lhsZ[2][2][CC][0][i][j] = lhsZ[2][2][CC][0][i][j]*pivot; lhsZ[2][3][CC][0][i][j] = lhsZ[2][3][CC][0][i][j]*pivot; lhsZ[2][4][CC][0][i][j] = lhsZ[2][4][CC][0][i][j]*pivot; rhs[0][j][i][2] = rhs[0][j][i][2] *pivot; coeff = lhsZ[0][2][BB][0][i][j]; lhsZ[0][3][BB][0][i][j]= lhsZ[0][3][BB][0][i][j] - coeff*lhsZ[2][3][BB][0][i][j]; lhsZ[0][4][BB][0][i][j]= lhsZ[0][4][BB][0][i][j] - coeff*lhsZ[2][4][BB][0][i][j]; lhsZ[0][0][CC][0][i][j] = lhsZ[0][0][CC][0][i][j] - coeff*lhsZ[2][0][CC][0][i][j]; lhsZ[0][1][CC][0][i][j] = lhsZ[0][1][CC][0][i][j] - coeff*lhsZ[2][1][CC][0][i][j]; lhsZ[0][2][CC][0][i][j] = lhsZ[0][2][CC][0][i][j] - coeff*lhsZ[2][2][CC][0][i][j]; lhsZ[0][3][CC][0][i][j] = lhsZ[0][3][CC][0][i][j] - coeff*lhsZ[2][3][CC][0][i][j]; lhsZ[0][4][CC][0][i][j] = lhsZ[0][4][CC][0][i][j] - coeff*lhsZ[2][4][CC][0][i][j]; rhs[0][j][i][0] = rhs[0][j][i][0] - coeff*rhs[0][j][i][2]; coeff = lhsZ[1][2][BB][0][i][j]; lhsZ[1][3][BB][0][i][j]= lhsZ[1][3][BB][0][i][j] - coeff*lhsZ[2][3][BB][0][i][j]; lhsZ[1][4][BB][0][i][j]= lhsZ[1][4][BB][0][i][j] - coeff*lhsZ[2][4][BB][0][i][j]; lhsZ[1][0][CC][0][i][j] = lhsZ[1][0][CC][0][i][j] - coeff*lhsZ[2][0][CC][0][i][j]; lhsZ[1][1][CC][0][i][j] = lhsZ[1][1][CC][0][i][j] - coeff*lhsZ[2][1][CC][0][i][j]; lhsZ[1][2][CC][0][i][j] = lhsZ[1][2][CC][0][i][j] - coeff*lhsZ[2][2][CC][0][i][j]; lhsZ[1][3][CC][0][i][j] = lhsZ[1][3][CC][0][i][j] - coeff*lhsZ[2][3][CC][0][i][j]; lhsZ[1][4][CC][0][i][j] = lhsZ[1][4][CC][0][i][j] - coeff*lhsZ[2][4][CC][0][i][j]; rhs[0][j][i][1] = rhs[0][j][i][1] - coeff*rhs[0][j][i][2]; coeff = lhsZ[3][2][BB][0][i][j]; lhsZ[3][3][BB][0][i][j]= lhsZ[3][3][BB][0][i][j] - coeff*lhsZ[2][3][BB][0][i][j]; lhsZ[3][4][BB][0][i][j]= lhsZ[3][4][BB][0][i][j] - coeff*lhsZ[2][4][BB][0][i][j]; lhsZ[3][0][CC][0][i][j] = lhsZ[3][0][CC][0][i][j] - 
coeff*lhsZ[2][0][CC][0][i][j]; lhsZ[3][1][CC][0][i][j] = lhsZ[3][1][CC][0][i][j] - coeff*lhsZ[2][1][CC][0][i][j]; lhsZ[3][2][CC][0][i][j] = lhsZ[3][2][CC][0][i][j] - coeff*lhsZ[2][2][CC][0][i][j]; lhsZ[3][3][CC][0][i][j] = lhsZ[3][3][CC][0][i][j] - coeff*lhsZ[2][3][CC][0][i][j]; lhsZ[3][4][CC][0][i][j] = lhsZ[3][4][CC][0][i][j] - coeff*lhsZ[2][4][CC][0][i][j]; rhs[0][j][i][3] = rhs[0][j][i][3] - coeff*rhs[0][j][i][2]; coeff = lhsZ[4][2][BB][0][i][j]; lhsZ[4][3][BB][0][i][j]= lhsZ[4][3][BB][0][i][j] - coeff*lhsZ[2][3][BB][0][i][j]; lhsZ[4][4][BB][0][i][j]= lhsZ[4][4][BB][0][i][j] - coeff*lhsZ[2][4][BB][0][i][j]; lhsZ[4][0][CC][0][i][j] = lhsZ[4][0][CC][0][i][j] - coeff*lhsZ[2][0][CC][0][i][j]; lhsZ[4][1][CC][0][i][j] = lhsZ[4][1][CC][0][i][j] - coeff*lhsZ[2][1][CC][0][i][j]; lhsZ[4][2][CC][0][i][j] = lhsZ[4][2][CC][0][i][j] - coeff*lhsZ[2][2][CC][0][i][j]; lhsZ[4][3][CC][0][i][j] = lhsZ[4][3][CC][0][i][j] - coeff*lhsZ[2][3][CC][0][i][j]; lhsZ[4][4][CC][0][i][j] = lhsZ[4][4][CC][0][i][j] - coeff*lhsZ[2][4][CC][0][i][j]; rhs[0][j][i][4] = rhs[0][j][i][4] - coeff*rhs[0][j][i][2]; pivot = 1.00/lhsZ[3][3][BB][0][i][j]; lhsZ[3][4][BB][0][i][j] = lhsZ[3][4][BB][0][i][j]*pivot; lhsZ[3][0][CC][0][i][j] = lhsZ[3][0][CC][0][i][j]*pivot; lhsZ[3][1][CC][0][i][j] = lhsZ[3][1][CC][0][i][j]*pivot; lhsZ[3][2][CC][0][i][j] = lhsZ[3][2][CC][0][i][j]*pivot; lhsZ[3][3][CC][0][i][j] = lhsZ[3][3][CC][0][i][j]*pivot; lhsZ[3][4][CC][0][i][j] = lhsZ[3][4][CC][0][i][j]*pivot; rhs[0][j][i][3] = rhs[0][j][i][3] *pivot; coeff = lhsZ[0][3][BB][0][i][j]; lhsZ[0][4][BB][0][i][j]= lhsZ[0][4][BB][0][i][j] - coeff*lhsZ[3][4][BB][0][i][j]; lhsZ[0][0][CC][0][i][j] = lhsZ[0][0][CC][0][i][j] - coeff*lhsZ[3][0][CC][0][i][j]; lhsZ[0][1][CC][0][i][j] = lhsZ[0][1][CC][0][i][j] - coeff*lhsZ[3][1][CC][0][i][j]; lhsZ[0][2][CC][0][i][j] = lhsZ[0][2][CC][0][i][j] - coeff*lhsZ[3][2][CC][0][i][j]; lhsZ[0][3][CC][0][i][j] = lhsZ[0][3][CC][0][i][j] - coeff*lhsZ[3][3][CC][0][i][j]; lhsZ[0][4][CC][0][i][j] = 
lhsZ[0][4][CC][0][i][j] - coeff*lhsZ[3][4][CC][0][i][j]; rhs[0][j][i][0] = rhs[0][j][i][0] - coeff*rhs[0][j][i][3]; coeff = lhsZ[1][3][BB][0][i][j]; lhsZ[1][4][BB][0][i][j]= lhsZ[1][4][BB][0][i][j] - coeff*lhsZ[3][4][BB][0][i][j]; lhsZ[1][0][CC][0][i][j] = lhsZ[1][0][CC][0][i][j] - coeff*lhsZ[3][0][CC][0][i][j]; lhsZ[1][1][CC][0][i][j] = lhsZ[1][1][CC][0][i][j] - coeff*lhsZ[3][1][CC][0][i][j]; lhsZ[1][2][CC][0][i][j] = lhsZ[1][2][CC][0][i][j] - coeff*lhsZ[3][2][CC][0][i][j]; lhsZ[1][3][CC][0][i][j] = lhsZ[1][3][CC][0][i][j] - coeff*lhsZ[3][3][CC][0][i][j]; lhsZ[1][4][CC][0][i][j] = lhsZ[1][4][CC][0][i][j] - coeff*lhsZ[3][4][CC][0][i][j]; rhs[0][j][i][1] = rhs[0][j][i][1] - coeff*rhs[0][j][i][3]; coeff = lhsZ[2][3][BB][0][i][j]; lhsZ[2][4][BB][0][i][j]= lhsZ[2][4][BB][0][i][j] - coeff*lhsZ[3][4][BB][0][i][j]; lhsZ[2][0][CC][0][i][j] = lhsZ[2][0][CC][0][i][j] - coeff*lhsZ[3][0][CC][0][i][j]; lhsZ[2][1][CC][0][i][j] = lhsZ[2][1][CC][0][i][j] - coeff*lhsZ[3][1][CC][0][i][j]; lhsZ[2][2][CC][0][i][j] = lhsZ[2][2][CC][0][i][j] - coeff*lhsZ[3][2][CC][0][i][j]; lhsZ[2][3][CC][0][i][j] = lhsZ[2][3][CC][0][i][j] - coeff*lhsZ[3][3][CC][0][i][j]; lhsZ[2][4][CC][0][i][j] = lhsZ[2][4][CC][0][i][j] - coeff*lhsZ[3][4][CC][0][i][j]; rhs[0][j][i][2] = rhs[0][j][i][2] - coeff*rhs[0][j][i][3]; coeff = lhsZ[4][3][BB][0][i][j]; lhsZ[4][4][BB][0][i][j]= lhsZ[4][4][BB][0][i][j] - coeff*lhsZ[3][4][BB][0][i][j]; lhsZ[4][0][CC][0][i][j] = lhsZ[4][0][CC][0][i][j] - coeff*lhsZ[3][0][CC][0][i][j]; lhsZ[4][1][CC][0][i][j] = lhsZ[4][1][CC][0][i][j] - coeff*lhsZ[3][1][CC][0][i][j]; lhsZ[4][2][CC][0][i][j] = lhsZ[4][2][CC][0][i][j] - coeff*lhsZ[3][2][CC][0][i][j]; lhsZ[4][3][CC][0][i][j] = lhsZ[4][3][CC][0][i][j] - coeff*lhsZ[3][3][CC][0][i][j]; lhsZ[4][4][CC][0][i][j] = lhsZ[4][4][CC][0][i][j] - coeff*lhsZ[3][4][CC][0][i][j]; rhs[0][j][i][4] = rhs[0][j][i][4] - coeff*rhs[0][j][i][3]; pivot = 1.00/lhsZ[4][4][BB][0][i][j]; lhsZ[4][0][CC][0][i][j] = lhsZ[4][0][CC][0][i][j]*pivot; 
lhsZ[4][1][CC][0][i][j] = lhsZ[4][1][CC][0][i][j]*pivot; lhsZ[4][2][CC][0][i][j] = lhsZ[4][2][CC][0][i][j]*pivot; lhsZ[4][3][CC][0][i][j] = lhsZ[4][3][CC][0][i][j]*pivot; lhsZ[4][4][CC][0][i][j] = lhsZ[4][4][CC][0][i][j]*pivot; rhs[0][j][i][4] = rhs[0][j][i][4] *pivot; coeff = lhsZ[0][4][BB][0][i][j]; lhsZ[0][0][CC][0][i][j] = lhsZ[0][0][CC][0][i][j] - coeff*lhsZ[4][0][CC][0][i][j]; lhsZ[0][1][CC][0][i][j] = lhsZ[0][1][CC][0][i][j] - coeff*lhsZ[4][1][CC][0][i][j]; lhsZ[0][2][CC][0][i][j] = lhsZ[0][2][CC][0][i][j] - coeff*lhsZ[4][2][CC][0][i][j]; lhsZ[0][3][CC][0][i][j] = lhsZ[0][3][CC][0][i][j] - coeff*lhsZ[4][3][CC][0][i][j]; lhsZ[0][4][CC][0][i][j] = lhsZ[0][4][CC][0][i][j] - coeff*lhsZ[4][4][CC][0][i][j]; rhs[0][j][i][0] = rhs[0][j][i][0] - coeff*rhs[0][j][i][4]; coeff = lhsZ[1][4][BB][0][i][j]; lhsZ[1][0][CC][0][i][j] = lhsZ[1][0][CC][0][i][j] - coeff*lhsZ[4][0][CC][0][i][j]; lhsZ[1][1][CC][0][i][j] = lhsZ[1][1][CC][0][i][j] - coeff*lhsZ[4][1][CC][0][i][j]; lhsZ[1][2][CC][0][i][j] = lhsZ[1][2][CC][0][i][j] - coeff*lhsZ[4][2][CC][0][i][j]; lhsZ[1][3][CC][0][i][j] = lhsZ[1][3][CC][0][i][j] - coeff*lhsZ[4][3][CC][0][i][j]; lhsZ[1][4][CC][0][i][j] = lhsZ[1][4][CC][0][i][j] - coeff*lhsZ[4][4][CC][0][i][j]; rhs[0][j][i][1] = rhs[0][j][i][1] - coeff*rhs[0][j][i][4]; coeff = lhsZ[2][4][BB][0][i][j]; lhsZ[2][0][CC][0][i][j] = lhsZ[2][0][CC][0][i][j] - coeff*lhsZ[4][0][CC][0][i][j]; lhsZ[2][1][CC][0][i][j] = lhsZ[2][1][CC][0][i][j] - coeff*lhsZ[4][1][CC][0][i][j]; lhsZ[2][2][CC][0][i][j] = lhsZ[2][2][CC][0][i][j] - coeff*lhsZ[4][2][CC][0][i][j]; lhsZ[2][3][CC][0][i][j] = lhsZ[2][3][CC][0][i][j] - coeff*lhsZ[4][3][CC][0][i][j]; lhsZ[2][4][CC][0][i][j] = lhsZ[2][4][CC][0][i][j] - coeff*lhsZ[4][4][CC][0][i][j]; rhs[0][j][i][2] = rhs[0][j][i][2] - coeff*rhs[0][j][i][4]; coeff = lhsZ[3][4][BB][0][i][j]; lhsZ[3][0][CC][0][i][j] = lhsZ[3][0][CC][0][i][j] - coeff*lhsZ[4][0][CC][0][i][j]; lhsZ[3][1][CC][0][i][j] = lhsZ[3][1][CC][0][i][j] - coeff*lhsZ[4][1][CC][0][i][j]; 
lhsZ[3][2][CC][0][i][j] = lhsZ[3][2][CC][0][i][j] - coeff*lhsZ[4][2][CC][0][i][j]; lhsZ[3][3][CC][0][i][j] = lhsZ[3][3][CC][0][i][j] - coeff*lhsZ[4][3][CC][0][i][j]; lhsZ[3][4][CC][0][i][j] = lhsZ[3][4][CC][0][i][j] - coeff*lhsZ[4][4][CC][0][i][j]; rhs[0][j][i][3] = rhs[0][j][i][3] - coeff*rhs[0][j][i][4]; } } //--------------------------------------------------------------------- // begin inner most do loop // do all the elements of the cell unless last //--------------------------------------------------------------------- #pragma omp target teams distribute parallel for private(k,j) for (i = 1; i <= gp02; i++) { for (k = 1; k <= ksize-1; k++) { #pragma omp simd private(pivot,coeff) for (j = 1; j <= gp12; j++) { //------------------------------------------------------------------- // subtract A*lhsZ[j][i]_vector(k-1) from lhsZ[j][i]_vector(k) // // rhs(k) = rhs(k) - A*rhs(k-1) //------------------------------------------------------------------- //matvec_sub(lhsZ[i][j][AA], rhs[k-1][k][i][j], rhs[k][j][i]); /* for(m = 0; m < 5; m++){ rhs[k][j][i][m] = rhs[k][j][i][m] - lhsZ[m][0][AA][k][i][j]*rhs[k-1][j][i][0] - lhsZ[m][1][AA][k][i][j]*rhs[k-1][j][i][1] - lhsZ[m][2][AA][k][i][j]*rhs[k-1][j][i][2] - lhsZ[m][3][AA][k][i][j]*rhs[k-1][j][i][3] - lhsZ[m][4][AA][k][i][j]*rhs[k-1][j][i][4]; } */ rhs[k][j][i][0] = rhs[k][j][i][0] - lhsZ[0][0][AA][k][i][j]*rhs[k-1][j][i][0] - lhsZ[0][1][AA][k][i][j]*rhs[k-1][j][i][1] - lhsZ[0][2][AA][k][i][j]*rhs[k-1][j][i][2] - lhsZ[0][3][AA][k][i][j]*rhs[k-1][j][i][3] - lhsZ[0][4][AA][k][i][j]*rhs[k-1][j][i][4]; rhs[k][j][i][1] = rhs[k][j][i][1] - lhsZ[1][0][AA][k][i][j]*rhs[k-1][j][i][0] - lhsZ[1][1][AA][k][i][j]*rhs[k-1][j][i][1] - lhsZ[1][2][AA][k][i][j]*rhs[k-1][j][i][2] - lhsZ[1][3][AA][k][i][j]*rhs[k-1][j][i][3] - lhsZ[1][4][AA][k][i][j]*rhs[k-1][j][i][4]; rhs[k][j][i][2] = rhs[k][j][i][2] - lhsZ[2][0][AA][k][i][j]*rhs[k-1][j][i][0] - lhsZ[2][1][AA][k][i][j]*rhs[k-1][j][i][1] - lhsZ[2][2][AA][k][i][j]*rhs[k-1][j][i][2] - 
lhsZ[2][3][AA][k][i][j]*rhs[k-1][j][i][3] - lhsZ[2][4][AA][k][i][j]*rhs[k-1][j][i][4]; rhs[k][j][i][3] = rhs[k][j][i][3] - lhsZ[3][0][AA][k][i][j]*rhs[k-1][j][i][0] - lhsZ[3][1][AA][k][i][j]*rhs[k-1][j][i][1] - lhsZ[3][2][AA][k][i][j]*rhs[k-1][j][i][2] - lhsZ[3][3][AA][k][i][j]*rhs[k-1][j][i][3] - lhsZ[3][4][AA][k][i][j]*rhs[k-1][j][i][4]; rhs[k][j][i][4] = rhs[k][j][i][4] - lhsZ[4][0][AA][k][i][j]*rhs[k-1][j][i][0] - lhsZ[4][1][AA][k][i][j]*rhs[k-1][j][i][1] - lhsZ[4][2][AA][k][i][j]*rhs[k-1][j][i][2] - lhsZ[4][3][AA][k][i][j]*rhs[k-1][j][i][3] - lhsZ[4][4][AA][k][i][j]*rhs[k-1][j][i][4]; //------------------------------------------------------------------- // B(k) = B(k) - C(k-1)*A(k) // matmul_sub(AA,i,j,k,c,CC,i,j,k-1,c,BB,i,j,k) //------------------------------------------------------------------- //matmul_sub(lhsZ[k-1][i][AA], lhsZ[j][k][i][j][CC], lhsZ[j][i][k][BB]); /* for(m = 0; m < 5; m++){ for(n = 0; n < 5; n++){ lhsZ[n][m][BB][k][i][j] = lhsZ[n][m][BB][k][i][j] - lhsZ[n][0][AA][k][i][j]*lhsZ[0][m][CC][k-1][i][j] - lhsZ[n][1][AA][k][i][j]*lhsZ[1][m][CC][k-1][i][j] - lhsZ[n][2][AA][k][i][j]*lhsZ[2][m][CC][k-1][i][j] - lhsZ[n][3][AA][k][i][j]*lhsZ[3][m][CC][k-1][i][j] - lhsZ[n][4][AA][k][i][j]*lhsZ[4][m][CC][k-1][i][j]; } } */ lhsZ[0][0][BB][k][i][j] = lhsZ[0][0][BB][k][i][j] - lhsZ[0][0][AA][k][i][j]*lhsZ[0][0][CC][k-1][i][j] - lhsZ[0][1][AA][k][i][j]*lhsZ[1][0][CC][k-1][i][j] - lhsZ[0][2][AA][k][i][j]*lhsZ[2][0][CC][k-1][i][j] - lhsZ[0][3][AA][k][i][j]*lhsZ[3][0][CC][k-1][i][j] - lhsZ[0][4][AA][k][i][j]*lhsZ[4][0][CC][k-1][i][j]; lhsZ[1][0][BB][k][i][j] = lhsZ[1][0][BB][k][i][j] - lhsZ[1][0][AA][k][i][j]*lhsZ[0][0][CC][k-1][i][j] - lhsZ[1][1][AA][k][i][j]*lhsZ[1][0][CC][k-1][i][j] - lhsZ[1][2][AA][k][i][j]*lhsZ[2][0][CC][k-1][i][j] - lhsZ[1][3][AA][k][i][j]*lhsZ[3][0][CC][k-1][i][j] - lhsZ[1][4][AA][k][i][j]*lhsZ[4][0][CC][k-1][i][j]; lhsZ[2][0][BB][k][i][j] = lhsZ[2][0][BB][k][i][j] - lhsZ[2][0][AA][k][i][j]*lhsZ[0][0][CC][k-1][i][j] - 
lhsZ[2][1][AA][k][i][j]*lhsZ[1][0][CC][k-1][i][j] - lhsZ[2][2][AA][k][i][j]*lhsZ[2][0][CC][k-1][i][j] - lhsZ[2][3][AA][k][i][j]*lhsZ[3][0][CC][k-1][i][j] - lhsZ[2][4][AA][k][i][j]*lhsZ[4][0][CC][k-1][i][j]; lhsZ[3][0][BB][k][i][j] = lhsZ[3][0][BB][k][i][j] - lhsZ[3][0][AA][k][i][j]*lhsZ[0][0][CC][k-1][i][j] - lhsZ[3][1][AA][k][i][j]*lhsZ[1][0][CC][k-1][i][j] - lhsZ[3][2][AA][k][i][j]*lhsZ[2][0][CC][k-1][i][j] - lhsZ[3][3][AA][k][i][j]*lhsZ[3][0][CC][k-1][i][j] - lhsZ[3][4][AA][k][i][j]*lhsZ[4][0][CC][k-1][i][j]; lhsZ[4][0][BB][k][i][j] = lhsZ[4][0][BB][k][i][j] - lhsZ[4][0][AA][k][i][j]*lhsZ[0][0][CC][k-1][i][j] - lhsZ[4][1][AA][k][i][j]*lhsZ[1][0][CC][k-1][i][j] - lhsZ[4][2][AA][k][i][j]*lhsZ[2][0][CC][k-1][i][j] - lhsZ[4][3][AA][k][i][j]*lhsZ[3][0][CC][k-1][i][j] - lhsZ[4][4][AA][k][i][j]*lhsZ[4][0][CC][k-1][i][j]; lhsZ[0][1][BB][k][i][j] = lhsZ[0][1][BB][k][i][j] - lhsZ[0][0][AA][k][i][j]*lhsZ[0][1][CC][k-1][i][j] - lhsZ[0][1][AA][k][i][j]*lhsZ[1][1][CC][k-1][i][j] - lhsZ[0][2][AA][k][i][j]*lhsZ[2][1][CC][k-1][i][j] - lhsZ[0][3][AA][k][i][j]*lhsZ[3][1][CC][k-1][i][j] - lhsZ[0][4][AA][k][i][j]*lhsZ[4][1][CC][k-1][i][j]; lhsZ[1][1][BB][k][i][j] = lhsZ[1][1][BB][k][i][j] - lhsZ[1][0][AA][k][i][j]*lhsZ[0][1][CC][k-1][i][j] - lhsZ[1][1][AA][k][i][j]*lhsZ[1][1][CC][k-1][i][j] - lhsZ[1][2][AA][k][i][j]*lhsZ[2][1][CC][k-1][i][j] - lhsZ[1][3][AA][k][i][j]*lhsZ[3][1][CC][k-1][i][j] - lhsZ[1][4][AA][k][i][j]*lhsZ[4][1][CC][k-1][i][j]; lhsZ[2][1][BB][k][i][j] = lhsZ[2][1][BB][k][i][j] - lhsZ[2][0][AA][k][i][j]*lhsZ[0][1][CC][k-1][i][j] - lhsZ[2][1][AA][k][i][j]*lhsZ[1][1][CC][k-1][i][j] - lhsZ[2][2][AA][k][i][j]*lhsZ[2][1][CC][k-1][i][j] - lhsZ[2][3][AA][k][i][j]*lhsZ[3][1][CC][k-1][i][j] - lhsZ[2][4][AA][k][i][j]*lhsZ[4][1][CC][k-1][i][j]; lhsZ[3][1][BB][k][i][j] = lhsZ[3][1][BB][k][i][j] - lhsZ[3][0][AA][k][i][j]*lhsZ[0][1][CC][k-1][i][j] - lhsZ[3][1][AA][k][i][j]*lhsZ[1][1][CC][k-1][i][j] - lhsZ[3][2][AA][k][i][j]*lhsZ[2][1][CC][k-1][i][j] - 
lhsZ[3][3][AA][k][i][j]*lhsZ[3][1][CC][k-1][i][j] - lhsZ[3][4][AA][k][i][j]*lhsZ[4][1][CC][k-1][i][j]; lhsZ[4][1][BB][k][i][j] = lhsZ[4][1][BB][k][i][j] - lhsZ[4][0][AA][k][i][j]*lhsZ[0][1][CC][k-1][i][j] - lhsZ[4][1][AA][k][i][j]*lhsZ[1][1][CC][k-1][i][j] - lhsZ[4][2][AA][k][i][j]*lhsZ[2][1][CC][k-1][i][j] - lhsZ[4][3][AA][k][i][j]*lhsZ[3][1][CC][k-1][i][j] - lhsZ[4][4][AA][k][i][j]*lhsZ[4][1][CC][k-1][i][j]; lhsZ[0][2][BB][k][i][j] = lhsZ[0][2][BB][k][i][j] - lhsZ[0][0][AA][k][i][j]*lhsZ[0][2][CC][k-1][i][j] - lhsZ[0][1][AA][k][i][j]*lhsZ[1][2][CC][k-1][i][j] - lhsZ[0][2][AA][k][i][j]*lhsZ[2][2][CC][k-1][i][j] - lhsZ[0][3][AA][k][i][j]*lhsZ[3][2][CC][k-1][i][j] - lhsZ[0][4][AA][k][i][j]*lhsZ[4][2][CC][k-1][i][j]; lhsZ[1][2][BB][k][i][j] = lhsZ[1][2][BB][k][i][j] - lhsZ[1][0][AA][k][i][j]*lhsZ[0][2][CC][k-1][i][j] - lhsZ[1][1][AA][k][i][j]*lhsZ[1][2][CC][k-1][i][j] - lhsZ[1][2][AA][k][i][j]*lhsZ[2][2][CC][k-1][i][j] - lhsZ[1][3][AA][k][i][j]*lhsZ[3][2][CC][k-1][i][j] - lhsZ[1][4][AA][k][i][j]*lhsZ[4][2][CC][k-1][i][j]; lhsZ[2][2][BB][k][i][j] = lhsZ[2][2][BB][k][i][j] - lhsZ[2][0][AA][k][i][j]*lhsZ[0][2][CC][k-1][i][j] - lhsZ[2][1][AA][k][i][j]*lhsZ[1][2][CC][k-1][i][j] - lhsZ[2][2][AA][k][i][j]*lhsZ[2][2][CC][k-1][i][j] - lhsZ[2][3][AA][k][i][j]*lhsZ[3][2][CC][k-1][i][j] - lhsZ[2][4][AA][k][i][j]*lhsZ[4][2][CC][k-1][i][j]; lhsZ[3][2][BB][k][i][j] = lhsZ[3][2][BB][k][i][j] - lhsZ[3][0][AA][k][i][j]*lhsZ[0][2][CC][k-1][i][j] - lhsZ[3][1][AA][k][i][j]*lhsZ[1][2][CC][k-1][i][j] - lhsZ[3][2][AA][k][i][j]*lhsZ[2][2][CC][k-1][i][j] - lhsZ[3][3][AA][k][i][j]*lhsZ[3][2][CC][k-1][i][j] - lhsZ[3][4][AA][k][i][j]*lhsZ[4][2][CC][k-1][i][j]; lhsZ[4][2][BB][k][i][j] = lhsZ[4][2][BB][k][i][j] - lhsZ[4][0][AA][k][i][j]*lhsZ[0][2][CC][k-1][i][j] - lhsZ[4][1][AA][k][i][j]*lhsZ[1][2][CC][k-1][i][j] - lhsZ[4][2][AA][k][i][j]*lhsZ[2][2][CC][k-1][i][j] - lhsZ[4][3][AA][k][i][j]*lhsZ[3][2][CC][k-1][i][j] - lhsZ[4][4][AA][k][i][j]*lhsZ[4][2][CC][k-1][i][j]; lhsZ[0][3][BB][k][i][j] = 
lhsZ[0][3][BB][k][i][j] - lhsZ[0][0][AA][k][i][j]*lhsZ[0][3][CC][k-1][i][j] - lhsZ[0][1][AA][k][i][j]*lhsZ[1][3][CC][k-1][i][j] - lhsZ[0][2][AA][k][i][j]*lhsZ[2][3][CC][k-1][i][j] - lhsZ[0][3][AA][k][i][j]*lhsZ[3][3][CC][k-1][i][j] - lhsZ[0][4][AA][k][i][j]*lhsZ[4][3][CC][k-1][i][j]; lhsZ[1][3][BB][k][i][j] = lhsZ[1][3][BB][k][i][j] - lhsZ[1][0][AA][k][i][j]*lhsZ[0][3][CC][k-1][i][j] - lhsZ[1][1][AA][k][i][j]*lhsZ[1][3][CC][k-1][i][j] - lhsZ[1][2][AA][k][i][j]*lhsZ[2][3][CC][k-1][i][j] - lhsZ[1][3][AA][k][i][j]*lhsZ[3][3][CC][k-1][i][j] - lhsZ[1][4][AA][k][i][j]*lhsZ[4][3][CC][k-1][i][j]; lhsZ[2][3][BB][k][i][j] = lhsZ[2][3][BB][k][i][j] - lhsZ[2][0][AA][k][i][j]*lhsZ[0][3][CC][k-1][i][j] - lhsZ[2][1][AA][k][i][j]*lhsZ[1][3][CC][k-1][i][j] - lhsZ[2][2][AA][k][i][j]*lhsZ[2][3][CC][k-1][i][j] - lhsZ[2][3][AA][k][i][j]*lhsZ[3][3][CC][k-1][i][j] - lhsZ[2][4][AA][k][i][j]*lhsZ[4][3][CC][k-1][i][j]; lhsZ[3][3][BB][k][i][j] = lhsZ[3][3][BB][k][i][j] - lhsZ[3][0][AA][k][i][j]*lhsZ[0][3][CC][k-1][i][j] - lhsZ[3][1][AA][k][i][j]*lhsZ[1][3][CC][k-1][i][j] - lhsZ[3][2][AA][k][i][j]*lhsZ[2][3][CC][k-1][i][j] - lhsZ[3][3][AA][k][i][j]*lhsZ[3][3][CC][k-1][i][j] - lhsZ[3][4][AA][k][i][j]*lhsZ[4][3][CC][k-1][i][j]; lhsZ[4][3][BB][k][i][j] = lhsZ[4][3][BB][k][i][j] - lhsZ[4][0][AA][k][i][j]*lhsZ[0][3][CC][k-1][i][j] - lhsZ[4][1][AA][k][i][j]*lhsZ[1][3][CC][k-1][i][j] - lhsZ[4][2][AA][k][i][j]*lhsZ[2][3][CC][k-1][i][j] - lhsZ[4][3][AA][k][i][j]*lhsZ[3][3][CC][k-1][i][j] - lhsZ[4][4][AA][k][i][j]*lhsZ[4][3][CC][k-1][i][j]; lhsZ[0][4][BB][k][i][j] = lhsZ[0][4][BB][k][i][j] - lhsZ[0][0][AA][k][i][j]*lhsZ[0][4][CC][k-1][i][j] - lhsZ[0][1][AA][k][i][j]*lhsZ[1][4][CC][k-1][i][j] - lhsZ[0][2][AA][k][i][j]*lhsZ[2][4][CC][k-1][i][j] - lhsZ[0][3][AA][k][i][j]*lhsZ[3][4][CC][k-1][i][j] - lhsZ[0][4][AA][k][i][j]*lhsZ[4][4][CC][k-1][i][j]; lhsZ[1][4][BB][k][i][j] = lhsZ[1][4][BB][k][i][j] - lhsZ[1][0][AA][k][i][j]*lhsZ[0][4][CC][k-1][i][j] - lhsZ[1][1][AA][k][i][j]*lhsZ[1][4][CC][k-1][i][j] - 
lhsZ[1][2][AA][k][i][j]*lhsZ[2][4][CC][k-1][i][j] - lhsZ[1][3][AA][k][i][j]*lhsZ[3][4][CC][k-1][i][j] - lhsZ[1][4][AA][k][i][j]*lhsZ[4][4][CC][k-1][i][j]; lhsZ[2][4][BB][k][i][j] = lhsZ[2][4][BB][k][i][j] - lhsZ[2][0][AA][k][i][j]*lhsZ[0][4][CC][k-1][i][j] - lhsZ[2][1][AA][k][i][j]*lhsZ[1][4][CC][k-1][i][j] - lhsZ[2][2][AA][k][i][j]*lhsZ[2][4][CC][k-1][i][j] - lhsZ[2][3][AA][k][i][j]*lhsZ[3][4][CC][k-1][i][j] - lhsZ[2][4][AA][k][i][j]*lhsZ[4][4][CC][k-1][i][j]; lhsZ[3][4][BB][k][i][j] = lhsZ[3][4][BB][k][i][j] - lhsZ[3][0][AA][k][i][j]*lhsZ[0][4][CC][k-1][i][j] - lhsZ[3][1][AA][k][i][j]*lhsZ[1][4][CC][k-1][i][j] - lhsZ[3][2][AA][k][i][j]*lhsZ[2][4][CC][k-1][i][j] - lhsZ[3][3][AA][k][i][j]*lhsZ[3][4][CC][k-1][i][j] - lhsZ[3][4][AA][k][i][j]*lhsZ[4][4][CC][k-1][i][j]; lhsZ[4][4][BB][k][i][j] = lhsZ[4][4][BB][k][i][j] - lhsZ[4][0][AA][k][i][j]*lhsZ[0][4][CC][k-1][i][j] - lhsZ[4][1][AA][k][i][j]*lhsZ[1][4][CC][k-1][i][j] - lhsZ[4][2][AA][k][i][j]*lhsZ[2][4][CC][k-1][i][j] - lhsZ[4][3][AA][k][i][j]*lhsZ[3][4][CC][k-1][i][j] - lhsZ[4][4][AA][k][i][j]*lhsZ[4][4][CC][k-1][i][j]; //------------------------------------------------------------------- // multiply c[k][j][i] by b_inverse and copy back to c // multiply rhs[0][j][i] by b_inverse[0][j][i] and copy to rhs //------------------------------------------------------------------- //binvcrhs( lhsZ[k][i][BB], lhsZ[j][k][i][j][CC], rhs[k][j][i] ); /* for(m = 0; m < 5; m++){ pivot = 1.00/lhsZ[m][m][BB][k][i][j]; for(n = m+1; n < 5; n++){ lhsZ[m][n][BB][k][i][j] = lhsZ[m][n][BB][k][i][j]*pivot; } lhsZ[m][0][CC][k][i][j] = lhsZ[m][0][CC][k][i][j]*pivot; lhsZ[m][1][CC][k][i][j] = lhsZ[m][1][CC][k][i][j]*pivot; lhsZ[m][2][CC][k][i][j] = lhsZ[m][2][CC][k][i][j]*pivot; lhsZ[m][3][CC][k][i][j] = lhsZ[m][3][CC][k][i][j]*pivot; lhsZ[m][4][CC][k][i][j] = lhsZ[m][4][CC][k][i][j]*pivot; rhs[k][j][i][m] = rhs[k][j][i][m]*pivot; for(n = 0; n < 5; n++){ if(n != m){ coeff = lhsZ[n][m][BB][k][i][j]; for(z = m+1; z < 5; z++){ 
lhsZ[n][z][BB][k][i][j] = lhsZ[n][z][BB][k][i][j] - coeff*lhsZ[m][z][BB][k][i][j]; } lhsZ[n][0][CC][k][i][j] = lhsZ[n][0][CC][k][i][j] - coeff*lhsZ[m][0][CC][k][i][j]; lhsZ[n][1][CC][k][i][j] = lhsZ[n][1][CC][k][i][j] - coeff*lhsZ[m][1][CC][k][i][j]; lhsZ[n][2][CC][k][i][j] = lhsZ[n][2][CC][k][i][j] - coeff*lhsZ[m][2][CC][k][i][j]; lhsZ[n][3][CC][k][i][j] = lhsZ[n][3][CC][k][i][j] - coeff*lhsZ[m][3][CC][k][i][j]; lhsZ[n][4][CC][k][i][j] = lhsZ[n][4][CC][k][i][j] - coeff*lhsZ[m][4][CC][k][i][j]; rhs[k][j][i][n] = rhs[k][j][i][n] - coeff*rhs[k][j][i][m]; } } } */ pivot = 1.00/lhsZ[0][0][BB][k][i][j]; lhsZ[0][1][BB][k][i][j] = lhsZ[0][1][BB][k][i][j]*pivot; lhsZ[0][2][BB][k][i][j] = lhsZ[0][2][BB][k][i][j]*pivot; lhsZ[0][3][BB][k][i][j] = lhsZ[0][3][BB][k][i][j]*pivot; lhsZ[0][4][BB][k][i][j] = lhsZ[0][4][BB][k][i][j]*pivot; lhsZ[0][0][CC][k][i][j] = lhsZ[0][0][CC][k][i][j]*pivot; lhsZ[0][1][CC][k][i][j] = lhsZ[0][1][CC][k][i][j]*pivot; lhsZ[0][2][CC][k][i][j] = lhsZ[0][2][CC][k][i][j]*pivot; lhsZ[0][3][CC][k][i][j] = lhsZ[0][3][CC][k][i][j]*pivot; lhsZ[0][4][CC][k][i][j] = lhsZ[0][4][CC][k][i][j]*pivot; rhs[k][j][i][0] = rhs[k][j][i][0] *pivot; coeff = lhsZ[1][0][BB][k][i][j]; lhsZ[1][1][BB][k][i][j]= lhsZ[1][1][BB][k][i][j] - coeff*lhsZ[0][1][BB][k][i][j]; lhsZ[1][2][BB][k][i][j]= lhsZ[1][2][BB][k][i][j] - coeff*lhsZ[0][2][BB][k][i][j]; lhsZ[1][3][BB][k][i][j]= lhsZ[1][3][BB][k][i][j] - coeff*lhsZ[0][3][BB][k][i][j]; lhsZ[1][4][BB][k][i][j]= lhsZ[1][4][BB][k][i][j] - coeff*lhsZ[0][4][BB][k][i][j]; lhsZ[1][0][CC][k][i][j] = lhsZ[1][0][CC][k][i][j] - coeff*lhsZ[0][0][CC][k][i][j]; lhsZ[1][1][CC][k][i][j] = lhsZ[1][1][CC][k][i][j] - coeff*lhsZ[0][1][CC][k][i][j]; lhsZ[1][2][CC][k][i][j] = lhsZ[1][2][CC][k][i][j] - coeff*lhsZ[0][2][CC][k][i][j]; lhsZ[1][3][CC][k][i][j] = lhsZ[1][3][CC][k][i][j] - coeff*lhsZ[0][3][CC][k][i][j]; lhsZ[1][4][CC][k][i][j] = lhsZ[1][4][CC][k][i][j] - coeff*lhsZ[0][4][CC][k][i][j]; rhs[k][j][i][1] = rhs[k][j][i][1] - coeff*rhs[k][j][i][0]; 
coeff = lhsZ[2][0][BB][k][i][j]; lhsZ[2][1][BB][k][i][j]= lhsZ[2][1][BB][k][i][j] - coeff*lhsZ[0][1][BB][k][i][j]; lhsZ[2][2][BB][k][i][j]= lhsZ[2][2][BB][k][i][j] - coeff*lhsZ[0][2][BB][k][i][j]; lhsZ[2][3][BB][k][i][j]= lhsZ[2][3][BB][k][i][j] - coeff*lhsZ[0][3][BB][k][i][j]; lhsZ[2][4][BB][k][i][j]= lhsZ[2][4][BB][k][i][j] - coeff*lhsZ[0][4][BB][k][i][j]; lhsZ[2][0][CC][k][i][j] = lhsZ[2][0][CC][k][i][j] - coeff*lhsZ[0][0][CC][k][i][j]; lhsZ[2][1][CC][k][i][j] = lhsZ[2][1][CC][k][i][j] - coeff*lhsZ[0][1][CC][k][i][j]; lhsZ[2][2][CC][k][i][j] = lhsZ[2][2][CC][k][i][j] - coeff*lhsZ[0][2][CC][k][i][j]; lhsZ[2][3][CC][k][i][j] = lhsZ[2][3][CC][k][i][j] - coeff*lhsZ[0][3][CC][k][i][j]; lhsZ[2][4][CC][k][i][j] = lhsZ[2][4][CC][k][i][j] - coeff*lhsZ[0][4][CC][k][i][j]; rhs[k][j][i][2] = rhs[k][j][i][2] - coeff*rhs[k][j][i][0]; coeff = lhsZ[3][0][BB][k][i][j]; lhsZ[3][1][BB][k][i][j]= lhsZ[3][1][BB][k][i][j] - coeff*lhsZ[0][1][BB][k][i][j]; lhsZ[3][2][BB][k][i][j]= lhsZ[3][2][BB][k][i][j] - coeff*lhsZ[0][2][BB][k][i][j]; lhsZ[3][3][BB][k][i][j]= lhsZ[3][3][BB][k][i][j] - coeff*lhsZ[0][3][BB][k][i][j]; lhsZ[3][4][BB][k][i][j]= lhsZ[3][4][BB][k][i][j] - coeff*lhsZ[0][4][BB][k][i][j]; lhsZ[3][0][CC][k][i][j] = lhsZ[3][0][CC][k][i][j] - coeff*lhsZ[0][0][CC][k][i][j]; lhsZ[3][1][CC][k][i][j] = lhsZ[3][1][CC][k][i][j] - coeff*lhsZ[0][1][CC][k][i][j]; lhsZ[3][2][CC][k][i][j] = lhsZ[3][2][CC][k][i][j] - coeff*lhsZ[0][2][CC][k][i][j]; lhsZ[3][3][CC][k][i][j] = lhsZ[3][3][CC][k][i][j] - coeff*lhsZ[0][3][CC][k][i][j]; lhsZ[3][4][CC][k][i][j] = lhsZ[3][4][CC][k][i][j] - coeff*lhsZ[0][4][CC][k][i][j]; rhs[k][j][i][3] = rhs[k][j][i][3] - coeff*rhs[k][j][i][0]; coeff = lhsZ[4][0][BB][k][i][j]; lhsZ[4][1][BB][k][i][j]= lhsZ[4][1][BB][k][i][j] - coeff*lhsZ[0][1][BB][k][i][j]; lhsZ[4][2][BB][k][i][j]= lhsZ[4][2][BB][k][i][j] - coeff*lhsZ[0][2][BB][k][i][j]; lhsZ[4][3][BB][k][i][j]= lhsZ[4][3][BB][k][i][j] - coeff*lhsZ[0][3][BB][k][i][j]; lhsZ[4][4][BB][k][i][j]= lhsZ[4][4][BB][k][i][j] - 
coeff*lhsZ[0][4][BB][k][i][j]; lhsZ[4][0][CC][k][i][j] = lhsZ[4][0][CC][k][i][j] - coeff*lhsZ[0][0][CC][k][i][j]; lhsZ[4][1][CC][k][i][j] = lhsZ[4][1][CC][k][i][j] - coeff*lhsZ[0][1][CC][k][i][j]; lhsZ[4][2][CC][k][i][j] = lhsZ[4][2][CC][k][i][j] - coeff*lhsZ[0][2][CC][k][i][j]; lhsZ[4][3][CC][k][i][j] = lhsZ[4][3][CC][k][i][j] - coeff*lhsZ[0][3][CC][k][i][j]; lhsZ[4][4][CC][k][i][j] = lhsZ[4][4][CC][k][i][j] - coeff*lhsZ[0][4][CC][k][i][j]; rhs[k][j][i][4] = rhs[k][j][i][4] - coeff*rhs[k][j][i][0]; pivot = 1.00/lhsZ[1][1][BB][k][i][j]; lhsZ[1][2][BB][k][i][j] = lhsZ[1][2][BB][k][i][j]*pivot; lhsZ[1][3][BB][k][i][j] = lhsZ[1][3][BB][k][i][j]*pivot; lhsZ[1][4][BB][k][i][j] = lhsZ[1][4][BB][k][i][j]*pivot; lhsZ[1][0][CC][k][i][j] = lhsZ[1][0][CC][k][i][j]*pivot; lhsZ[1][1][CC][k][i][j] = lhsZ[1][1][CC][k][i][j]*pivot; lhsZ[1][2][CC][k][i][j] = lhsZ[1][2][CC][k][i][j]*pivot; lhsZ[1][3][CC][k][i][j] = lhsZ[1][3][CC][k][i][j]*pivot; lhsZ[1][4][CC][k][i][j] = lhsZ[1][4][CC][k][i][j]*pivot; rhs[k][j][i][1] = rhs[k][j][i][1] *pivot; coeff = lhsZ[0][1][BB][k][i][j]; lhsZ[0][2][BB][k][i][j]= lhsZ[0][2][BB][k][i][j] - coeff*lhsZ[1][2][BB][k][i][j]; lhsZ[0][3][BB][k][i][j]= lhsZ[0][3][BB][k][i][j] - coeff*lhsZ[1][3][BB][k][i][j]; lhsZ[0][4][BB][k][i][j]= lhsZ[0][4][BB][k][i][j] - coeff*lhsZ[1][4][BB][k][i][j]; lhsZ[0][0][CC][k][i][j] = lhsZ[0][0][CC][k][i][j] - coeff*lhsZ[1][0][CC][k][i][j]; lhsZ[0][1][CC][k][i][j] = lhsZ[0][1][CC][k][i][j] - coeff*lhsZ[1][1][CC][k][i][j]; lhsZ[0][2][CC][k][i][j] = lhsZ[0][2][CC][k][i][j] - coeff*lhsZ[1][2][CC][k][i][j]; lhsZ[0][3][CC][k][i][j] = lhsZ[0][3][CC][k][i][j] - coeff*lhsZ[1][3][CC][k][i][j]; lhsZ[0][4][CC][k][i][j] = lhsZ[0][4][CC][k][i][j] - coeff*lhsZ[1][4][CC][k][i][j]; rhs[k][j][i][0] = rhs[k][j][i][0] - coeff*rhs[k][j][i][1]; coeff = lhsZ[2][1][BB][k][i][j]; lhsZ[2][2][BB][k][i][j]= lhsZ[2][2][BB][k][i][j] - coeff*lhsZ[1][2][BB][k][i][j]; lhsZ[2][3][BB][k][i][j]= lhsZ[2][3][BB][k][i][j] - coeff*lhsZ[1][3][BB][k][i][j]; 
lhsZ[2][4][BB][k][i][j]= lhsZ[2][4][BB][k][i][j] - coeff*lhsZ[1][4][BB][k][i][j]; lhsZ[2][0][CC][k][i][j] = lhsZ[2][0][CC][k][i][j] - coeff*lhsZ[1][0][CC][k][i][j]; lhsZ[2][1][CC][k][i][j] = lhsZ[2][1][CC][k][i][j] - coeff*lhsZ[1][1][CC][k][i][j]; lhsZ[2][2][CC][k][i][j] = lhsZ[2][2][CC][k][i][j] - coeff*lhsZ[1][2][CC][k][i][j]; lhsZ[2][3][CC][k][i][j] = lhsZ[2][3][CC][k][i][j] - coeff*lhsZ[1][3][CC][k][i][j]; lhsZ[2][4][CC][k][i][j] = lhsZ[2][4][CC][k][i][j] - coeff*lhsZ[1][4][CC][k][i][j]; rhs[k][j][i][2] = rhs[k][j][i][2] - coeff*rhs[k][j][i][1]; coeff = lhsZ[3][1][BB][k][i][j]; lhsZ[3][2][BB][k][i][j]= lhsZ[3][2][BB][k][i][j] - coeff*lhsZ[1][2][BB][k][i][j]; lhsZ[3][3][BB][k][i][j]= lhsZ[3][3][BB][k][i][j] - coeff*lhsZ[1][3][BB][k][i][j]; lhsZ[3][4][BB][k][i][j]= lhsZ[3][4][BB][k][i][j] - coeff*lhsZ[1][4][BB][k][i][j]; lhsZ[3][0][CC][k][i][j] = lhsZ[3][0][CC][k][i][j] - coeff*lhsZ[1][0][CC][k][i][j]; lhsZ[3][1][CC][k][i][j] = lhsZ[3][1][CC][k][i][j] - coeff*lhsZ[1][1][CC][k][i][j]; lhsZ[3][2][CC][k][i][j] = lhsZ[3][2][CC][k][i][j] - coeff*lhsZ[1][2][CC][k][i][j]; lhsZ[3][3][CC][k][i][j] = lhsZ[3][3][CC][k][i][j] - coeff*lhsZ[1][3][CC][k][i][j]; lhsZ[3][4][CC][k][i][j] = lhsZ[3][4][CC][k][i][j] - coeff*lhsZ[1][4][CC][k][i][j]; rhs[k][j][i][3] = rhs[k][j][i][3] - coeff*rhs[k][j][i][1]; coeff = lhsZ[4][1][BB][k][i][j]; lhsZ[4][2][BB][k][i][j]= lhsZ[4][2][BB][k][i][j] - coeff*lhsZ[1][2][BB][k][i][j]; lhsZ[4][3][BB][k][i][j]= lhsZ[4][3][BB][k][i][j] - coeff*lhsZ[1][3][BB][k][i][j]; lhsZ[4][4][BB][k][i][j]= lhsZ[4][4][BB][k][i][j] - coeff*lhsZ[1][4][BB][k][i][j]; lhsZ[4][0][CC][k][i][j] = lhsZ[4][0][CC][k][i][j] - coeff*lhsZ[1][0][CC][k][i][j]; lhsZ[4][1][CC][k][i][j] = lhsZ[4][1][CC][k][i][j] - coeff*lhsZ[1][1][CC][k][i][j]; lhsZ[4][2][CC][k][i][j] = lhsZ[4][2][CC][k][i][j] - coeff*lhsZ[1][2][CC][k][i][j]; lhsZ[4][3][CC][k][i][j] = lhsZ[4][3][CC][k][i][j] - coeff*lhsZ[1][3][CC][k][i][j]; lhsZ[4][4][CC][k][i][j] = lhsZ[4][4][CC][k][i][j] - 
coeff*lhsZ[1][4][CC][k][i][j]; rhs[k][j][i][4] = rhs[k][j][i][4] - coeff*rhs[k][j][i][1]; pivot = 1.00/lhsZ[2][2][BB][k][i][j]; lhsZ[2][3][BB][k][i][j] = lhsZ[2][3][BB][k][i][j]*pivot; lhsZ[2][4][BB][k][i][j] = lhsZ[2][4][BB][k][i][j]*pivot; lhsZ[2][0][CC][k][i][j] = lhsZ[2][0][CC][k][i][j]*pivot; lhsZ[2][1][CC][k][i][j] = lhsZ[2][1][CC][k][i][j]*pivot; lhsZ[2][2][CC][k][i][j] = lhsZ[2][2][CC][k][i][j]*pivot; lhsZ[2][3][CC][k][i][j] = lhsZ[2][3][CC][k][i][j]*pivot; lhsZ[2][4][CC][k][i][j] = lhsZ[2][4][CC][k][i][j]*pivot; rhs[k][j][i][2] = rhs[k][j][i][2] *pivot; coeff = lhsZ[0][2][BB][k][i][j]; lhsZ[0][3][BB][k][i][j]= lhsZ[0][3][BB][k][i][j] - coeff*lhsZ[2][3][BB][k][i][j]; lhsZ[0][4][BB][k][i][j]= lhsZ[0][4][BB][k][i][j] - coeff*lhsZ[2][4][BB][k][i][j]; lhsZ[0][0][CC][k][i][j] = lhsZ[0][0][CC][k][i][j] - coeff*lhsZ[2][0][CC][k][i][j]; lhsZ[0][1][CC][k][i][j] = lhsZ[0][1][CC][k][i][j] - coeff*lhsZ[2][1][CC][k][i][j]; lhsZ[0][2][CC][k][i][j] = lhsZ[0][2][CC][k][i][j] - coeff*lhsZ[2][2][CC][k][i][j]; lhsZ[0][3][CC][k][i][j] = lhsZ[0][3][CC][k][i][j] - coeff*lhsZ[2][3][CC][k][i][j]; lhsZ[0][4][CC][k][i][j] = lhsZ[0][4][CC][k][i][j] - coeff*lhsZ[2][4][CC][k][i][j]; rhs[k][j][i][0] = rhs[k][j][i][0] - coeff*rhs[k][j][i][2]; coeff = lhsZ[1][2][BB][k][i][j]; lhsZ[1][3][BB][k][i][j]= lhsZ[1][3][BB][k][i][j] - coeff*lhsZ[2][3][BB][k][i][j]; lhsZ[1][4][BB][k][i][j]= lhsZ[1][4][BB][k][i][j] - coeff*lhsZ[2][4][BB][k][i][j]; lhsZ[1][0][CC][k][i][j] = lhsZ[1][0][CC][k][i][j] - coeff*lhsZ[2][0][CC][k][i][j]; lhsZ[1][1][CC][k][i][j] = lhsZ[1][1][CC][k][i][j] - coeff*lhsZ[2][1][CC][k][i][j]; lhsZ[1][2][CC][k][i][j] = lhsZ[1][2][CC][k][i][j] - coeff*lhsZ[2][2][CC][k][i][j]; lhsZ[1][3][CC][k][i][j] = lhsZ[1][3][CC][k][i][j] - coeff*lhsZ[2][3][CC][k][i][j]; lhsZ[1][4][CC][k][i][j] = lhsZ[1][4][CC][k][i][j] - coeff*lhsZ[2][4][CC][k][i][j]; rhs[k][j][i][1] = rhs[k][j][i][1] - coeff*rhs[k][j][i][2]; coeff = lhsZ[3][2][BB][k][i][j]; lhsZ[3][3][BB][k][i][j]= lhsZ[3][3][BB][k][i][j] - 
coeff*lhsZ[2][3][BB][k][i][j]; lhsZ[3][4][BB][k][i][j]= lhsZ[3][4][BB][k][i][j] - coeff*lhsZ[2][4][BB][k][i][j]; lhsZ[3][0][CC][k][i][j] = lhsZ[3][0][CC][k][i][j] - coeff*lhsZ[2][0][CC][k][i][j]; lhsZ[3][1][CC][k][i][j] = lhsZ[3][1][CC][k][i][j] - coeff*lhsZ[2][1][CC][k][i][j]; lhsZ[3][2][CC][k][i][j] = lhsZ[3][2][CC][k][i][j] - coeff*lhsZ[2][2][CC][k][i][j]; lhsZ[3][3][CC][k][i][j] = lhsZ[3][3][CC][k][i][j] - coeff*lhsZ[2][3][CC][k][i][j]; lhsZ[3][4][CC][k][i][j] = lhsZ[3][4][CC][k][i][j] - coeff*lhsZ[2][4][CC][k][i][j]; rhs[k][j][i][3] = rhs[k][j][i][3] - coeff*rhs[k][j][i][2]; coeff = lhsZ[4][2][BB][k][i][j]; lhsZ[4][3][BB][k][i][j]= lhsZ[4][3][BB][k][i][j] - coeff*lhsZ[2][3][BB][k][i][j]; lhsZ[4][4][BB][k][i][j]= lhsZ[4][4][BB][k][i][j] - coeff*lhsZ[2][4][BB][k][i][j]; lhsZ[4][0][CC][k][i][j] = lhsZ[4][0][CC][k][i][j] - coeff*lhsZ[2][0][CC][k][i][j]; lhsZ[4][1][CC][k][i][j] = lhsZ[4][1][CC][k][i][j] - coeff*lhsZ[2][1][CC][k][i][j]; lhsZ[4][2][CC][k][i][j] = lhsZ[4][2][CC][k][i][j] - coeff*lhsZ[2][2][CC][k][i][j]; lhsZ[4][3][CC][k][i][j] = lhsZ[4][3][CC][k][i][j] - coeff*lhsZ[2][3][CC][k][i][j]; lhsZ[4][4][CC][k][i][j] = lhsZ[4][4][CC][k][i][j] - coeff*lhsZ[2][4][CC][k][i][j]; rhs[k][j][i][4] = rhs[k][j][i][4] - coeff*rhs[k][j][i][2]; pivot = 1.00/lhsZ[3][3][BB][k][i][j]; lhsZ[3][4][BB][k][i][j] = lhsZ[3][4][BB][k][i][j]*pivot; lhsZ[3][0][CC][k][i][j] = lhsZ[3][0][CC][k][i][j]*pivot; lhsZ[3][1][CC][k][i][j] = lhsZ[3][1][CC][k][i][j]*pivot; lhsZ[3][2][CC][k][i][j] = lhsZ[3][2][CC][k][i][j]*pivot; lhsZ[3][3][CC][k][i][j] = lhsZ[3][3][CC][k][i][j]*pivot; lhsZ[3][4][CC][k][i][j] = lhsZ[3][4][CC][k][i][j]*pivot; rhs[k][j][i][3] = rhs[k][j][i][3] *pivot; coeff = lhsZ[0][3][BB][k][i][j]; lhsZ[0][4][BB][k][i][j]= lhsZ[0][4][BB][k][i][j] - coeff*lhsZ[3][4][BB][k][i][j]; lhsZ[0][0][CC][k][i][j] = lhsZ[0][0][CC][k][i][j] - coeff*lhsZ[3][0][CC][k][i][j]; lhsZ[0][1][CC][k][i][j] = lhsZ[0][1][CC][k][i][j] - coeff*lhsZ[3][1][CC][k][i][j]; lhsZ[0][2][CC][k][i][j] = 
lhsZ[0][2][CC][k][i][j] - coeff*lhsZ[3][2][CC][k][i][j]; lhsZ[0][3][CC][k][i][j] = lhsZ[0][3][CC][k][i][j] - coeff*lhsZ[3][3][CC][k][i][j]; lhsZ[0][4][CC][k][i][j] = lhsZ[0][4][CC][k][i][j] - coeff*lhsZ[3][4][CC][k][i][j]; rhs[k][j][i][0] = rhs[k][j][i][0] - coeff*rhs[k][j][i][3]; coeff = lhsZ[1][3][BB][k][i][j]; lhsZ[1][4][BB][k][i][j]= lhsZ[1][4][BB][k][i][j] - coeff*lhsZ[3][4][BB][k][i][j]; lhsZ[1][0][CC][k][i][j] = lhsZ[1][0][CC][k][i][j] - coeff*lhsZ[3][0][CC][k][i][j]; lhsZ[1][1][CC][k][i][j] = lhsZ[1][1][CC][k][i][j] - coeff*lhsZ[3][1][CC][k][i][j]; lhsZ[1][2][CC][k][i][j] = lhsZ[1][2][CC][k][i][j] - coeff*lhsZ[3][2][CC][k][i][j]; lhsZ[1][3][CC][k][i][j] = lhsZ[1][3][CC][k][i][j] - coeff*lhsZ[3][3][CC][k][i][j]; lhsZ[1][4][CC][k][i][j] = lhsZ[1][4][CC][k][i][j] - coeff*lhsZ[3][4][CC][k][i][j]; rhs[k][j][i][1] = rhs[k][j][i][1] - coeff*rhs[k][j][i][3]; coeff = lhsZ[2][3][BB][k][i][j]; lhsZ[2][4][BB][k][i][j]= lhsZ[2][4][BB][k][i][j] - coeff*lhsZ[3][4][BB][k][i][j]; lhsZ[2][0][CC][k][i][j] = lhsZ[2][0][CC][k][i][j] - coeff*lhsZ[3][0][CC][k][i][j]; lhsZ[2][1][CC][k][i][j] = lhsZ[2][1][CC][k][i][j] - coeff*lhsZ[3][1][CC][k][i][j]; lhsZ[2][2][CC][k][i][j] = lhsZ[2][2][CC][k][i][j] - coeff*lhsZ[3][2][CC][k][i][j]; lhsZ[2][3][CC][k][i][j] = lhsZ[2][3][CC][k][i][j] - coeff*lhsZ[3][3][CC][k][i][j]; lhsZ[2][4][CC][k][i][j] = lhsZ[2][4][CC][k][i][j] - coeff*lhsZ[3][4][CC][k][i][j]; rhs[k][j][i][2] = rhs[k][j][i][2] - coeff*rhs[k][j][i][3]; coeff = lhsZ[4][3][BB][k][i][j]; lhsZ[4][4][BB][k][i][j]= lhsZ[4][4][BB][k][i][j] - coeff*lhsZ[3][4][BB][k][i][j]; lhsZ[4][0][CC][k][i][j] = lhsZ[4][0][CC][k][i][j] - coeff*lhsZ[3][0][CC][k][i][j]; lhsZ[4][1][CC][k][i][j] = lhsZ[4][1][CC][k][i][j] - coeff*lhsZ[3][1][CC][k][i][j]; lhsZ[4][2][CC][k][i][j] = lhsZ[4][2][CC][k][i][j] - coeff*lhsZ[3][2][CC][k][i][j]; lhsZ[4][3][CC][k][i][j] = lhsZ[4][3][CC][k][i][j] - coeff*lhsZ[3][3][CC][k][i][j]; lhsZ[4][4][CC][k][i][j] = lhsZ[4][4][CC][k][i][j] - coeff*lhsZ[3][4][CC][k][i][j]; 
rhs[k][j][i][4] = rhs[k][j][i][4] - coeff*rhs[k][j][i][3]; pivot = 1.00/lhsZ[4][4][BB][k][i][j]; lhsZ[4][0][CC][k][i][j] = lhsZ[4][0][CC][k][i][j]*pivot; lhsZ[4][1][CC][k][i][j] = lhsZ[4][1][CC][k][i][j]*pivot; lhsZ[4][2][CC][k][i][j] = lhsZ[4][2][CC][k][i][j]*pivot; lhsZ[4][3][CC][k][i][j] = lhsZ[4][3][CC][k][i][j]*pivot; lhsZ[4][4][CC][k][i][j] = lhsZ[4][4][CC][k][i][j]*pivot; rhs[k][j][i][4] = rhs[k][j][i][4] *pivot; coeff = lhsZ[0][4][BB][k][i][j]; lhsZ[0][0][CC][k][i][j] = lhsZ[0][0][CC][k][i][j] - coeff*lhsZ[4][0][CC][k][i][j]; lhsZ[0][1][CC][k][i][j] = lhsZ[0][1][CC][k][i][j] - coeff*lhsZ[4][1][CC][k][i][j]; lhsZ[0][2][CC][k][i][j] = lhsZ[0][2][CC][k][i][j] - coeff*lhsZ[4][2][CC][k][i][j]; lhsZ[0][3][CC][k][i][j] = lhsZ[0][3][CC][k][i][j] - coeff*lhsZ[4][3][CC][k][i][j]; lhsZ[0][4][CC][k][i][j] = lhsZ[0][4][CC][k][i][j] - coeff*lhsZ[4][4][CC][k][i][j]; rhs[k][j][i][0] = rhs[k][j][i][0] - coeff*rhs[k][j][i][4]; coeff = lhsZ[1][4][BB][k][i][j]; lhsZ[1][0][CC][k][i][j] = lhsZ[1][0][CC][k][i][j] - coeff*lhsZ[4][0][CC][k][i][j]; lhsZ[1][1][CC][k][i][j] = lhsZ[1][1][CC][k][i][j] - coeff*lhsZ[4][1][CC][k][i][j]; lhsZ[1][2][CC][k][i][j] = lhsZ[1][2][CC][k][i][j] - coeff*lhsZ[4][2][CC][k][i][j]; lhsZ[1][3][CC][k][i][j] = lhsZ[1][3][CC][k][i][j] - coeff*lhsZ[4][3][CC][k][i][j]; lhsZ[1][4][CC][k][i][j] = lhsZ[1][4][CC][k][i][j] - coeff*lhsZ[4][4][CC][k][i][j]; rhs[k][j][i][1] = rhs[k][j][i][1] - coeff*rhs[k][j][i][4]; coeff = lhsZ[2][4][BB][k][i][j]; lhsZ[2][0][CC][k][i][j] = lhsZ[2][0][CC][k][i][j] - coeff*lhsZ[4][0][CC][k][i][j]; lhsZ[2][1][CC][k][i][j] = lhsZ[2][1][CC][k][i][j] - coeff*lhsZ[4][1][CC][k][i][j]; lhsZ[2][2][CC][k][i][j] = lhsZ[2][2][CC][k][i][j] - coeff*lhsZ[4][2][CC][k][i][j]; lhsZ[2][3][CC][k][i][j] = lhsZ[2][3][CC][k][i][j] - coeff*lhsZ[4][3][CC][k][i][j]; lhsZ[2][4][CC][k][i][j] = lhsZ[2][4][CC][k][i][j] - coeff*lhsZ[4][4][CC][k][i][j]; rhs[k][j][i][2] = rhs[k][j][i][2] - coeff*rhs[k][j][i][4]; coeff = lhsZ[3][4][BB][k][i][j]; 
lhsZ[3][0][CC][k][i][j] = lhsZ[3][0][CC][k][i][j] - coeff*lhsZ[4][0][CC][k][i][j]; lhsZ[3][1][CC][k][i][j] = lhsZ[3][1][CC][k][i][j] - coeff*lhsZ[4][1][CC][k][i][j]; lhsZ[3][2][CC][k][i][j] = lhsZ[3][2][CC][k][i][j] - coeff*lhsZ[4][2][CC][k][i][j]; lhsZ[3][3][CC][k][i][j] = lhsZ[3][3][CC][k][i][j] - coeff*lhsZ[4][3][CC][k][i][j]; lhsZ[3][4][CC][k][i][j] = lhsZ[3][4][CC][k][i][j] - coeff*lhsZ[4][4][CC][k][i][j]; rhs[k][j][i][3] = rhs[k][j][i][3] - coeff*rhs[k][j][i][4]; }/*end loop k*/ }/*end loop i*/ }/*end loop j*/ //--------------------------------------------------------------------- // Now finish up special cases for last cell //--------------------------------------------------------------------- //--------------------------------------------------------------------- // rhs(ksize) = rhs(ksize) - A*rhs(ksize-1) //--------------------------------------------------------------------- //matvec_sub(lhsZ[i][j][AA], rhs[ksize-1][ksize][i][j], rhs[ksize][j][i]); #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j) #else #pragma omp target teams distribute parallel for simd collapse(2) #endif for (i = 1; i <= gp02; i++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (j = 1; j <= gp12; j++) { /* for(m = 0; m < 5; m++){ rhs[ksize][j][i][m] = rhs[ksize][j][i][m] - lhsZ[m][0][AA][ksize][i][j]*rhs[ksize-1][j][i][0] - lhsZ[m][1][AA][ksize][i][j]*rhs[ksize-1][j][i][1] - lhsZ[m][2][AA][ksize][i][j]*rhs[ksize-1][j][i][2] - lhsZ[m][3][AA][ksize][i][j]*rhs[ksize-1][j][i][3] - lhsZ[m][4][AA][ksize][i][j]*rhs[ksize-1][j][i][4]; } */ rhs[ksize][j][i][0] = rhs[ksize][j][i][0] - lhsZ[0][0][AA][ksize][i][j]*rhs[ksize-1][j][i][0] - lhsZ[0][1][AA][ksize][i][j]*rhs[ksize-1][j][i][1] - lhsZ[0][2][AA][ksize][i][j]*rhs[ksize-1][j][i][2] - lhsZ[0][3][AA][ksize][i][j]*rhs[ksize-1][j][i][3] - lhsZ[0][4][AA][ksize][i][j]*rhs[ksize-1][j][i][4]; rhs[ksize][j][i][1] = rhs[ksize][j][i][1] - lhsZ[1][0][AA][ksize][i][j]*rhs[ksize-1][j][i][0] - 
lhsZ[1][1][AA][ksize][i][j]*rhs[ksize-1][j][i][1] - lhsZ[1][2][AA][ksize][i][j]*rhs[ksize-1][j][i][2] - lhsZ[1][3][AA][ksize][i][j]*rhs[ksize-1][j][i][3] - lhsZ[1][4][AA][ksize][i][j]*rhs[ksize-1][j][i][4]; rhs[ksize][j][i][2] = rhs[ksize][j][i][2] - lhsZ[2][0][AA][ksize][i][j]*rhs[ksize-1][j][i][0] - lhsZ[2][1][AA][ksize][i][j]*rhs[ksize-1][j][i][1] - lhsZ[2][2][AA][ksize][i][j]*rhs[ksize-1][j][i][2] - lhsZ[2][3][AA][ksize][i][j]*rhs[ksize-1][j][i][3] - lhsZ[2][4][AA][ksize][i][j]*rhs[ksize-1][j][i][4]; rhs[ksize][j][i][3] = rhs[ksize][j][i][3] - lhsZ[3][0][AA][ksize][i][j]*rhs[ksize-1][j][i][0] - lhsZ[3][1][AA][ksize][i][j]*rhs[ksize-1][j][i][1] - lhsZ[3][2][AA][ksize][i][j]*rhs[ksize-1][j][i][2] - lhsZ[3][3][AA][ksize][i][j]*rhs[ksize-1][j][i][3] - lhsZ[3][4][AA][ksize][i][j]*rhs[ksize-1][j][i][4]; rhs[ksize][j][i][4] = rhs[ksize][j][i][4] - lhsZ[4][0][AA][ksize][i][j]*rhs[ksize-1][j][i][0] - lhsZ[4][1][AA][ksize][i][j]*rhs[ksize-1][j][i][1] - lhsZ[4][2][AA][ksize][i][j]*rhs[ksize-1][j][i][2] - lhsZ[4][3][AA][ksize][i][j]*rhs[ksize-1][j][i][3] - lhsZ[4][4][AA][ksize][i][j]*rhs[ksize-1][j][i][4]; } } //--------------------------------------------------------------------- // B(ksize) = B(ksize) - C(ksize-1)*A(ksize) // matmul_sub(AA,i,j,ksize,c, // $ CC,i,j,ksize-1,c,BB,i,j,ksize) //--------------------------------------------------------------------- //matmul_sub(lhsZ[ksize-1][i][AA], lhsZ[j][ksize][i][j][CC], lhsZ[j][i][ksize][BB]); #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j) #else #pragma omp target teams distribute parallel for simd collapse(2) #endif for (i = 1; i <= gp02; i++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (j = 1; j <= gp12; j++) { /* for(m = 0; m < 5; m++){ for(n = 0; n < 5; n++){ lhsZ[n][m][BB][ksize][i][j] = lhsZ[n][m][BB][ksize][i][j] - lhsZ[n][0][AA][ksize][i][j]*lhsZ[0][m][CC][ksize-1][i][j] - lhsZ[n][1][AA][ksize][i][j]*lhsZ[1][m][CC][ksize-1][i][j] - 
lhsZ[n][2][AA][ksize][i][j]*lhsZ[2][m][CC][ksize-1][i][j] - lhsZ[n][3][AA][ksize][i][j]*lhsZ[3][m][CC][ksize-1][i][j] - lhsZ[n][4][AA][ksize][i][j]*lhsZ[4][m][CC][ksize-1][i][j]; } } */ lhsZ[0][0][BB][ksize][i][j] = lhsZ[0][0][BB][ksize][i][j] - lhsZ[0][0][AA][ksize][i][j]*lhsZ[0][0][CC][ksize-1][i][j] - lhsZ[0][1][AA][ksize][i][j]*lhsZ[1][0][CC][ksize-1][i][j] - lhsZ[0][2][AA][ksize][i][j]*lhsZ[2][0][CC][ksize-1][i][j] - lhsZ[0][3][AA][ksize][i][j]*lhsZ[3][0][CC][ksize-1][i][j] - lhsZ[0][4][AA][ksize][i][j]*lhsZ[4][0][CC][ksize-1][i][j]; lhsZ[1][0][BB][ksize][i][j] = lhsZ[1][0][BB][ksize][i][j] - lhsZ[1][0][AA][ksize][i][j]*lhsZ[0][0][CC][ksize-1][i][j] - lhsZ[1][1][AA][ksize][i][j]*lhsZ[1][0][CC][ksize-1][i][j] - lhsZ[1][2][AA][ksize][i][j]*lhsZ[2][0][CC][ksize-1][i][j] - lhsZ[1][3][AA][ksize][i][j]*lhsZ[3][0][CC][ksize-1][i][j] - lhsZ[1][4][AA][ksize][i][j]*lhsZ[4][0][CC][ksize-1][i][j]; lhsZ[2][0][BB][ksize][i][j] = lhsZ[2][0][BB][ksize][i][j] - lhsZ[2][0][AA][ksize][i][j]*lhsZ[0][0][CC][ksize-1][i][j] - lhsZ[2][1][AA][ksize][i][j]*lhsZ[1][0][CC][ksize-1][i][j] - lhsZ[2][2][AA][ksize][i][j]*lhsZ[2][0][CC][ksize-1][i][j] - lhsZ[2][3][AA][ksize][i][j]*lhsZ[3][0][CC][ksize-1][i][j] - lhsZ[2][4][AA][ksize][i][j]*lhsZ[4][0][CC][ksize-1][i][j]; lhsZ[3][0][BB][ksize][i][j] = lhsZ[3][0][BB][ksize][i][j] - lhsZ[3][0][AA][ksize][i][j]*lhsZ[0][0][CC][ksize-1][i][j] - lhsZ[3][1][AA][ksize][i][j]*lhsZ[1][0][CC][ksize-1][i][j] - lhsZ[3][2][AA][ksize][i][j]*lhsZ[2][0][CC][ksize-1][i][j] - lhsZ[3][3][AA][ksize][i][j]*lhsZ[3][0][CC][ksize-1][i][j] - lhsZ[3][4][AA][ksize][i][j]*lhsZ[4][0][CC][ksize-1][i][j]; lhsZ[4][0][BB][ksize][i][j] = lhsZ[4][0][BB][ksize][i][j] - lhsZ[4][0][AA][ksize][i][j]*lhsZ[0][0][CC][ksize-1][i][j] - lhsZ[4][1][AA][ksize][i][j]*lhsZ[1][0][CC][ksize-1][i][j] - lhsZ[4][2][AA][ksize][i][j]*lhsZ[2][0][CC][ksize-1][i][j] - lhsZ[4][3][AA][ksize][i][j]*lhsZ[3][0][CC][ksize-1][i][j] - lhsZ[4][4][AA][ksize][i][j]*lhsZ[4][0][CC][ksize-1][i][j]; 
lhsZ[0][1][BB][ksize][i][j] = lhsZ[0][1][BB][ksize][i][j] - lhsZ[0][0][AA][ksize][i][j]*lhsZ[0][1][CC][ksize-1][i][j] - lhsZ[0][1][AA][ksize][i][j]*lhsZ[1][1][CC][ksize-1][i][j] - lhsZ[0][2][AA][ksize][i][j]*lhsZ[2][1][CC][ksize-1][i][j] - lhsZ[0][3][AA][ksize][i][j]*lhsZ[3][1][CC][ksize-1][i][j] - lhsZ[0][4][AA][ksize][i][j]*lhsZ[4][1][CC][ksize-1][i][j]; lhsZ[1][1][BB][ksize][i][j] = lhsZ[1][1][BB][ksize][i][j] - lhsZ[1][0][AA][ksize][i][j]*lhsZ[0][1][CC][ksize-1][i][j] - lhsZ[1][1][AA][ksize][i][j]*lhsZ[1][1][CC][ksize-1][i][j] - lhsZ[1][2][AA][ksize][i][j]*lhsZ[2][1][CC][ksize-1][i][j] - lhsZ[1][3][AA][ksize][i][j]*lhsZ[3][1][CC][ksize-1][i][j] - lhsZ[1][4][AA][ksize][i][j]*lhsZ[4][1][CC][ksize-1][i][j]; lhsZ[2][1][BB][ksize][i][j] = lhsZ[2][1][BB][ksize][i][j] - lhsZ[2][0][AA][ksize][i][j]*lhsZ[0][1][CC][ksize-1][i][j] - lhsZ[2][1][AA][ksize][i][j]*lhsZ[1][1][CC][ksize-1][i][j] - lhsZ[2][2][AA][ksize][i][j]*lhsZ[2][1][CC][ksize-1][i][j] - lhsZ[2][3][AA][ksize][i][j]*lhsZ[3][1][CC][ksize-1][i][j] - lhsZ[2][4][AA][ksize][i][j]*lhsZ[4][1][CC][ksize-1][i][j]; lhsZ[3][1][BB][ksize][i][j] = lhsZ[3][1][BB][ksize][i][j] - lhsZ[3][0][AA][ksize][i][j]*lhsZ[0][1][CC][ksize-1][i][j] - lhsZ[3][1][AA][ksize][i][j]*lhsZ[1][1][CC][ksize-1][i][j] - lhsZ[3][2][AA][ksize][i][j]*lhsZ[2][1][CC][ksize-1][i][j] - lhsZ[3][3][AA][ksize][i][j]*lhsZ[3][1][CC][ksize-1][i][j] - lhsZ[3][4][AA][ksize][i][j]*lhsZ[4][1][CC][ksize-1][i][j]; lhsZ[4][1][BB][ksize][i][j] = lhsZ[4][1][BB][ksize][i][j] - lhsZ[4][0][AA][ksize][i][j]*lhsZ[0][1][CC][ksize-1][i][j] - lhsZ[4][1][AA][ksize][i][j]*lhsZ[1][1][CC][ksize-1][i][j] - lhsZ[4][2][AA][ksize][i][j]*lhsZ[2][1][CC][ksize-1][i][j] - lhsZ[4][3][AA][ksize][i][j]*lhsZ[3][1][CC][ksize-1][i][j] - lhsZ[4][4][AA][ksize][i][j]*lhsZ[4][1][CC][ksize-1][i][j]; lhsZ[0][2][BB][ksize][i][j] = lhsZ[0][2][BB][ksize][i][j] - lhsZ[0][0][AA][ksize][i][j]*lhsZ[0][2][CC][ksize-1][i][j] - lhsZ[0][1][AA][ksize][i][j]*lhsZ[1][2][CC][ksize-1][i][j] - 
lhsZ[0][2][AA][ksize][i][j]*lhsZ[2][2][CC][ksize-1][i][j] - lhsZ[0][3][AA][ksize][i][j]*lhsZ[3][2][CC][ksize-1][i][j] - lhsZ[0][4][AA][ksize][i][j]*lhsZ[4][2][CC][ksize-1][i][j]; lhsZ[1][2][BB][ksize][i][j] = lhsZ[1][2][BB][ksize][i][j] - lhsZ[1][0][AA][ksize][i][j]*lhsZ[0][2][CC][ksize-1][i][j] - lhsZ[1][1][AA][ksize][i][j]*lhsZ[1][2][CC][ksize-1][i][j] - lhsZ[1][2][AA][ksize][i][j]*lhsZ[2][2][CC][ksize-1][i][j] - lhsZ[1][3][AA][ksize][i][j]*lhsZ[3][2][CC][ksize-1][i][j] - lhsZ[1][4][AA][ksize][i][j]*lhsZ[4][2][CC][ksize-1][i][j]; lhsZ[2][2][BB][ksize][i][j] = lhsZ[2][2][BB][ksize][i][j] - lhsZ[2][0][AA][ksize][i][j]*lhsZ[0][2][CC][ksize-1][i][j] - lhsZ[2][1][AA][ksize][i][j]*lhsZ[1][2][CC][ksize-1][i][j] - lhsZ[2][2][AA][ksize][i][j]*lhsZ[2][2][CC][ksize-1][i][j] - lhsZ[2][3][AA][ksize][i][j]*lhsZ[3][2][CC][ksize-1][i][j] - lhsZ[2][4][AA][ksize][i][j]*lhsZ[4][2][CC][ksize-1][i][j]; lhsZ[3][2][BB][ksize][i][j] = lhsZ[3][2][BB][ksize][i][j] - lhsZ[3][0][AA][ksize][i][j]*lhsZ[0][2][CC][ksize-1][i][j] - lhsZ[3][1][AA][ksize][i][j]*lhsZ[1][2][CC][ksize-1][i][j] - lhsZ[3][2][AA][ksize][i][j]*lhsZ[2][2][CC][ksize-1][i][j] - lhsZ[3][3][AA][ksize][i][j]*lhsZ[3][2][CC][ksize-1][i][j] - lhsZ[3][4][AA][ksize][i][j]*lhsZ[4][2][CC][ksize-1][i][j]; lhsZ[4][2][BB][ksize][i][j] = lhsZ[4][2][BB][ksize][i][j] - lhsZ[4][0][AA][ksize][i][j]*lhsZ[0][2][CC][ksize-1][i][j] - lhsZ[4][1][AA][ksize][i][j]*lhsZ[1][2][CC][ksize-1][i][j] - lhsZ[4][2][AA][ksize][i][j]*lhsZ[2][2][CC][ksize-1][i][j] - lhsZ[4][3][AA][ksize][i][j]*lhsZ[3][2][CC][ksize-1][i][j] - lhsZ[4][4][AA][ksize][i][j]*lhsZ[4][2][CC][ksize-1][i][j]; lhsZ[0][3][BB][ksize][i][j] = lhsZ[0][3][BB][ksize][i][j] - lhsZ[0][0][AA][ksize][i][j]*lhsZ[0][3][CC][ksize-1][i][j] - lhsZ[0][1][AA][ksize][i][j]*lhsZ[1][3][CC][ksize-1][i][j] - lhsZ[0][2][AA][ksize][i][j]*lhsZ[2][3][CC][ksize-1][i][j] - lhsZ[0][3][AA][ksize][i][j]*lhsZ[3][3][CC][ksize-1][i][j] - lhsZ[0][4][AA][ksize][i][j]*lhsZ[4][3][CC][ksize-1][i][j]; 
lhsZ[1][3][BB][ksize][i][j] = lhsZ[1][3][BB][ksize][i][j] - lhsZ[1][0][AA][ksize][i][j]*lhsZ[0][3][CC][ksize-1][i][j] - lhsZ[1][1][AA][ksize][i][j]*lhsZ[1][3][CC][ksize-1][i][j] - lhsZ[1][2][AA][ksize][i][j]*lhsZ[2][3][CC][ksize-1][i][j] - lhsZ[1][3][AA][ksize][i][j]*lhsZ[3][3][CC][ksize-1][i][j] - lhsZ[1][4][AA][ksize][i][j]*lhsZ[4][3][CC][ksize-1][i][j]; lhsZ[2][3][BB][ksize][i][j] = lhsZ[2][3][BB][ksize][i][j] - lhsZ[2][0][AA][ksize][i][j]*lhsZ[0][3][CC][ksize-1][i][j] - lhsZ[2][1][AA][ksize][i][j]*lhsZ[1][3][CC][ksize-1][i][j] - lhsZ[2][2][AA][ksize][i][j]*lhsZ[2][3][CC][ksize-1][i][j] - lhsZ[2][3][AA][ksize][i][j]*lhsZ[3][3][CC][ksize-1][i][j] - lhsZ[2][4][AA][ksize][i][j]*lhsZ[4][3][CC][ksize-1][i][j]; lhsZ[3][3][BB][ksize][i][j] = lhsZ[3][3][BB][ksize][i][j] - lhsZ[3][0][AA][ksize][i][j]*lhsZ[0][3][CC][ksize-1][i][j] - lhsZ[3][1][AA][ksize][i][j]*lhsZ[1][3][CC][ksize-1][i][j] - lhsZ[3][2][AA][ksize][i][j]*lhsZ[2][3][CC][ksize-1][i][j] - lhsZ[3][3][AA][ksize][i][j]*lhsZ[3][3][CC][ksize-1][i][j] - lhsZ[3][4][AA][ksize][i][j]*lhsZ[4][3][CC][ksize-1][i][j]; lhsZ[4][3][BB][ksize][i][j] = lhsZ[4][3][BB][ksize][i][j] - lhsZ[4][0][AA][ksize][i][j]*lhsZ[0][3][CC][ksize-1][i][j] - lhsZ[4][1][AA][ksize][i][j]*lhsZ[1][3][CC][ksize-1][i][j] - lhsZ[4][2][AA][ksize][i][j]*lhsZ[2][3][CC][ksize-1][i][j] - lhsZ[4][3][AA][ksize][i][j]*lhsZ[3][3][CC][ksize-1][i][j] - lhsZ[4][4][AA][ksize][i][j]*lhsZ[4][3][CC][ksize-1][i][j]; lhsZ[0][4][BB][ksize][i][j] = lhsZ[0][4][BB][ksize][i][j] - lhsZ[0][0][AA][ksize][i][j]*lhsZ[0][4][CC][ksize-1][i][j] - lhsZ[0][1][AA][ksize][i][j]*lhsZ[1][4][CC][ksize-1][i][j] - lhsZ[0][2][AA][ksize][i][j]*lhsZ[2][4][CC][ksize-1][i][j] - lhsZ[0][3][AA][ksize][i][j]*lhsZ[3][4][CC][ksize-1][i][j] - lhsZ[0][4][AA][ksize][i][j]*lhsZ[4][4][CC][ksize-1][i][j]; lhsZ[1][4][BB][ksize][i][j] = lhsZ[1][4][BB][ksize][i][j] - lhsZ[1][0][AA][ksize][i][j]*lhsZ[0][4][CC][ksize-1][i][j] - lhsZ[1][1][AA][ksize][i][j]*lhsZ[1][4][CC][ksize-1][i][j] - 
lhsZ[1][2][AA][ksize][i][j]*lhsZ[2][4][CC][ksize-1][i][j] - lhsZ[1][3][AA][ksize][i][j]*lhsZ[3][4][CC][ksize-1][i][j] - lhsZ[1][4][AA][ksize][i][j]*lhsZ[4][4][CC][ksize-1][i][j]; lhsZ[2][4][BB][ksize][i][j] = lhsZ[2][4][BB][ksize][i][j] - lhsZ[2][0][AA][ksize][i][j]*lhsZ[0][4][CC][ksize-1][i][j] - lhsZ[2][1][AA][ksize][i][j]*lhsZ[1][4][CC][ksize-1][i][j] - lhsZ[2][2][AA][ksize][i][j]*lhsZ[2][4][CC][ksize-1][i][j] - lhsZ[2][3][AA][ksize][i][j]*lhsZ[3][4][CC][ksize-1][i][j] - lhsZ[2][4][AA][ksize][i][j]*lhsZ[4][4][CC][ksize-1][i][j]; lhsZ[3][4][BB][ksize][i][j] = lhsZ[3][4][BB][ksize][i][j] - lhsZ[3][0][AA][ksize][i][j]*lhsZ[0][4][CC][ksize-1][i][j] - lhsZ[3][1][AA][ksize][i][j]*lhsZ[1][4][CC][ksize-1][i][j] - lhsZ[3][2][AA][ksize][i][j]*lhsZ[2][4][CC][ksize-1][i][j] - lhsZ[3][3][AA][ksize][i][j]*lhsZ[3][4][CC][ksize-1][i][j] - lhsZ[3][4][AA][ksize][i][j]*lhsZ[4][4][CC][ksize-1][i][j]; lhsZ[4][4][BB][ksize][i][j] = lhsZ[4][4][BB][ksize][i][j] - lhsZ[4][0][AA][ksize][i][j]*lhsZ[0][4][CC][ksize-1][i][j] - lhsZ[4][1][AA][ksize][i][j]*lhsZ[1][4][CC][ksize-1][i][j] - lhsZ[4][2][AA][ksize][i][j]*lhsZ[2][4][CC][ksize-1][i][j] - lhsZ[4][3][AA][ksize][i][j]*lhsZ[3][4][CC][ksize-1][i][j] - lhsZ[4][4][AA][ksize][i][j]*lhsZ[4][4][CC][ksize-1][i][j]; } } //--------------------------------------------------------------------- // multiply rhs(ksize) by b_inverse(ksize) and copy to rhs //--------------------------------------------------------------------- //binvrhs( lhsZ[i][j][BB], rhs[ksize][ksize][i][j] ); #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,pivot,coeff) #else #pragma omp target teams distribute parallel for simd private(pivot,coeff) collapse(2) #endif for (i = 1; i <= gp02; i++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(pivot,coeff) #endif for (j = 1; j <= gp12; j++) { /* for(m = 0; m < 5; m++){ pivot = 1.00/lhsZ[m][m][BB][ksize][i][j]; for(n = m+1; n < 5; n++){ lhsZ[m][n][BB][ksize][i][j] = 
lhsZ[m][n][BB][ksize][i][j]*pivot; } rhs[ksize][j][i][m] = rhs[ksize][j][i][m]*pivot; for(n = 0; n < 5; n++){ if(n != m){ coeff = lhsZ[n][m][BB][ksize][i][j]; for(z = m+1; z < 5; z++){ lhsZ[n][z][BB][ksize][i][j] = lhsZ[n][z][BB][ksize][i][j] - coeff*lhsZ[m][z][BB][ksize][i][j]; } rhs[ksize][j][i][n] = rhs[ksize][j][i][n] - coeff*rhs[ksize][j][i][m]; } } } */ pivot = 1.00/lhsZ[0][0][BB][ksize][i][j]; lhsZ[0][1][BB][ksize][i][j] = lhsZ[0][1][BB][ksize][i][j]*pivot; lhsZ[0][2][BB][ksize][i][j] = lhsZ[0][2][BB][ksize][i][j]*pivot; lhsZ[0][3][BB][ksize][i][j] = lhsZ[0][3][BB][ksize][i][j]*pivot; lhsZ[0][4][BB][ksize][i][j] = lhsZ[0][4][BB][ksize][i][j]*pivot; rhs[ksize][j][i][0] = rhs[ksize][j][i][0] *pivot; coeff = lhsZ[1][0][BB][ksize][i][j]; lhsZ[1][1][BB][ksize][i][j]= lhsZ[1][1][BB][ksize][i][j] - coeff*lhsZ[0][1][BB][ksize][i][j]; lhsZ[1][2][BB][ksize][i][j]= lhsZ[1][2][BB][ksize][i][j] - coeff*lhsZ[0][2][BB][ksize][i][j]; lhsZ[1][3][BB][ksize][i][j]= lhsZ[1][3][BB][ksize][i][j] - coeff*lhsZ[0][3][BB][ksize][i][j]; lhsZ[1][4][BB][ksize][i][j]= lhsZ[1][4][BB][ksize][i][j] - coeff*lhsZ[0][4][BB][ksize][i][j]; rhs[ksize][j][i][1] = rhs[ksize][j][i][1] - coeff*rhs[ksize][j][i][0]; coeff = lhsZ[2][0][BB][ksize][i][j]; lhsZ[2][1][BB][ksize][i][j]= lhsZ[2][1][BB][ksize][i][j] - coeff*lhsZ[0][1][BB][ksize][i][j]; lhsZ[2][2][BB][ksize][i][j]= lhsZ[2][2][BB][ksize][i][j] - coeff*lhsZ[0][2][BB][ksize][i][j]; lhsZ[2][3][BB][ksize][i][j]= lhsZ[2][3][BB][ksize][i][j] - coeff*lhsZ[0][3][BB][ksize][i][j]; lhsZ[2][4][BB][ksize][i][j]= lhsZ[2][4][BB][ksize][i][j] - coeff*lhsZ[0][4][BB][ksize][i][j]; rhs[ksize][j][i][2] = rhs[ksize][j][i][2] - coeff*rhs[ksize][j][i][0]; coeff = lhsZ[3][0][BB][ksize][i][j]; lhsZ[3][1][BB][ksize][i][j]= lhsZ[3][1][BB][ksize][i][j] - coeff*lhsZ[0][1][BB][ksize][i][j]; lhsZ[3][2][BB][ksize][i][j]= lhsZ[3][2][BB][ksize][i][j] - coeff*lhsZ[0][2][BB][ksize][i][j]; lhsZ[3][3][BB][ksize][i][j]= lhsZ[3][3][BB][ksize][i][j] - 
coeff*lhsZ[0][3][BB][ksize][i][j]; lhsZ[3][4][BB][ksize][i][j]= lhsZ[3][4][BB][ksize][i][j] - coeff*lhsZ[0][4][BB][ksize][i][j]; rhs[ksize][j][i][3] = rhs[ksize][j][i][3] - coeff*rhs[ksize][j][i][0]; coeff = lhsZ[4][0][BB][ksize][i][j]; lhsZ[4][1][BB][ksize][i][j]= lhsZ[4][1][BB][ksize][i][j] - coeff*lhsZ[0][1][BB][ksize][i][j]; lhsZ[4][2][BB][ksize][i][j]= lhsZ[4][2][BB][ksize][i][j] - coeff*lhsZ[0][2][BB][ksize][i][j]; lhsZ[4][3][BB][ksize][i][j]= lhsZ[4][3][BB][ksize][i][j] - coeff*lhsZ[0][3][BB][ksize][i][j]; lhsZ[4][4][BB][ksize][i][j]= lhsZ[4][4][BB][ksize][i][j] - coeff*lhsZ[0][4][BB][ksize][i][j]; rhs[ksize][j][i][4] = rhs[ksize][j][i][4] - coeff*rhs[ksize][j][i][0]; pivot = 1.00/lhsZ[1][1][BB][ksize][i][j]; lhsZ[1][2][BB][ksize][i][j] = lhsZ[1][2][BB][ksize][i][j]*pivot; lhsZ[1][3][BB][ksize][i][j] = lhsZ[1][3][BB][ksize][i][j]*pivot; lhsZ[1][4][BB][ksize][i][j] = lhsZ[1][4][BB][ksize][i][j]*pivot; rhs[ksize][j][i][1] = rhs[ksize][j][i][1] *pivot; coeff = lhsZ[0][1][BB][ksize][i][j]; lhsZ[0][2][BB][ksize][i][j]= lhsZ[0][2][BB][ksize][i][j] - coeff*lhsZ[1][2][BB][ksize][i][j]; lhsZ[0][3][BB][ksize][i][j]= lhsZ[0][3][BB][ksize][i][j] - coeff*lhsZ[1][3][BB][ksize][i][j]; lhsZ[0][4][BB][ksize][i][j]= lhsZ[0][4][BB][ksize][i][j] - coeff*lhsZ[1][4][BB][ksize][i][j]; rhs[ksize][j][i][0] = rhs[ksize][j][i][0] - coeff*rhs[ksize][j][i][1]; coeff = lhsZ[2][1][BB][ksize][i][j]; lhsZ[2][2][BB][ksize][i][j]= lhsZ[2][2][BB][ksize][i][j] - coeff*lhsZ[1][2][BB][ksize][i][j]; lhsZ[2][3][BB][ksize][i][j]= lhsZ[2][3][BB][ksize][i][j] - coeff*lhsZ[1][3][BB][ksize][i][j]; lhsZ[2][4][BB][ksize][i][j]= lhsZ[2][4][BB][ksize][i][j] - coeff*lhsZ[1][4][BB][ksize][i][j]; rhs[ksize][j][i][2] = rhs[ksize][j][i][2] - coeff*rhs[ksize][j][i][1]; coeff = lhsZ[3][1][BB][ksize][i][j]; lhsZ[3][2][BB][ksize][i][j]= lhsZ[3][2][BB][ksize][i][j] - coeff*lhsZ[1][2][BB][ksize][i][j]; lhsZ[3][3][BB][ksize][i][j]= lhsZ[3][3][BB][ksize][i][j] - coeff*lhsZ[1][3][BB][ksize][i][j]; 
lhsZ[3][4][BB][ksize][i][j]= lhsZ[3][4][BB][ksize][i][j] - coeff*lhsZ[1][4][BB][ksize][i][j]; rhs[ksize][j][i][3] = rhs[ksize][j][i][3] - coeff*rhs[ksize][j][i][1]; coeff = lhsZ[4][1][BB][ksize][i][j]; lhsZ[4][2][BB][ksize][i][j]= lhsZ[4][2][BB][ksize][i][j] - coeff*lhsZ[1][2][BB][ksize][i][j]; lhsZ[4][3][BB][ksize][i][j]= lhsZ[4][3][BB][ksize][i][j] - coeff*lhsZ[1][3][BB][ksize][i][j]; lhsZ[4][4][BB][ksize][i][j]= lhsZ[4][4][BB][ksize][i][j] - coeff*lhsZ[1][4][BB][ksize][i][j]; rhs[ksize][j][i][4] = rhs[ksize][j][i][4] - coeff*rhs[ksize][j][i][1]; pivot = 1.00/lhsZ[2][2][BB][ksize][i][j]; lhsZ[2][3][BB][ksize][i][j] = lhsZ[2][3][BB][ksize][i][j]*pivot; lhsZ[2][4][BB][ksize][i][j] = lhsZ[2][4][BB][ksize][i][j]*pivot; rhs[ksize][j][i][2] = rhs[ksize][j][i][2] *pivot; coeff = lhsZ[0][2][BB][ksize][i][j]; lhsZ[0][3][BB][ksize][i][j]= lhsZ[0][3][BB][ksize][i][j] - coeff*lhsZ[2][3][BB][ksize][i][j]; lhsZ[0][4][BB][ksize][i][j]= lhsZ[0][4][BB][ksize][i][j] - coeff*lhsZ[2][4][BB][ksize][i][j]; rhs[ksize][j][i][0] = rhs[ksize][j][i][0] - coeff*rhs[ksize][j][i][2]; coeff = lhsZ[1][2][BB][ksize][i][j]; lhsZ[1][3][BB][ksize][i][j]= lhsZ[1][3][BB][ksize][i][j] - coeff*lhsZ[2][3][BB][ksize][i][j]; lhsZ[1][4][BB][ksize][i][j]= lhsZ[1][4][BB][ksize][i][j] - coeff*lhsZ[2][4][BB][ksize][i][j]; rhs[ksize][j][i][1] = rhs[ksize][j][i][1] - coeff*rhs[ksize][j][i][2]; coeff = lhsZ[3][2][BB][ksize][i][j]; lhsZ[3][3][BB][ksize][i][j]= lhsZ[3][3][BB][ksize][i][j] - coeff*lhsZ[2][3][BB][ksize][i][j]; lhsZ[3][4][BB][ksize][i][j]= lhsZ[3][4][BB][ksize][i][j] - coeff*lhsZ[2][4][BB][ksize][i][j]; rhs[ksize][j][i][3] = rhs[ksize][j][i][3] - coeff*rhs[ksize][j][i][2]; coeff = lhsZ[4][2][BB][ksize][i][j]; lhsZ[4][3][BB][ksize][i][j]= lhsZ[4][3][BB][ksize][i][j] - coeff*lhsZ[2][3][BB][ksize][i][j]; lhsZ[4][4][BB][ksize][i][j]= lhsZ[4][4][BB][ksize][i][j] - coeff*lhsZ[2][4][BB][ksize][i][j]; rhs[ksize][j][i][4] = rhs[ksize][j][i][4] - coeff*rhs[ksize][j][i][2]; pivot = 
1.00/lhsZ[3][3][BB][ksize][i][j]; lhsZ[3][4][BB][ksize][i][j] = lhsZ[3][4][BB][ksize][i][j]*pivot; rhs[ksize][j][i][3] = rhs[ksize][j][i][3] *pivot; coeff = lhsZ[0][3][BB][ksize][i][j]; lhsZ[0][4][BB][ksize][i][j]= lhsZ[0][4][BB][ksize][i][j] - coeff*lhsZ[3][4][BB][ksize][i][j]; rhs[ksize][j][i][0] = rhs[ksize][j][i][0] - coeff*rhs[ksize][j][i][3]; coeff = lhsZ[1][3][BB][ksize][i][j]; lhsZ[1][4][BB][ksize][i][j]= lhsZ[1][4][BB][ksize][i][j] - coeff*lhsZ[3][4][BB][ksize][i][j]; rhs[ksize][j][i][1] = rhs[ksize][j][i][1] - coeff*rhs[ksize][j][i][3]; coeff = lhsZ[2][3][BB][ksize][i][j]; lhsZ[2][4][BB][ksize][i][j]= lhsZ[2][4][BB][ksize][i][j] - coeff*lhsZ[3][4][BB][ksize][i][j]; rhs[ksize][j][i][2] = rhs[ksize][j][i][2] - coeff*rhs[ksize][j][i][3]; coeff = lhsZ[4][3][BB][ksize][i][j]; lhsZ[4][4][BB][ksize][i][j]= lhsZ[4][4][BB][ksize][i][j] - coeff*lhsZ[3][4][BB][ksize][i][j]; rhs[ksize][j][i][4] = rhs[ksize][j][i][4] - coeff*rhs[ksize][j][i][3]; pivot = 1.00/lhsZ[4][4][BB][ksize][i][j]; rhs[ksize][j][i][4] = rhs[ksize][j][i][4] *pivot; coeff = lhsZ[0][4][BB][ksize][i][j]; rhs[ksize][j][i][0] = rhs[ksize][j][i][0] - coeff*rhs[ksize][j][i][4]; coeff = lhsZ[1][4][BB][ksize][i][j]; rhs[ksize][j][i][1] = rhs[ksize][j][i][1] - coeff*rhs[ksize][j][i][4]; coeff = lhsZ[2][4][BB][ksize][i][j]; rhs[ksize][j][i][2] = rhs[ksize][j][i][2] - coeff*rhs[ksize][j][i][4]; coeff = lhsZ[3][4][BB][ksize][i][j]; rhs[ksize][j][i][3] = rhs[ksize][j][i][3] - coeff*rhs[ksize][j][i][4]; } } //--------------------------------------------------------------------- //--------------------------------------------------------------------- //--------------------------------------------------------------------- // back solve: if last cell, then generate U(ksize)=rhs(ksize) // else assume U(ksize) is loaded in un pack backsub_info // so just use it // after u(kstart) will be sent to next cell //--------------------------------------------------------------------- for (k = ksize-1; k >= 0; k--) { #pragma 
omp target teams distribute parallel for collapse(2) private(i,j,m,n) for (j = 1; j <= gp12; j++) { for (i = 1; i <= gp02; i++) { for (m = 0; m < BLOCK_SIZE; m++) { for (n = 0; n < BLOCK_SIZE; n++) { rhs[k][j][i][m] = rhs[k][j][i][m] - lhsZ[m][n][CC][k][i][j]*rhs[k+1][j][i][n]; } } } } } }/*end omp target data */ }
ref3.c
#ifdef GEN_PICTURES static void do_draw(const struct parameters *p, size_t key, size_t h, size_t w, double (*restrict g)[h][w]) { begin_picture(key, w-2, h-2, p->io_tmin, p->io_tmax); size_t i, j; for (i = 1; i < h-1; ++i) for (j = 1; j < w-1; ++j) draw_point(j-1, i-1, (*g)[i][j]); end_picture(); } #endif static void do_copy(size_t h, size_t w, double (*restrict g)[h][w]) { size_t i; /* copy left and right column to opposite border */ #pragma omp parallel for schedule(static) for (i = 0; i < h; ++i) { (*g)[i][w-1] = (*g)[i][1]; (*g)[i][0] = (*g)[i][w-2]; } } /* Does the reduction step and return if the convergence has setteled */ static int fill_report(const struct parameters *p, struct results *r, size_t h, size_t w, double (*restrict a)[h][w], double (*restrict b)[h][w], double iter, struct timeval *before) { /* compute min/max/avg */ double tmin = INFINITY, tmax = -INFINITY; double sum = 0.0; double maxdiff = 0.0; struct timeval after; /* We have said that the final reduction does not need to be included. */ gettimeofday(&after, NULL); #pragma omp parallel for schedule(static, 1) //collapse(2) for (size_t i = 1; i < h - 1; ++i) for (size_t j = 1; j < w - 1; ++j) { double v = (*a)[i][j]; double v_old = (*b)[i][j]; double diff = fabs(v - v_old); sum += v; if (tmin > v) tmin = v; if (tmax < v) tmax = v; if (diff > maxdiff) maxdiff = diff; } r->niter = iter; r->maxdiff = maxdiff; r->tmin = tmin; r->tmax = tmax; r->tavg = sum / (p->N * p->M); r->time = (double)(after.tv_sec - before->tv_sec) + (double)(after.tv_usec - before->tv_usec) / 1e6; return (maxdiff >= p->threshold) ? 0 : 1; }
multiply.h
#pragma once #include "intgemm_config.h" #include "interleave.h" #include "intrinsics.h" #include "vec_traits.h" #include "callbacks.h" #include <cmath> //sqrt namespace intgemm { struct MeanStd { float mean; float stddev; }; INTGEMM_SSE2 static inline float MaxFloat32(__m128 a) { // Fold to just using the first 64 bits. __m128 second_half = _mm_shuffle_ps(a, a, 3 * 4 + 2); a = _mm_max_ps(a, second_half); // Fold to just using the first 32 bits. second_half = _mm_shuffle_ps(a, a, 1); a = _mm_max_ps(a, second_half); // This casting compiles to nothing. return *reinterpret_cast<float*>(&a); } INTGEMM_SSE2 static inline dvector_t<CPUType::SSE2, int> PermuteSummer(__m128i pack0123, __m128i pack4567) { // No op for 128 bits: already reduced fully. return { pack0123, pack4567 }; } INTGEMM_AVX2 static inline float MaxFloat32(__m256 a) { return MaxFloat32(max_ps(_mm256_castps256_ps128(a), _mm256_extractf128_ps(a, 1))); } INTGEMM_AVX2 static inline __m256i PermuteSummer(__m256i pack0123, __m256i pack4567) { // This instruction generates 1s 2s 3s 4s 5f 6f 7f 8f __m256i rev = _mm256_permute2f128_si256(pack0123, pack4567, 0x21); // This instruction generates 1f 2f 3f 4f 5s 6s 7s 8s __m256i blended = _mm256_blend_epi32(pack0123, pack4567, 0xf0); return _mm256_add_epi32(rev, blended); } /* https://stackoverflow.com/questions/6996764/fastest-way-to-do-horizontal-float-vector-sum-on-x86 */ INTGEMM_SSSE3 static inline float horizontalSum(__m128 a) { __m128 shuf = _mm_movehdup_ps(a); // broadcast elements 3,1 to 2,0 __m128 sums = _mm_add_ps(a, shuf); shuf = _mm_movehl_ps(shuf, sums); // high half -> low half sums = _mm_add_ss(sums, shuf); return _mm_cvtss_f32(sums); } INTGEMM_AVX2 static inline float horizontalSum(__m256 a) { __m128 vlow = _mm256_castps256_ps128(a); __m128 vhigh = _mm256_extractf128_ps(a, 1); // high 128 vlow = _mm_add_ps(vlow, vhigh); // add the low 128 return horizontalSum(vlow); // and inline the sse3 version, which is optimal for AVX } #ifdef 
INTGEMM_COMPILER_SUPPORTS_AVX512BW /* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */ INTGEMM_AVX512BW static inline __m256i PermuteSummer(__m512i pack0123, __m512i pack4567) { // Form [0th 128-bit register of pack0123, 0st 128-bit register of pack4567, 2nd 128-bit register of pack0123, 2nd 128-bit register of pack4567] __m512i mix0 = _mm512_mask_permutex_epi64(pack0123, 0xcc, pack4567, (0 << 4) | (1 << 6)); // Form [1st 128-bit register of pack0123, 1st 128-bit register of pack4567, 3rd 128-bit register of pack0123, 3rd 128-bit register of pack4567] __m512i mix1 = _mm512_mask_permutex_epi64(pack4567, 0x33, pack0123, 2 | (3 << 2)); __m512i added = _mm512_add_epi32(mix0, mix1); // Now we have 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7. // Fold register over itself. return _mm256_add_epi32(_mm512_castsi512_si256(added), _mm512_extracti64x4_epi64(added, 1)); } // Find the maximum float. static inline INTGEMM_AVX512F float MaxFloat32(__m512 a) { // _mm512_extractf32x8_ps is AVX512DQ but we don't care about masking. // So cast to pd, do AVX512F _mm512_extractf64x4_pd, then cast to ps. __m256 upper = _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(a), 1)); return MaxFloat32(max_ps(_mm512_castps512_ps256(a), upper)); } static inline INTGEMM_AVX512F float horizontalSum(__m512 a) { __m256 low = _mm512_castps512_ps256(a); __m256 high = _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(a),1)); return horizontalSum(low) + horizontalSum(high); } #endif // Quantize function used for SSSE3 and AVX2. // Separate function for thread to work around gcc 7 bug that doesn't imbue // target attributes across #pragma omp parallel. 
// Emits `QuantizeThread`: quantizes `count` floats from `input` into int8 at
// `output` using the precomputed multiplier `quant_mult`.
// NOTE(review): this worker appears to assume it runs inside an existing
// `omp parallel` region (it only issues `omp for`) and that `count` is a
// multiple of sizeof(Register) — confirm against INTGEMM_QUANTIZE below,
// which guarantees both.
#define INTGEMM_QUANTIZE_THREAD(target, Register, name) \
target static void QuantizeThread(const float *input, int8_t *output, float quant_mult, std::size_t count) { \
  name::QuantizeTile8 q(quant_mult); \
  _Pragma("omp for") \
  for (std::size_t i = 0; i < count; i += sizeof(Register)) { \
    *reinterpret_cast<Register*>(output + i) = q.Consecutive(input + i); \
  } \
}

// Emits `Quantize`: the full-size quantization entry point.  Both pointers
// must be register-aligned (asserted).  The bulk of the input — `fast_end`,
// size rounded down to a whole register — is handled in parallel by
// QuantizeThread; the remaining `overhang` elements are quantized into one
// full register on this thread, and only the valid bytes are written out
// via memcpy so no out-of-bounds store occurs.
#define INTGEMM_QUANTIZE(target, Register, name) \
target static void Quantize(const float *const input, int8_t *const output, float quant_mult, Index size) { \
  assert(reinterpret_cast<uintptr_t>(input) % sizeof(Register) == 0); \
  assert(reinterpret_cast<uintptr_t>(output) % sizeof(Register) == 0); \
  const std::size_t kBatch = sizeof(Register); \
  const std::size_t fast_end = size & ~(kBatch - 1); \
  _Pragma("omp parallel") \
  { \
    QuantizeThread(input, output, quant_mult, fast_end); \
  } \
  std::size_t overhang = size & (kBatch - 1); \
  if (!overhang) return; \
  name::QuantizeTile8 q(quant_mult); \
  /* Each does size(Register) / 32 == kBatch / 4 floats at a time.
   * If we're allowed to read one of them, then we can read the whole register. */ \
  const float *inputs[4]; \
  std::size_t i; \
  for (i = 0; i < (overhang + (kBatch / 4) - 1) / (kBatch / 4); ++i) { \
    inputs[i] = &input[fast_end + i * (kBatch / 4)]; \
  } \
  /* These will be clipped off. */ \
  for (; i < 4; ++i) { \
    inputs[i] = &input[fast_end]; \
  } \
  Register result = q.Tile(inputs[0], inputs[1], inputs[2], inputs[3]); \
  std::memcpy(output + (size & ~(kBatch - 1)), &result, overhang); \
}

/* Take 4 registers with 32-bit values to be horizontally added.  Reduce them
 * to one register with 32-bit values in the pattern 1 2 3 4 1 2 3 4, leaving
 * the final addition (which crosses 128-bit lanes) to the caller.
 */
#define INTGEMM_PACK0123(target, Register) \
target inline Register Pack0123(Register sum0, Register sum1, Register sum2, Register sum3) { \
  Interleave32(sum0, sum1); \
  Register pack01 = add_epi32(sum0, sum1); \
  Interleave32(sum2, sum3); \
  Register pack23 = add_epi32(sum2, sum3); \
  Interleave64(pack01, pack23); \
  return add_epi32(pack01, pack23); \
} \

INTGEMM_PACK0123(INTGEMM_SSE2, __m128i)
INTGEMM_PACK0123(INTGEMM_AVX2, __m256i)
#ifdef INTGEMM_COMPILER_SUPPORTS_AVX512BW
/* Only INTGEMM_AVX512F is necessary but due to GCC 5.4 bug we have to set INTGEMM_AVX512BW */
INTGEMM_PACK0123(INTGEMM_AVX512BW, __m512i)
#endif

// Deliver horizontally-summed results to the user-supplied callback.
// SSE2 overload: the pair of 128-bit registers covers output columns
// col_idx..col_idx+3 and col_idx+4..col_idx+7 respectively.
template <typename Callback>
INTGEMM_SSE2 static inline void RunCallback(Callback& callback_impl, dvector_t<CPUType::SSE2, int> total, Index row_idx, Index col_idx, Index rows, Index cols) {
  callback_impl(total.first, callbacks::OutputBufferInfo(row_idx, col_idx, rows, cols));
  callback_impl(total.second, callbacks::OutputBufferInfo(row_idx, col_idx + 4, rows, cols));
}

// AVX2 overload: a single 256-bit register holds all 8 output columns.
template <typename Callback>
INTGEMM_AVX2 static inline void RunCallback(Callback& callback_impl, vector_t<CPUType::AVX2, int> total, Index row_idx, Index col_idx, Index rows, Index cols) {
  callback_impl(total, callbacks::OutputBufferInfo(row_idx, col_idx, rows, cols));
}

// 16-bit multiplier for INTGEMM_SSE2, INTGEMM_AVX2, and AVX512.
// C = A * B * unquant_mult
//
// This has been substantially revised from Jacob Devlin's SSE code which is:
// Copyright (c) 2017 Microsoft Corporation

// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:

// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.

// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.

// A is a row-major quantized matrix (from PrepareA)
// B is a rearranged quantized matrix (from PrepareB)
// C is output in row-major form.
//
// All of A, B, and C must be aligned to a multiple of the register size:
// INTGEMM_SSE2: 16 bytes
// INTGEMM_AVX2: 32 bytes
// AVX512: 64 bytes.
//
// A_rows can be anything non-negative.
// width must be a multiple of the register size.
// B_cols must be a multiple of 8.
// Multiply16
/* Generates the 16-bit Multiply() routine for one ISA (see the interface
 * contract in the comment block above).  B is walked eight columns at a
 * time; for each row of A, madd_epi16 products are accumulated into eight
 * 32-bit sum registers, which are then reduced and handed to RunCallback. */
#define INTGEMM_MULTIPLY16(Register, target, cpu_type) \
template <typename Callback> target static void Multiply(const int16_t *A, const int16_t *B, Index A_rows, Index width, Index B_cols, Callback callback) { \
  assert(width % (sizeof(Register) / sizeof(int16_t)) == 0); \
  assert(B_cols % 8 == 0); \
  assert(reinterpret_cast<uintptr_t>(A) % sizeof(Register) == 0); \
  assert(reinterpret_cast<uintptr_t>(B) % sizeof(Register) == 0); \
  const int simd_width = width / (sizeof(Register) / sizeof(int16_t)); \
  auto callback_impl = callbacks::CallbackImpl<cpu_type, Callback>(callback); \
  _Pragma("omp for") \
  for (Index B0_colidx = 0; B0_colidx < B_cols; B0_colidx += 8) { \
    const Register *B0_col = reinterpret_cast<const Register *>(B) + simd_width * B0_colidx; \
    /* Process one row of A at a time.  Doesn't seem to be faster to do multiple rows of A at once.*/ \
    for (Index A_rowidx = 0; A_rowidx < A_rows; ++A_rowidx) { \
      const Register *A_row = reinterpret_cast<const Register*>(A + A_rowidx * width); \
      /* These will be packed 32-bit integers containing sums for each row of B multiplied by the row of A.
         Iterate over shared (inner) dimension.*/ \
      int k = 0; \
      Register a = *(A_row + k); \
      Register sum0 = madd_epi16(a, *(B0_col + k * 8)); \
      Register sum1 = madd_epi16(a, *(B0_col + k * 8 + 1)); \
      Register sum2 = madd_epi16(a, *(B0_col + k * 8 + 2)); \
      Register sum3 = madd_epi16(a, *(B0_col + k * 8 + 3)); \
      Register sum4 = madd_epi16(a, *(B0_col + k * 8 + 4)); \
      Register sum5 = madd_epi16(a, *(B0_col + k * 8 + 5)); \
      Register sum6 = madd_epi16(a, *(B0_col + k * 8 + 6)); \
      Register sum7 = madd_epi16(a, *(B0_col + k * 8 + 7)); \
      for (int k = 1; k < simd_width; ++k) { \
        Register a = *(A_row + k); \
        /* Multiply 16-bit, horizontally add to packed 32-bit integers.*/ \
        Register mult0 = madd_epi16(a, *(B0_col + k * 8)); \
        Register mult1 = madd_epi16(a, *(B0_col + k * 8 + 1)); \
        Register mult2 = madd_epi16(a, *(B0_col + k * 8 + 2)); \
        Register mult3 = madd_epi16(a, *(B0_col + k * 8 + 3)); \
        Register mult4 = madd_epi16(a, *(B0_col + k * 8 + 4)); \
        Register mult5 = madd_epi16(a, *(B0_col + k * 8 + 5)); \
        Register mult6 = madd_epi16(a, *(B0_col + k * 8 + 6)); \
        Register mult7 = madd_epi16(a, *(B0_col + k * 8 + 7)); \
        /* Sum packed 32-bit integers with danger of overflow.
           TODO: accumulate in 64-bit every so often.*/ \
        sum0 = add_epi32(sum0, mult0); \
        sum1 = add_epi32(sum1, mult1); \
        sum2 = add_epi32(sum2, mult2); \
        sum3 = add_epi32(sum3, mult3); \
        sum4 = add_epi32(sum4, mult4); \
        sum5 = add_epi32(sum5, mult5); \
        sum6 = add_epi32(sum6, mult6); \
        sum7 = add_epi32(sum7, mult7); \
      } \
      /* Reduce sums within 128-bit lanes.*/ \
      Register pack0123 = Pack0123(sum0, sum1, sum2, sum3); \
      Register pack4567 = Pack0123(sum4, sum5, sum6, sum7); \
      /*The specific implementation may need to reduce further.*/ \
      auto total = PermuteSummer(pack0123, pack4567); \
      RunCallback(callback_impl, total, A_rowidx, B0_colidx, A_rows, B_cols); \
    } \
  } \
} \

//An int8_prepbias version of the above code, using the add 127 technique
/* Computes the per-column sums of B (A is implicitly a row of 1s), used to
 * build the bias correction for the shifted unsigned 8-bit multiply. */
#define INTGEMM_PREPAREBIASFOR8(Register, target, cpu_type) \
template <class Callback> target static void PrepareBias(const int8_t *B, Index width, Index B_cols, Callback callback) { \
  assert(width % (sizeof(Register) / sizeof(int8_t)) == 0); \
  assert(B_cols % 8 == 0); \
  assert(reinterpret_cast<uintptr_t>(B) % sizeof(Register) == 0); \
  const int simd_width = width / (sizeof(Register) / sizeof(int8_t)); \
  auto callback_impl = callbacks::CallbackImpl<cpu_type, Callback>(callback); \
  const Register a = set1_epi8<Register>(1); \
  _Pragma("omp for") \
  for (Index B0_colidx = 0; B0_colidx < B_cols; B0_colidx += 8) { \
    const Register *B0_col = reinterpret_cast<const Register *>(B) + simd_width * B0_colidx; \
    /*const Register *A_row = reinterpret_cast<const Register*>(A + A_rowidx * width);*/ \
    /* These will be packed 16-bit integers containing sums for each row of B multiplied by the row of A.
\ Iterate over shared (inner) dimension.*/ \
      int k = 0; \
      Register sum0 = maddubs_epi16(a, *(B0_col + k * 8)); \
      Register sum1 = maddubs_epi16(a, *(B0_col + k * 8 + 1)); \
      Register sum2 = maddubs_epi16(a, *(B0_col + k * 8 + 2)); \
      Register sum3 = maddubs_epi16(a, *(B0_col + k * 8 + 3)); \
      Register sum4 = maddubs_epi16(a, *(B0_col + k * 8 + 4)); \
      Register sum5 = maddubs_epi16(a, *(B0_col + k * 8 + 5)); \
      Register sum6 = maddubs_epi16(a, *(B0_col + k * 8 + 6)); \
      Register sum7 = maddubs_epi16(a, *(B0_col + k * 8 + 7)); \
      /* Upcast to 32-bit and horizontally add. Seems a bit faster if this is declared here.*/ \
      Register ones = set1_epi16<Register>(1); \
      sum0 = madd_epi16(sum0, ones); \
      sum1 = madd_epi16(sum1, ones); \
      sum2 = madd_epi16(sum2, ones); \
      sum3 = madd_epi16(sum3, ones); \
      sum4 = madd_epi16(sum4, ones); \
      sum5 = madd_epi16(sum5, ones); \
      sum6 = madd_epi16(sum6, ones); \
      sum7 = madd_epi16(sum7, ones); \
      for (int k = 1; k < simd_width; ++k) { \
        /*Register a = *(A_row + k);*/ \
        /* Multiply 8-bit, horizontally add to packed 16-bit integers.*/ \
        Register mult0 = maddubs_epi16(a, *(B0_col + k * 8)); \
        Register mult1 = maddubs_epi16(a, *(B0_col + k * 8 + 1)); \
        Register mult2 = maddubs_epi16(a, *(B0_col + k * 8 + 2)); \
        Register mult3 = maddubs_epi16(a, *(B0_col + k * 8 + 3)); \
        Register mult4 = maddubs_epi16(a, *(B0_col + k * 8 + 4)); \
        Register mult5 = maddubs_epi16(a, *(B0_col + k * 8 + 5)); \
        Register mult6 = maddubs_epi16(a, *(B0_col + k * 8 + 6)); \
        Register mult7 = maddubs_epi16(a, *(B0_col + k * 8 + 7)); \
        /* Upcast to 32-bit and horizontally add.*/ \
        mult0 = madd_epi16(mult0, ones); \
        mult1 = madd_epi16(mult1, ones); \
        mult2 = madd_epi16(mult2, ones); \
        mult3 = madd_epi16(mult3, ones); \
        mult4 = madd_epi16(mult4, ones); \
        mult5 = madd_epi16(mult5, ones); \
        mult6 = madd_epi16(mult6, ones); \
        mult7 = madd_epi16(mult7, ones); \
        /*Add in 32bit*/ \
        sum0 = add_epi32(sum0, mult0); \
        sum1 = add_epi32(sum1, mult1); \
        sum2 = add_epi32(sum2, mult2); \
        sum3 = add_epi32(sum3, mult3); \
        sum4 = add_epi32(sum4, mult4); \
        sum5 = add_epi32(sum5, mult5); \
        sum6 = add_epi32(sum6, mult6); \
        sum7 = add_epi32(sum7, mult7); \
        \
      } \
      /* Reduce sums within 128-bit lanes.*/ \
      Register pack0123 = Pack0123(sum0, sum1, sum2, sum3); \
      Register pack4567 = Pack0123(sum4, sum5, sum6, sum7); \
      /*The specific implementation may need to reduce further.*/ \
      auto total = PermuteSummer(pack0123, pack4567); \
      RunCallback(callback_impl, total, 0, B0_colidx, 1, B_cols); \
  } \
} \

//An int8 version of the above code, using the add 127 technique
/* A must already be shifted to unsigned (quantized values + 127) by
 * PrepareA; the bias computed by PrepareBias above undoes the shift. */
#define INTGEMM_MULTIPLY8SHIFT(Register, target, cpu_type) \
template <class Callback> target static void Multiply8Shift(const uint8_t *A, const int8_t *B, Index A_rows, Index width, Index B_cols, Callback callback) { \
  assert(width % (sizeof(Register) / sizeof(int8_t)) == 0); \
  assert(B_cols % 8 == 0); \
  assert(reinterpret_cast<uintptr_t>(A) % sizeof(Register) == 0); \
  assert(reinterpret_cast<uintptr_t>(B) % sizeof(Register) == 0); \
  const int simd_width = width / (sizeof(Register) / sizeof(int8_t)); \
  auto callback_impl = callbacks::CallbackImpl<cpu_type, Callback>(callback); \
  _Pragma("omp for") \
  for (Index B0_colidx = 0; B0_colidx < B_cols; B0_colidx += 8) { \
    const Register *B0_col = reinterpret_cast<const Register *>(B) + simd_width * B0_colidx; \
    /* Process one row of A at a time.  Doesn't seem to be faster to do multiple rows of A at once.*/ \
    for (Index A_rowidx = 0; A_rowidx < A_rows; ++A_rowidx) { \
      const Register *A_row = reinterpret_cast<const Register*>(A + A_rowidx * width); \
      /* These will be packed 16-bit integers containing sums for each row of B multiplied by the row of A.
\ Iterate over shared (inner) dimension.*/ \
      int k = 0; \
      Register a = *(A_row + k); \
      Register sum0 = maddubs_epi16(a, *(B0_col + k * 8)); \
      Register sum1 = maddubs_epi16(a, *(B0_col + k * 8 + 1)); \
      Register sum2 = maddubs_epi16(a, *(B0_col + k * 8 + 2)); \
      Register sum3 = maddubs_epi16(a, *(B0_col + k * 8 + 3)); \
      Register sum4 = maddubs_epi16(a, *(B0_col + k * 8 + 4)); \
      Register sum5 = maddubs_epi16(a, *(B0_col + k * 8 + 5)); \
      Register sum6 = maddubs_epi16(a, *(B0_col + k * 8 + 6)); \
      Register sum7 = maddubs_epi16(a, *(B0_col + k * 8 + 7)); \
      /* Upcast to 32-bit and horizontally add. Seems a bit faster if this is declared here.*/ \
      Register ones = set1_epi16<Register>(1); \
      sum0 = madd_epi16(sum0, ones); \
      sum1 = madd_epi16(sum1, ones); \
      sum2 = madd_epi16(sum2, ones); \
      sum3 = madd_epi16(sum3, ones); \
      sum4 = madd_epi16(sum4, ones); \
      sum5 = madd_epi16(sum5, ones); \
      sum6 = madd_epi16(sum6, ones); \
      sum7 = madd_epi16(sum7, ones); \
      for (int k = 1; k < simd_width; ++k) { \
        Register a = *(A_row + k); \
        /* Multiply 8-bit, horizontally add to packed 16-bit integers.*/ \
        Register mult0 = maddubs_epi16(a, *(B0_col + k * 8)); \
        Register mult1 = maddubs_epi16(a, *(B0_col + k * 8 + 1)); \
        Register mult2 = maddubs_epi16(a, *(B0_col + k * 8 + 2)); \
        Register mult3 = maddubs_epi16(a, *(B0_col + k * 8 + 3)); \
        Register mult4 = maddubs_epi16(a, *(B0_col + k * 8 + 4)); \
        Register mult5 = maddubs_epi16(a, *(B0_col + k * 8 + 5)); \
        Register mult6 = maddubs_epi16(a, *(B0_col + k * 8 + 6)); \
        Register mult7 = maddubs_epi16(a, *(B0_col + k * 8 + 7)); \
        /* Upcast to 32-bit and horizontally add.*/ \
        mult0 = madd_epi16(mult0, ones); \
        mult1 = madd_epi16(mult1, ones); \
        mult2 = madd_epi16(mult2, ones); \
        mult3 = madd_epi16(mult3, ones); \
        mult4 = madd_epi16(mult4, ones); \
        mult5 = madd_epi16(mult5, ones); \
        mult6 = madd_epi16(mult6, ones); \
        mult7 = madd_epi16(mult7, ones); \
        /*Add in 32bit*/ \
        sum0 = add_epi32(sum0, mult0); \
        sum1 = add_epi32(sum1, mult1); \
        sum2 = add_epi32(sum2, mult2); \
        sum3 = add_epi32(sum3, mult3); \
        sum4 = add_epi32(sum4, mult4); \
        sum5 = add_epi32(sum5, mult5); \
        sum6 = add_epi32(sum6, mult6); \
        sum7 = add_epi32(sum7, mult7); \
        \
      } \
      /* Reduce sums within 128-bit lanes.*/ \
      Register pack0123 = Pack0123(sum0, sum1, sum2, sum3); \
      Register pack4567 = Pack0123(sum4, sum5, sum6, sum7); \
      /*The specific implementation may need to reduce further.*/ \
      auto total = PermuteSummer(pack0123, pack4567); \
      RunCallback(callback_impl, total, A_rowidx, B0_colidx, A_rows, B_cols); \
    } \
  } \
} \

/* 8-bit matrix multiply used by AVX and AVX2.
 * These have two peculiar properties:
 * 1. The sign instructions don't exist in AVX512.
 * 2. 16 registers means gcc's register allocation failed so I wrote it in my
 *    own asm.
 * 3. They support 3-argument vpsignb and vpmaddubsw.
 *
 * Fun fact: AVX introduced the three-argument vpsignb and vpmaddubsw but only
 * for 128-bit, despite the primary change in AVX being the addition of
 * 256-bit.  We had to wait for INTGEMM_AVX2 to get 256-bit versions of vpsignb and
 * vpmaddubsw.  That's why this code is generic over 128-bit or 256-bit.
 */
/* Inner-loop step for the signed 8-bit AVX2 kernel: consumes one register
 * of A and eight consecutive registers of B, accumulating saturated 16-bit
 * sums into sum0..sum7.  Hand-written asm; see register rationale below. */
INTGEMM_AVX2 inline static void InnerINTGEMM_AVX2(
    __m256i a, const __m256i *b,
    __m256i &sum0, __m256i &sum1, __m256i &sum2, __m256i &sum3,
    __m256i &sum4, __m256i &sum5, __m256i &sum6, __m256i &sum7) {
  // Annoyingly the only 8-bit multiply is signed * unsigned (maddubs).
  // So we take the sign bits off of a and apply them each b in a * b.
  //
  // We have only 16 YMM registers but we want to store:
  // 1 for a (or |a|)
  // 8 temporaries for applying sign to each column of B.
  // 8 sums.
  //
  // gcc's register allocator does:
  // 1 for a, do all the sign application, then overwrite with |a|
  // 8 temporaries
  // 7 sums in registers + 1 on the stack
  //
  // But it's possible to complete an operation early, freeing up its
  // temporary register for reuse.
  // But completing an operation early
  // requires us to have |a| for vpmaddubsw while completing the later
  // operation needs a again to apply sign.
  //
  // So we do two columns, 0 and 1, early.  This allows b0_b6 and b1_b7
  // to be reused by columns 6 and 7, respectively.  And there's enough
  // registers to store both a and |a|.
  //
  // These are the temporary variables used to process each column of b.
  // We let the compiler choose which register number is which, but force
  // it to allocate all registers.
  __m256i absa;
  __m256i b0_b6, b1_b7, b2, b3, b4, b5;
  // Maybe this will tell gcc that we're accessing 8 registers starting
  // at B_live.  Though I doubt it because we're passing the address as a
  // register.
  typedef struct { __m256i x[8]; } B_range;
  asm(
      // Copy the first 6 columns of b to registers.  We assume B has
      // been rearranged so that these 8 columns are consecutive.
      // vpsignb does not take a memory address as its second argument,
      // so this can't be inlined into vsignb.
      "vmovdqa (%[B]), %[b0_b6]\n"
      "vmovdqa %c[size](%[B]), %[b1_b7]\n"
      // These multiplies are executed by the assembler, not by the CPU
      // at run time.
      // I would have liked to just initialize b2 etc above but that
      // would make it an input argument "+x" instead of "=&x".  And +x
      // counts as two operands for purposes of gcc's annoying 30-operand
      // limit.
      "vmovdqa 2*%c[size](%[B]), %[b2]\n"
      "vmovdqa 3*%c[size](%[B]), %[b3]\n"
      "vmovdqa 4*%c[size](%[B]), %[b4]\n"
      "vmovdqa 5*%c[size](%[B]), %[b5]\n"
      // Store the absolute value of a in absa.
      "vpabsb %[a], %[absa]\n"
      // If a byte of a is negative, negate the corresponding byte in
      // b0_b6 etc.
      "vpsignb %[a], %[b0_b6], %[b0_b6]\n"
      "vpsignb %[a], %[b1_b7], %[b1_b7]\n"
      // Multiply signed * unsigned then horizontally add to form packed
      // 16-bit integers:
      // b0[0] * |a|[0] + b0[1] * |a|[1], b0[2] * |a|[2] + b0[3] * |a|[3], ...
      "vpmaddubsw %[b0_b6], %[absa], %[b0_b6]\n"
      "vpmaddubsw %[b1_b7], %[absa], %[b1_b7]\n"
      // vpmaddubsw has latency 5 so work on some other sign bits while
      // we're at it.
      "vpsignb %[a], %[b2], %[b2]\n"
      "vpsignb %[a], %[b3], %[b3]\n"
      "vpsignb %[a], %[b4], %[b4]\n"
      "vpsignb %[a], %[b5], %[b5]\n"
      // Perform a 16-bit add with saturation to accumlate sums.
      "vpaddsw %[b0_b6], %[sum0], %[sum0]\n"
      // Now we can reuse b0_b6 for b6
      "vmovdqa 6*%c[size](%[B]), %[b0_b6]\n"
      "vpaddsw %[b1_b7], %[sum1], %[sum1]\n"
      // Now we can reuse b1_b7 for b7
      "vmovdqa 7*%c[size](%[B]), %[b1_b7]\n"
      // More crunching while the load happens.
      "vpmaddubsw %[b2], %[absa], %[b2]\n"
      "vpmaddubsw %[b3], %[absa], %[b3]\n"
      "vpmaddubsw %[b4], %[absa], %[b4]\n"
      "vpsignb %[a], %[b0_b6], %[b0_b6]\n"
      "vpsignb %[a], %[b1_b7], %[b1_b7]\n"
      "vpmaddubsw %[b5], %[absa], %[b5]\n"
      "vpmaddubsw %[b0_b6], %[absa], %[b0_b6]\n"
      "vpmaddubsw %[b1_b7], %[absa], %[b1_b7]\n"
      "vpaddsw %[b2], %[sum2], %[sum2]\n"
      "vpaddsw %[b3], %[sum3], %[sum3]\n"
      "vpaddsw %[b4], %[sum4], %[sum4]\n"
      "vpaddsw %[b5], %[sum5], %[sum5]\n"
      "vpaddsw %[b0_b6], %[sum6], %[sum6]\n"
      "vpaddsw %[b1_b7], %[sum7], %[sum7]\n"
      : [sum0] "+x" (sum0),
        [sum1] "+x" (sum1),
        [sum2] "+x" (sum2),
        [sum3] "+x" (sum3),
        [sum4] "+x" (sum4),
        [sum5] "+x" (sum5),
        [sum6] "+x" (sum6),
        [sum7] "+x" (sum7),
        [b0_b6] "=&x" (b0_b6),
        [b1_b7] "=&x" (b1_b7),
        [b2] "=&x" (b2),
        [b3] "=&x" (b3),
        [b4] "=&x" (b4),
        [b5] "=&x" (b5),
        [absa] "=&x" (absa)
      :
        // I would like to use m here but that non-deterministically
        // chooses %(eax) or -256$(eax) and there's no way to add to that
        // memory address:
        // https://gcc.gnu.org/ml/gcc-help/2011-04/msg00518.html
        //
        [B] "r" (reinterpret_cast<const B_range*>(b)),
        [a] "x" (a),
        [size] "i" (sizeof(__m256i))
  );
}

// For INTGEMM_SSSE3 without AVX
/* Same inner-loop step as InnerINTGEMM_AVX2, but 128-bit and expressed with
 * intrinsics (SSSE3's two-operand forms are fine for the compiler here). */
INTGEMM_SSSE3 inline static void InnerINTGEMM_SSSE3(
    __m128i a, const __m128i *b,
    __m128i &sum0, __m128i &sum1, __m128i &sum2, __m128i &sum3,
    __m128i &sum4, __m128i &sum5, __m128i &sum6, __m128i &sum7) {
  __m128i a_positive = abs_epi8(a);
  sum0 = adds_epi16(sum0, maddubs_epi16(a_positive, sign_epi8(b[0], a)));
  sum1 = adds_epi16(sum1, maddubs_epi16(a_positive, sign_epi8(b[1], a)));
  sum2 = adds_epi16(sum2, maddubs_epi16(a_positive, sign_epi8(b[2], a)));
  sum3 = adds_epi16(sum3, maddubs_epi16(a_positive, sign_epi8(b[3], a)));
  sum4 = adds_epi16(sum4, maddubs_epi16(a_positive, sign_epi8(b[4], a)));
  sum5 = adds_epi16(sum5, maddubs_epi16(a_positive, sign_epi8(b[5], a)));
  sum6 = adds_epi16(sum6, maddubs_epi16(a_positive, sign_epi8(b[6], a)));
  sum7 = adds_epi16(sum7, maddubs_epi16(a_positive, sign_epi8(b[7], a)));
}

//INTGEMM_AVX2 or INTGEMM_SSSE3 multiply
/* Generates the signed 8-bit Multiply() routine; dispatches the inner loop
 * to Inner<target> (asm for AVX2, intrinsics for SSSE3) via token pasting. */
#define INTGEMM_MULTIPLY8(Register, target, cpu_type) \
template <typename Callback> target static void Multiply(const int8_t *A, const int8_t *B, Index A_rows, Index width, Index B_cols, Callback callback) { \
  assert(width % sizeof(Register) == 0); \
  assert(B_cols % 8 == 0); \
  assert(reinterpret_cast<uintptr_t>(A) % sizeof(Register) == 0); \
  assert(reinterpret_cast<uintptr_t>(B) % sizeof(Register) == 0); \
  const int simd_width = width / sizeof(Register); \
  auto callback_impl = callbacks::CallbackImpl<cpu_type, Callback>(callback); \
  _Pragma("omp for") \
  for (Index B0_colidx = 0; B0_colidx < B_cols; B0_colidx += 8) { \
    const Register *B0_col = reinterpret_cast<const Register *>(B) + simd_width * B0_colidx; \
    /*Process one row of A at a time.
Doesn't seem to be faster to do multiple rows of A at once.*/ \
    for (Index A_rowidx = 0; A_rowidx < A_rows; ++A_rowidx) { \
      /*Iterate over shared (inner) dimension.*/ \
      const Register *A_live = reinterpret_cast<const Register *>(A + A_rowidx * width); \
      const Register *A_end = A_live + simd_width; \
      const Register *B_live = B0_col; \
      /* Rather than initializing as zeros and adding, just initialize the first.*/ \
      Register a = *(A_live++); \
      Register a_positive = abs_epi8(a); \
      /* These will be packed 16-bit integers containing sums for each column of B multiplied by the row of A.*/ \
      Register sum0 = maddubs_epi16(a_positive, sign_epi8(B_live[0], a)); \
      Register sum1 = maddubs_epi16(a_positive, sign_epi8(B_live[1], a)); \
      Register sum2 = maddubs_epi16(a_positive, sign_epi8(B_live[2], a)); \
      Register sum3 = maddubs_epi16(a_positive, sign_epi8(B_live[3], a)); \
      Register sum4 = maddubs_epi16(a_positive, sign_epi8(B_live[4], a)); \
      Register sum5 = maddubs_epi16(a_positive, sign_epi8(B_live[5], a)); \
      Register sum6 = maddubs_epi16(a_positive, sign_epi8(B_live[6], a)); \
      Register sum7 = maddubs_epi16(a_positive, sign_epi8(B_live[7], a)); \
      B_live += 8; \
      /* Use A as the loop variable so the add can be done where gcc likes it for branch prediction.*/ \
      for (; A_live != A_end; ++A_live, B_live += 8) { \
        Inner##target(*A_live, B_live, sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7); \
      } \
      /* Convert 16-bit to 32-bit and add, not caring what parts are added.
       * Implementations:
       * 1. https://github.com/tesseract-ocr/tesseract/blob/master/src/arch/intsimdmatrixavx2.cpp#L67 under Apache license:
       *    This does a multiply by 1 and horizontal add:
       *    _mm512_madd_epi16(sum, _mm512_set1_epi16(1))
       *    Current fastest.
       *
       * 2. Signed extension and fold halves:
       *    sum = _mm512_add_epi32(
       *        _mm512_cvtepi16_epi32(_mm512_castsi512_si256(sum)),
       *        _mm512_cvtepi16_epi32(_mm512_extracti64x4_epi64(sum, 1)));
       *
       * 3. Sign extend by abuse of bitshift, then add.
       *    sum = _mm512_add_epi32(
       *        _mm512_srai_epi32(_mm512_slli_epi32(sum, 16), 16),
       *        _mm512_srai_epi32(sum, 16)); */ \
      Register ones = set1_epi16<Register>(1); \
      sum0 = madd_epi16(sum0, ones); \
      sum1 = madd_epi16(sum1, ones); \
      sum2 = madd_epi16(sum2, ones); \
      sum3 = madd_epi16(sum3, ones); \
      sum4 = madd_epi16(sum4, ones); \
      sum5 = madd_epi16(sum5, ones); \
      sum6 = madd_epi16(sum6, ones); \
      sum7 = madd_epi16(sum7, ones); \
      Register pack0123 = Pack0123(sum0, sum1, sum2, sum3); \
      Register pack4567 = Pack0123(sum4, sum5, sum6, sum7); \
      auto total = PermuteSummer(pack0123, pack4567); \
      RunCallback(callback_impl, total, A_rowidx, B0_colidx, A_rows, B_cols); \
    } \
  } \
}

/* Wrap a multiply call in OMP parallelism.  Here it launches threads then
 * inside the implementation there is a pragma omp for.  In gcc >= 8 these
 * could have been the same but older compilers don't imbue target attributes
 * on the hidden function created by pragma omp parallel.
 *
 * Also, gcc 7 is unable to deduce the function pointer type (for ChooseCPU) if
 * I use typename Backend::Integer directly in the arguments.  As a workaround,
 * have a default template argument Integer then use that so it's resolved.
 */
template <class Callback, class Backend, class Integer = typename Backend::Integer> static inline void OMPParallelWrap(const Integer *A, const Integer *B, Index A_rows, Index width, Index B_cols, Callback callback) {
#pragma omp parallel
  Backend::template Multiply<Callback>(A, B, A_rows, width, B_cols, callback);
}
/* Same wrapper for the shifted unsigned 8-bit multiply entry point. */
template <class Callback, class Backend> static inline void OMPParallelWrap8Shift(const uint8_t *A, const int8_t *B, Index A_rows, Index width, Index B_cols, Callback callback) {
#pragma omp parallel
  Backend::template Multiply8Shift<Callback>(A, B, A_rows, width, B_cols, callback);
}

/* Generates MaxAbsolute(): the largest |x| over [begin_float, end_float),
 * used to pick the quantization multiplier.  begin_float must be aligned
 * to the register size; a scalar loop handles the unaligned tail. */
#define INTGEMM_MAXABSOLUTE(Register, target) \
target static inline float MaxAbsolute(const float *begin_float, const float *end_float) { \
  assert(end_float > begin_float); \
  assert(reinterpret_cast<uintptr_t>(begin_float) % sizeof(Register) == 0); \
  const Register *begin = reinterpret_cast<const Register*>(begin_float); \
  const float *end_reg = end_float - (reinterpret_cast<uintptr_t>(end_float) % sizeof(Register)) / sizeof(float); \
  const Register *end = reinterpret_cast<const Register*>(end_reg); \
  union {float f; int32_t i;} and_convert, float_convert; \
  and_convert.i = 0x7fffffff; \
  Register and_me = set1_ps<Register>(and_convert.f); \
  Register highest = setzero_ps<Register>(); \
  for (; begin < end; ++begin) { \
    Register reg = and_ps(and_me, *begin); \
    highest = max_ps(highest, reg); \
  } \
  float ret = MaxFloat32(highest); \
  /* Overhang: this would be more efficient if done in a single SIMD operation with some zeroing */ \
  for (const float *i = end_reg; i < end_float; ++i) { \
    float_convert.f = *i; \
    float_convert.i &= and_convert.i; \
    ret = std::max(ret, float_convert.f); \
  } \
  return ret; \
} \

/* Generates VectorMeanStd(): single-pass mean and standard deviation over
 * [begin_float, end_float), whose length must be a whole number of
 * registers.  With absolute=true the statistics are over |x|. */
#define INTGEMM_VECTORMEANSTD(Register, target) \
target static inline MeanStd VectorMeanStd(const float *begin_float, const float *end_float, bool absolute) { \
  /* Computes the euclidean norm and returns the mean and the standard deviation.
     Optionally it can be the mean and standard deviation in absolute terms. */ \
  assert(end_float > begin_float); \
  assert((end_float - begin_float) % (sizeof(Register) / sizeof(float)) == 0); \
  size_t num_items = end_float - begin_float; \
  const Register *begin = reinterpret_cast<const Register*>(begin_float); \
  const Register *end = reinterpret_cast<const Register*>(end_float); \
  Register squares = set1_ps<Register>(0); \
  Register sums = set1_ps<Register>(0); \
  if (absolute) { \
    const Register mask = set1_ps<Register>(-0.f); \
    for (; begin != end; begin++) { \
      Register vec = *begin; \
      vec = andnot_ps(mask, vec); \
      squares = add_ps(squares, mul_ps(vec, vec)); \
      sums = add_ps(sums, vec); \
    } \
  } else { \
    for (; begin != end; begin++) { \
      Register vec = *begin; \
      squares = add_ps(squares, mul_ps(vec, vec)); \
      sums = add_ps(sums, vec); \
    } \
  } \
  float squares_sum = horizontalSum(squares); \
  float normal_sums = horizontalSum(sums); \
  MeanStd ret; \
  ret.mean = normal_sums/num_items; \
  ret.stddev = std::sqrt((squares_sum/num_items) - (ret.mean*ret.mean)); \
  return ret; \
} \

} // namespace intgemm
kji_optimize.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Matrix dimensions, parsed from the command line in main(). */
int A_row;
int A_col;
int B_row;
int B_col;

/*
 * Allocate a row x col matrix as an array of row pointers.
 * Uses calloc so every element starts at 0.  This is required for
 * correctness: main() accumulates into C with "+=", and the original
 * malloc-based version read uninitialized memory (undefined behavior).
 * Exits with a message on allocation failure.
 */
int **constructMatrix(int row, int col){
    int **matrix = calloc(row, sizeof(int *));
    if (matrix == NULL){
        fprintf(stderr, "constructMatrix: out of memory\n");
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < row; i++){
        matrix[i] = calloc(col, sizeof(int));
        if (matrix[i] == NULL){
            fprintf(stderr, "constructMatrix: out of memory\n");
            exit(EXIT_FAILURE);
        }
    }
    return matrix;
}

/* Free a matrix built by constructMatrix.  `col` is unused but retained
 * for interface compatibility with existing callers. */
void freeMatrix(int **matrix, int row, int col){
    (void)col;
    for (int i = 0; i < row; i++){
        free(matrix[i]);
    }
    free(matrix);
}

/*
 * Usage: kji_optimize A_row A_col B_row B_col num_threads
 *
 * Reads A then B (row-major integers) from the file "matrix", computes
 * C = A * B with a k-j-i loop order parallelized over k, prints the
 * elapsed wall-clock time, and writes C to "kji_optimize_result" for
 * comparison against the golden result.
 */
int main(int argc, char *argv[]){
    if (argc < 6){
        fprintf(stderr, "usage: %s A_row A_col B_row B_col num_threads\n", argv[0]);
        return EXIT_FAILURE;
    }
    A_row = atoi(argv[1]);
    A_col = atoi(argv[2]);
    B_row = atoi(argv[3]);
    B_col = atoi(argv[4]);
    int number_of_threads = atoi(argv[5]);
    /* The k loop runs to A_col and indexes B[k][j]; a mismatch would read
     * past the rows of B. */
    if (A_col != B_row){
        fprintf(stderr, "dimension mismatch: A is %dx%d but B is %dx%d\n",
                A_row, A_col, B_row, B_col);
        return EXIT_FAILURE;
    }

    FILE *input = fopen("matrix", "r");
    if (input == NULL){
        fprintf(stderr, "cannot open input file 'matrix'\n");
        return EXIT_FAILURE;
    }

    int **A = constructMatrix(A_row, A_col);
    int **B = constructMatrix(B_row, B_col);
    int **C = constructMatrix(A_row, B_col);  /* zero-initialized accumulator */

    //read A
    for (int i = 0; i < A_row; i++){
        for (int j = 0; j < A_col; j++){
            if (fscanf(input, "%d", &A[i][j]) != 1){
                fprintf(stderr, "bad input while reading A\n");
                return EXIT_FAILURE;
            }
        }
    }
    //read B
    for (int i = 0; i < B_row; i++){
        for (int j = 0; j < B_col; j++){
            if (fscanf(input, "%d", &B[i][j]) != 1){
                fprintf(stderr, "bad input while reading B\n");
                return EXIT_FAILURE;
            }
        }
    }
    fclose(input);

    double start_time = omp_get_wtime();
    //multiply (k-j-i order): B[k][j] is hoisted into temp; because the
    //parallel loop is over k, several threads update the same C[i][j],
    //so the addition must be atomic.
    int i, j, k;
    int temp;
#pragma omp parallel for shared(A,B,C) private(i,j,k,temp) num_threads(number_of_threads)
    for (k = 0; k < A_col; k++){
        for (j = 0; j < B_col; j++){
            temp = B[k][j];
            for (i = 0; i < A_row; i++){
#pragma omp atomic
                C[i][j] += A[i][k] * temp;
            }
        }
    }
    double end_time = omp_get_wtime();
    printf("%s: %g sec.\n", "kji_optimize_runtime", end_time - start_time);

    //output the result to compare with golden result
    FILE *out = fopen("kji_optimize_result", "w");
    if (out == NULL){
        fprintf(stderr, "cannot open output file 'kji_optimize_result'\n");
        return EXIT_FAILURE;
    }
    for (int i = 0; i < A_row; i++){
        for (int j = 0; j < B_col; j++){
            fprintf(out, "%d ", C[i][j]);
        }
        fprintf(out, "\n");
    }
    fprintf(out, "\n");
    fclose(out);

    freeMatrix(A, A_row, A_col);
    freeMatrix(B, B_row, B_col);
    freeMatrix(C, A_row, B_col);
    return 0;
}
comm.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
#ifndef MXNET_KVSTORE_COMM_H_
#define MXNET_KVSTORE_COMM_H_
#include <dmlc/omp.h>
#include <string>
#include <algorithm>
#include <utility>
#include <limits>
#include <vector>
#include <tuple>
#include <thread>
#include "mxnet/ndarray.h"
#include "gradient_compression.h"
#include "../ndarray/ndarray_function.h"
#include "../operator/tensor/sparse_retain-inl.h"
#include "../profiler/profiler.h"
#include "./kvstore_utils.h"
namespace mxnet {
namespace kvstore {
/**
 * \brief multiple device communication
 */
class Comm {
 public:
  Comm() {
    pinned_ctx_ = Context::CPUPinned(0);
  }
  virtual ~Comm() {}
  /**
   * \brief init key with the data shape and storage shape
   */
  virtual void Init(int key, const NDArrayStorageType stype,
                    const mxnet::TShape& shape, int dtype = mshadow::kFloat32) = 0;
  /**
   * \brief returns src[0] + .. + src[src.size()-1]
   */
  virtual const NDArray& Reduce(
      int key, const std::vector<NDArray>& src, int priority) = 0;
  /**
   * \brief copy from src to dst[i] for every i
   */
  virtual void Broadcast(
      int key, const NDArray& src,
      const std::vector<NDArray*> dst, int priority) = 0;
  /**
   * \brief broadcast src to dst[i] with target row_ids for every i
   * \param key the identifier key for the stored ndarray
   * \param src the source row_sparse ndarray to broadcast
   * \param dst a list of destination row_sparse NDArray and its target row_ids to broadcast,
   *        where the row_ids are expected to be unique and sorted in row_id.data()
   * \param priority the priority of the operation
   */
  virtual void BroadcastRowSparse(int key, const NDArray& src,
                                  const std::vector<std::pair<NDArray*, NDArray>>& dst,
                                  const int priority) = 0;
  /**
   * \brief return a pinned context
   */
  Context pinned_ctx() const {
    return pinned_ctx_;
  }
  /**
   * \brief Sets gradient compression parameters to be able to
   *        perform reduce with compressed gradients
   */
  void SetGradientCompression(std::shared_ptr<GradientCompression> gc) {
    gc_ = gc;
  }

 protected:
  // page-locked CPU context used as the staging area for device copies
  Context pinned_ctx_;
  std::shared_ptr<GradientCompression> gc_;
};

/**
 * \brief an implementation of Comm that first copies data to CPU memory, and then
 *        reduces there
 */
class CommCPU : public Comm {
 public:
  CommCPU() {
    nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4);
    bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000);
    // TODO(junwu) delete the following data member, now for benchmark only
    is_serial_push_ = dmlc::GetEnv("MXNET_KVSTORE_SERIAL_PUSH", 0);
  }
  virtual ~CommCPU() {}

  void Init(int key, const NDArrayStorageType stype, const mxnet::TShape& shape,
            int type = mshadow::kFloat32) override {
    // Delayed allocation - the dense merged buffer might not be used at all if push()
    // only sees sparse arrays
    bool delay_alloc = true;
    merge_buf_[key].merged = NDArray(shape, pinned_ctx_, delay_alloc, type);
  }

  const NDArray& Reduce(int key, const
std::vector<NDArray>& src, int priority) override {
    auto& buf = merge_buf_[key];
    const auto stype = src[0].storage_type();
    // avoid extra copy for single device, but it may bring problems for
    // abnormal usage of kvstore
    if (src.size() == 1) {
      if (stype == kDefaultStorage) {
        return src[0];
      } else {
        // With 'local' kvstore, we could store the weight on CPU while compute
        // the gradient on GPU when the weight is extremely large.
        // To avoiding copying the weight to the same context of the gradient,
        // we always copy the gradient to merged buf.
        NDArray& merged = buf.merged_buf(stype);
        CopyFromTo(src[0], &merged, priority);
        return merged;
      }
    }
    NDArray& buf_merged = buf.merged_buf(stype);
    // normal dense reduce
    if (stype == kDefaultStorage) {
      std::vector<Engine::VarHandle> const_vars(src.size() - 1);
      std::vector<NDArray> reduce(src.size());
      CopyFromTo(src[0], &buf_merged, priority);
      reduce[0] = buf_merged;
      if (buf.copy_buf.empty()) {
        buf.copy_buf.resize(src.size() - 1);
        for (size_t j = 0; j < src.size() - 1; ++j) {
          // allocate copy buffer
          buf.copy_buf[j] = NDArray(src[0].shape(), pinned_ctx_, false, src[0].dtype());
        }
      }
      CHECK(stype == buf.copy_buf[0].storage_type())
          << "Storage type mismatch detected. " << stype << "(src) vs. "
          << buf.copy_buf[0].storage_type() << "(buf.copy_buf)";
      for (size_t i = 1; i < src.size(); ++i) {
        CopyFromTo(src[i], &(buf.copy_buf[i - 1]), priority);
        reduce[i] = buf.copy_buf[i - 1];
        const_vars[i - 1] = reduce[i].var();
      }
      // asynchronous sum of all staged copies into reduce[0] (== buf_merged)
      Engine::Get()->PushAsync(
          [reduce, this](RunContext rctx, Engine::CallbackOnStart on_start,
                         Engine::CallbackOnComplete on_complete) {
            on_start();
            ReduceSumCPU(reduce);
            on_complete();
          },
          Context::CPU(), const_vars, {reduce[0].var()},
          FnProperty::kCPUPrioritized, priority, "KVStoreReduce");
    } else {
      // sparse reduce
      std::vector<Engine::VarHandle> const_vars(src.size());
      std::vector<NDArray> reduce(src.size());
      if (buf.copy_buf.empty()) {
        buf.copy_buf.resize(src.size());
        for (size_t j = 0; j < src.size(); ++j) {
          buf.copy_buf[j] = NDArray(src[0].storage_type(), src[0].shape(),
                                    pinned_ctx_, true, src[0].dtype());
        }
      }
      CHECK(stype == buf.copy_buf[0].storage_type())
          << "Storage type mismatch detected. " << stype << "(src) vs. "
          << buf.copy_buf[0].storage_type() << "(buf.copy_buf)";
      for (size_t i = 0; i < src.size(); ++i) {
        CopyFromTo(src[i], &(buf.copy_buf[i]), priority);
        reduce[i] = buf.copy_buf[i];
        const_vars[i] = reduce[i].var();
      }
      Resource rsc = ResourceManager::Get()->Request(buf_merged.ctx(),
          ResourceRequest(ResourceRequest::kTempSpace));
      Engine::Get()->PushAsync(
          [reduce, buf_merged, rsc, this](RunContext rctx, Engine::CallbackOnStart on_start,
                                          Engine::CallbackOnComplete on_complete) {
            on_start();
            NDArray out = buf_merged;
            // is_serial_push_ selects the benchmark-only serial path
            is_serial_push_ ?
                ReduceSumCPUExSerial(reduce, &out)
                : mxnet::ndarray::ElementwiseSum(rctx.get_stream<cpu>(), rsc, reduce, &out);
            on_complete();
          },
          Context::CPU(), const_vars, {buf_merged.var(), rsc.var},
          FnProperty::kCPUPrioritized, priority, "KVStoreReduce");
    }
    return buf_merged;
  }

  void Broadcast(int key, const NDArray& src,
                 const std::vector<NDArray*> dst, int priority) override {
    int mask = src.ctx().dev_mask();
    if (mask == Context::kCPU) {
      for (auto d : dst) CopyFromTo(src, d, priority);
    } else {
      // First copy data to pinned_ctx, then broadcast.
      // Note that kv.init initializes the data on pinned_ctx.
      // This branch indicates push() with ndarrays on gpus were called,
      // and the source is copied to gpu ctx.
      // Also indicates that buffers are already initialized during push().
      auto& buf = merge_buf_[key].merged_buf(src.storage_type());
      CopyFromTo(src, &buf, priority);
      for (auto d : dst) CopyFromTo(buf, d, priority);
    }
  }

  void BroadcastRowSparse(int key, const NDArray& src,
                          const std::vector<std::pair<NDArray*, NDArray>>& dst,
                          const int priority) override {
    using namespace mshadow;
    CHECK_EQ(src.storage_type(), kRowSparseStorage)
        << "BroadcastRowSparse expects row-sparse src NDArray";
    CHECK_EQ(src.ctx().dev_mask(), Context::kCPU)
        << "BroadcastRowSparse with src on gpu context not supported";
    for (const auto& dst_kv : dst) {
      NDArray* out = dst_kv.first;
      NDArray row_id = dst_kv.second;
      CHECK_EQ(out->storage_type(), kRowSparseStorage)
          << "BroadcastRowSparse expects row_sparse dst NDArray";
      CHECK_EQ(row_id.ctx().dev_mask(), Context::kCPU)
          << "BroadcastRowSparse with row_indices on gpu context not supported";
      // retain according to unique indices
      const bool is_same_ctx = out->ctx() == src.ctx();
      const bool is_diff_var = out->var() != src.var();
      NDArray retained_cpu = (is_same_ctx && is_diff_var) ? *out :
          NDArray(kRowSparseStorage, src.shape(), src.ctx(), true,
                  src.dtype(), src.aux_types());
      if (!is_diff_var) {
        common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) +
                        "refers to the same NDArray as the one stored in KVStore."
                        "Performing row_sparse_pull() with such output is going to change the "
                        "data stored in KVStore. Incorrect result may be generated "
                        "next time row_sparse_pull() is called. To avoid such an issue,"
                        "consider create a new NDArray buffer to store the output.");
      }
      Engine::Get()->PushAsync(
          [=](RunContext rctx, Engine::CallbackOnStart on_start,
              Engine::CallbackOnComplete on_complete) {
            on_start();
            const TBlob& indices = row_id.data();
            NDArray temp = retained_cpu;  // get rid the of const qualifier
            op::SparseRetainOpForwardRspImpl<cpu>(
                rctx.get_stream<cpu>(), src, indices, kWriteTo, &temp);
            on_complete();
          },
          Context::CPU(), {src.var(), row_id.var()}, {retained_cpu.var()},
          FnProperty::kNormal, priority, "KVStoreSparseRetain");
      // if retained_cpu == out, CopyFromTo will ignore the copy operation
      CopyFromTo(retained_cpu, out, priority);
    }
  }

 private:
  // reduce sum into val[0]
  inline void ReduceSumCPU(const std::vector<NDArray>& in_data) {
    MSHADOW_TYPE_SWITCH(in_data[0].dtype(), DType, {
      std::vector<DType*> dptr(in_data.size());
      for (size_t i = 0; i < in_data.size(); ++i) {
        TBlob data = in_data[i].data();
        CHECK(data.CheckContiguous());
        dptr[i] = data.FlatTo2D<cpu, DType>().dptr_;
      }
      size_t total = in_data[0].shape().Size();
      ReduceSumCPUImpl(dptr, total);
    });
  }

  // serial implementation of reduce sum for row sparse NDArray.
inline void ReduceSumCPUExSerial(const std::vector<NDArray>& in, NDArray* out) { using namespace rowsparse; using namespace mshadow; auto stype = out->storage_type(); CHECK_EQ(stype, kRowSparseStorage) << "Unexpected storage type " << stype; size_t total_num_rows = 0; size_t num_in = in.size(); // skip the ones with empty indices and values std::vector<bool> skip(num_in, false); // the values tensor of the inputs MSHADOW_TYPE_SWITCH(out->dtype(), DType, { MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, { std::vector<Tensor<cpu, 2, DType>> in_vals(num_in); std::vector<Tensor<cpu, 1, IType>> in_indices(num_in); // offset to the values tensor of all inputs std::vector<size_t> offsets(num_in, 0); std::vector<size_t> num_rows(num_in, 0); for (size_t i = 0; i < num_in; i++) { if (!in[i].storage_initialized()) { skip[i] = true; continue; } auto size = in[i].aux_shape(kIdx).Size(); num_rows[i] = size; total_num_rows += size; in_vals[i] = in[i].data().FlatTo2D<cpu, DType>(); in_indices[i] = in[i].aux_data(kIdx).FlatTo1D<cpu, IType>(); } std::vector<IType> indices; indices.reserve(total_num_rows); // gather indices from all inputs for (size_t i = 0; i < num_in; i++) { for (size_t j = 0; j < num_rows[i]; j++) { indices.emplace_back(in_indices[i][j]); } } CHECK_EQ(indices.size(), total_num_rows); // dedup indices std::sort(indices.begin(), indices.end()); indices.resize(std::unique(indices.begin(), indices.end()) - indices.begin()); // the one left are unique non-zero rows size_t nnr = indices.size(); // allocate memory for output out->CheckAndAlloc({Shape1(nnr)}); auto idx_data = out->aux_data(kIdx).FlatTo1D<cpu, IType>(); auto val_data = out->data().FlatTo2D<cpu, DType>(); for (size_t i = 0; i < nnr; i++) { // copy indices back idx_data[i] = indices[i]; bool zeros = true; for (size_t j = 0; j < num_in; j++) { if (skip[j]) continue; size_t offset = offsets[j]; if (offset < num_rows[j]) { if (indices[i] == in_indices[j][offset]) { if (zeros) { Copy(val_data[i], 
in_vals[j][offset], nullptr); zeros = false; } else { val_data[i] += in_vals[j][offset]; } offsets[j] += 1; } } } } }); }); } template <typename DType> inline static void ReduceSumCPU(const std::vector<DType*>& dptr, size_t offset, index_t size) { using namespace mshadow; // NOLINT(*) Tensor<cpu, 1, DType> in_0(dptr[0] + offset, Shape1(size)); for (size_t i = 1; i < dptr.size(); i += 4) { switch (dptr.size() - i) { case 1: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); in_0 += in_1; break; } case 2: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i + 1] + offset, Shape1(size)); in_0 += in_1 + in_2; break; } case 3: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i + 1] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_3(dptr[i + 2] + offset, Shape1(size)); in_0 += in_1 + in_2 + in_3; break; } default: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i + 1] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_3(dptr[i + 2] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_4(dptr[i + 3] + offset, Shape1(size)); in_0 += in_1 + in_2 + in_3 + in_4; break; } } } } template <typename DType> inline void ReduceSumCPUImpl(std::vector<DType*> dptr, size_t total) { const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10)); long ntask = (total + step - 1) / step; // NOLINT(*) if (total < bigarray_bound_ || nthread_reduction_ <= 1) { ReduceSumCPU(dptr, 0, total); } else { #pragma omp parallel for schedule(static) num_threads(nthread_reduction_) for (long j = 0; j < ntask; ++j) { // NOLINT(*) size_t k = static_cast<size_t>(j); size_t begin = std::min(k * step, total); size_t end = std::min((k + 1) * step, total); if (j == ntask - 1) CHECK_EQ(end, total); ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin)); } } } /// \brief temporal space for pushing and pulling struct BufferEntry { /// \brief the merged 
value NDArray merged; /// \brief the cpu buffer for gpu data std::vector<NDArray> copy_buf; /// \brief the merged buffer for the given storage type inline NDArray& merged_buf(NDArrayStorageType stype) { if (stype == kDefaultStorage) { return merged; } CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype; // check if sparse_merged is initialized if (sparse_merged.is_none()) { CHECK(!merged.is_none()); sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(), true, merged.dtype()); } return sparse_merged; } private: /// \brief the sparse merged value NDArray sparse_merged; }; std::unordered_map<int, BufferEntry> merge_buf_; size_t bigarray_bound_; int nthread_reduction_; bool is_serial_push_; }; /** * \brief an implementation of Comm that performs reduction on device * directly. * * It is faster if the total device-to-device bandwidths is larger than * device-to-cpu, which is often true for 4 or 8 GPUs. But it uses more device * memory. */ class CommDevice : public Comm { public: CommDevice() { inited_ = false; } virtual ~CommDevice() {} void Init(int key, const NDArrayStorageType stype, const mxnet::TShape& shape, int dtype = mshadow::kFloat32) override { sorted_key_attrs_.emplace_back(key, shape, dtype); inited_ = false; } void InitBuffersAndComm(const std::vector<NDArray>& src) { if (!inited_) { std::vector<Context> devs; for (const auto& a : src) { devs.push_back(a.ctx()); } InitMergeBuffer(devs); if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) { EnableP2P(devs); } } } const NDArray& ReduceRowSparse(int key, const std::vector<NDArray>& src, int priority) { auto& buf = merge_buf_[key]; std::vector<NDArray> reduce(src.size()); const NDArrayStorageType stype = src[0].storage_type(); NDArray& buf_merged = buf.merged_buf(stype); if (buf.copy_buf.empty()) { // initialize buffer for copying during reduce buf.copy_buf.resize(src.size()); for (size_t j = 0; j < src.size(); ++j) { buf.copy_buf[j] = NDArray(stype, src[0].shape(), 
buf_merged.ctx(), true, src[0].dtype()); } } CHECK(src[0].storage_type() == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << src[0].storage_type() << "(src) vs. " << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 0; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf_merged, priority); return buf_merged; } const NDArray& Reduce(int key, const std::vector<NDArray>& src, int priority) override { // when this reduce is called from kvstore_dist, gc is not set // we don't do compression twice in dist_sync_device if ((gc_ != nullptr) && (gc_->get_type() != CompressionType::kNone)) { return ReduceCompressed(key, src, priority); } // avoid extra copy for single device, but it may bring problems for // abnormal usage of kvstore if (src.size() == 1) { return src[0]; } InitBuffersAndComm(src); auto& buf = merge_buf_[key]; const NDArrayStorageType stype = src[0].storage_type(); NDArray& buf_merged = buf.merged_buf(stype); // normal dense reduce if (stype == kDefaultStorage) { CopyFromTo(src[0], &buf_merged, priority); std::vector<NDArray> reduce(src.size()); reduce[0] = buf_merged; if (buf.copy_buf.empty()) { // TODO(mli) this results in large device memory usage for huge ndarray, // such as the largest fullc in VGG. consider to do segment reduce with // NDArray.Slice or gpu direct memory access. 
for the latter, we need to // remove some ctx check, and also it reduces 20% perf buf.copy_buf.resize(src.size() - 1); const std::string profiler_scope = profiler::ProfilerScope::Get()->GetCurrentProfilerScope() + "comm_dev:"; for (size_t i = 0; i < src.size() - 1; ++i) { buf.copy_buf[i] = NDArray(buf_merged.shape(), buf_merged.ctx(), false, buf_merged.dtype()); buf.copy_buf[i].AssignStorageInfo(profiler_scope, "copy_buf"); } } for (size_t i = 0; i < src.size() - 1; ++i) { CopyFromTo(src[i + 1], &(buf.copy_buf[i]), priority); reduce[i + 1] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf_merged, priority); } else { // sparse reduce buf_merged = ReduceRowSparse(key, src, priority); } return buf_merged; } const NDArray& ReduceCompressed(int key, const std::vector<NDArray>& src, int priority) { InitBuffersAndComm(src); auto& buf = merge_buf_[key]; std::vector<NDArray> reduce(src.size()); if (buf.copy_buf.empty()) { // one buf for each context buf.copy_buf.resize(src.size()); buf.compressed_recv_buf.resize(src.size()); buf.compressed_send_buf.resize(src.size()); buf.residual.resize(src.size()); const std::string profiler_scope = profiler::ProfilerScope::Get()->GetCurrentProfilerScope() + "comm_dev:"; for (size_t i = 0; i < src.size(); ++i) { buf.copy_buf[i] = NDArray(buf.merged.shape(), buf.merged.ctx(), false, buf.merged.dtype()); buf.copy_buf[i].AssignStorageInfo(profiler_scope, "copy_buf"); buf.residual[i] = NDArray(buf.merged.shape(), src[i].ctx(), false, buf.merged.dtype()); buf.residual[i].AssignStorageInfo(profiler_scope, "residual"); buf.residual[i] = 0; int64_t small_size = gc_->GetCompressedSize(buf.merged.shape().Size()); buf.compressed_recv_buf[i] = NDArray(mxnet::TShape{small_size}, buf.merged.ctx(), false, buf.merged.dtype()); buf.compressed_recv_buf[i].AssignStorageInfo(profiler_scope, "compressed_recv_buf"); buf.compressed_send_buf[i] = NDArray(mxnet::TShape{small_size}, src[i].ctx(), false, buf.merged.dtype()); 
buf.compressed_send_buf[i].AssignStorageInfo(profiler_scope, "compressed_send_buf"); } } for (size_t i = 0; i < src.size(); ++i) { // compress before copy // this is done even if the data is on same context as copy_buf because // we don't want the training to be biased towards data on this GPU gc_->Quantize(src[i], &(buf.compressed_send_buf[i]), &(buf.residual[i]), priority); if (buf.compressed_send_buf[i].ctx() != buf.compressed_recv_buf[i].ctx()) { CopyFromTo(buf.compressed_send_buf[i], &(buf.compressed_recv_buf[i]), priority); } else { // avoid memory copy when they are on same context buf.compressed_recv_buf[i] = buf.compressed_send_buf[i]; } gc_->Dequantize(buf.compressed_recv_buf[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf.merged); return buf.merged; } void Broadcast(int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) override { if (!inited_) { // copy to a random device first int dev_id = key % dst.size(); CopyFromTo(src, dst[dev_id], priority); for (size_t i = 0; i < dst.size(); ++i) { if (i != static_cast<size_t>(dev_id)) { CopyFromTo(*dst[dev_id], dst[i], priority); } } } else { auto& buf_merged = merge_buf_[key].merged_buf(src.storage_type()); CopyFromTo(src, &buf_merged, priority); for (auto d : dst) { CopyFromTo(buf_merged, d, priority); } } } void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) override { CHECK_EQ(src.storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row-sparse src NDArray"; for (const auto& dst_kv : dst) { NDArray* out = dst_kv.first; NDArray row_id = dst_kv.second; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row_sparse dst NDArray"; CHECK_EQ(row_id.ctx(), src.ctx()) << "row_id and src are expected to be on the same context"; // retain according to indices const bool is_same_ctx = out->ctx() == src.ctx(); const bool is_diff_var = 
out->var() != src.var(); NDArray retained_gpu = (is_same_ctx && is_diff_var) ? *out : NDArray(kRowSparseStorage, out->shape(), src.ctx(), true, out->dtype(), out->aux_types()); if (!is_diff_var) { common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) + "refers to the same NDArray as the one stored in KVStore." "Performing row_sparse_pull() with such output is going to change the " "data stored in KVStore. Incorrect result may be generated " "next time row_sparse_pull() is called. To avoid such an issue," "consider create a new NDArray buffer to store the output."); } bool is_gpu = retained_gpu.ctx().dev_mask() == gpu::kDevMask; Engine::Get()->PushAsync( [=](RunContext rctx, Engine::CallbackOnStart on_start, Engine::CallbackOnComplete on_complete) { on_start(); const TBlob& indices = row_id.data(); using namespace mxnet::common; NDArray temp = retained_gpu; switch (temp.ctx().dev_mask()) { case cpu::kDevMask: { SparseRetainOpForwardRspWrapper<cpu>( rctx.get_stream<cpu>(), src, indices, kWriteTo, &temp); break; } #if MXNET_USE_CUDA case gpu::kDevMask: { SparseRetainOpForwardRspWrapper<gpu>( rctx.get_stream<gpu>(), src, indices, kWriteTo, &temp); break; } #endif default: LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR; } on_complete(); }, retained_gpu.ctx(), {src.var(), row_id.var()}, {retained_gpu.var()}, is_gpu ? 
FnProperty::kGPUPrioritized : FnProperty::kCPUPrioritized, priority, "KVStoreSparseRetain"); CopyFromTo(retained_gpu, out, priority); } } using KeyAttrs = std::tuple<int, mxnet::TShape, int>; // try to allocate buff on device evenly void InitMergeBuffer(const std::vector<Context>& devs) { std::sort(sorted_key_attrs_.begin(), sorted_key_attrs_.end(), [](const KeyAttrs& a, const KeyAttrs& b) { return std::get<1>(a).Size() > std::get<1>(b).Size(); }); std::unordered_map<int, std::pair<Context, size_t>> ctx_info; for (auto d : devs) { ctx_info[d.dev_id] = std::make_pair(d, 0); } const std::string profiler_scope = profiler::ProfilerScope::Get()->GetCurrentProfilerScope() + "kvstore:comm_dev:"; for (auto& sorted_key_attr : sorted_key_attrs_) { const int key = std::get<0>(sorted_key_attr); const mxnet::TShape& shape = std::get<1>(sorted_key_attr); const int type = std::get<2>(sorted_key_attr); auto& buf = merge_buf_[key]; Context ctx; size_t min_size = std::numeric_limits<size_t>::max(); for (auto& ctx_info_kv : ctx_info) { size_t size = ctx_info_kv.second.second; if (size <= min_size) { ctx = ctx_info_kv.second.first; min_size = size; } } // Delayed allocation - as the dense merged buffer might not be used at all if push() // only sees sparse arrays if (buf.merged.is_none()) { bool delay_alloc = true; buf.merged = NDArray(shape, ctx, delay_alloc, type); buf.merged.AssignStorageInfo(profiler_scope, "merge_buf_" + std::to_string(key)); } ctx_info[ctx.dev_id].second += shape.Size(); } inited_ = true; } private: void EnableP2P(const std::vector<Context>& devs) { #if MXNET_USE_CUDA std::vector<int> gpus; for (const auto& d : devs) { if (d.dev_mask() == gpu::kDevMask) { gpus.push_back(d.dev_id); } } int n = static_cast<int>(gpus.size()); int enabled = 0; std::vector<int> p2p(n * n); for (int i = 0; i < n; ++i) { // Restores active device to what it was before EnableP2P mxnet::common::cuda::DeviceStore device_store(gpus[i]); for (int j = 0; j < n; j++) { int access; 
cudaDeviceCanAccessPeer(&access, gpus[i], gpus[j]); if (access) { cudaError_t e = cudaDeviceEnablePeerAccess(gpus[j], 0); if (e == cudaSuccess || e == cudaErrorPeerAccessAlreadyEnabled) { ++enabled; p2p[i * n + j] = 1; } } } } if (enabled != n * (n - 1)) { // print warning info if not fully enabled LOG(WARNING) << "only " << enabled << " out of " << n * (n - 1) << " GPU pairs are enabled direct access. " << "It may affect the performance. " << "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off"; std::string access(n, '.'); for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { access[j] = p2p[i * n + j] ? 'v' : '.'; } LOG(WARNING) << access; } } #endif } /// \brief temporal space for pushing and pulling struct BufferEntry { /// \brief the dense merged value for reduce and broadcast operations NDArray merged; /// \brief the gpu buffer for copy during reduce operation std::vector<NDArray> copy_buf; /// \brief the residual buffer for gradient compression std::vector<NDArray> residual; /// \brief the small buffer for compressed data in sender std::vector<NDArray> compressed_send_buf; /// \brief the small buffer for compressed data in receiver std::vector<NDArray> compressed_recv_buf; /// \brief the merged buffer for the given storage type (could be either dense or row_sparse) inline NDArray& merged_buf(NDArrayStorageType stype) { if (stype == kDefaultStorage) { CHECK(!merged.is_none()) << "unintialized merge buffer detected"; return merged; } CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype; // check if sparse_merged is initialized if (sparse_merged.is_none()) { CHECK(!merged.is_none()); sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(), true, merged.dtype()); } return sparse_merged; } private: /// \brief the sparse merged value for reduce and rowsparse broadcast operations NDArray sparse_merged; }; std::unordered_map<int, BufferEntry> merge_buf_; public: bool inited_; std::vector<KeyAttrs> sorted_key_attrs_; }; } // 
namespace kvstore } // namespace mxnet #endif // MXNET_KVSTORE_COMM_H_
GB_unop__floor_fc32_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__floor_fc32_fc32) // op(A') function: GB (_unop_tran__floor_fc32_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // cast: GxB_FC32_t cij = aij // unaryop: cij = GB_cfloorf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_cfloorf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = aij ; \ Cx [pC] = GB_cfloorf (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FLOOR || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__floor_fc32_fc32) ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if 
GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = GB_cfloorf (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = GB_cfloorf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__floor_fc32_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
UncertainDataEstimator.h
/// \ingroup base /// \class ttk::UncertainDataEstimator /// \author Michael Michaux <michauxmichael89@gmail.com> /// \date August 2016. /// /// \brief TTK processing package that takes an input ensemble data set /// (represented by a list of scalar fields) and which computes various /// vertexwise statistics (PDF estimation, bounds, moments, etc.) /// /// \sa ttkUncertainDataEstimator.cpp %for a usage example. #ifndef _UNCERTAINDATAESTIMATOR_H #define _UNCERTAINDATAESTIMATOR_H // base code includes #include <Wrapper.h> namespace ttk{ template <class dataType> class PDFBounds : public Debug { public: PDFBounds() { numberOfVertices_ = 0; } ~PDFBounds() { flush(); } int evaluateRealization(const void *voidPointer) { #ifndef TTK_ENABLE_KAMIKAZE if(!(numberOfVertices_>0)) { return -1; // Number of vertices not defined } #endif const dataType *inputData = reinterpret_cast<const dataType*>(voidPointer); SimplexId numberOfVertices = static_cast<SimplexId>(numberOfVertices_); /* Initialize if first call since a change */ if (!(upperBound_.size()==numberOfVertices) || !(lowerBound_.size()==numberOfVertices)) { upperBound_.resize(numberOfVertices); lowerBound_.resize(numberOfVertices); #ifdef TTK_ENABLE_OPENMP #pragma omp parallel for num_threads(threadNumber_) #endif for (size_t i = 0 ; i < numberOfVertices ; i++) { upperBound_[i] = inputData[i]; lowerBound_[i] = inputData[i]; } } else { /* Update the two fields with the new input */ #ifdef TTK_ENABLE_OPENMP #pragma omp parallel for num_threads(threadNumber_) #endif for (size_t i = 0 ; i < numberOfVertices ; i++) { // Upper Bound if (inputData[i] > upperBound_[i]) { upperBound_[i] = inputData[i]; } // Lower Bound if (inputData[i] < lowerBound_[i]) { lowerBound_[i] = inputData[i]; } } } return 0; } std::pair<dataType,dataType> getRange() const { std::pair<dataType,dataType> range; range.first = getRangeMin(); range.second = getRangeMax(); return range; } dataType getRangeMax() const { if(upperBound_.size()) { dataType 
maxValue = upperBound_[0]; for (size_t i = 1; i < upperBound_.size(); i++) { if (upperBound_[i] > maxValue) { maxValue = upperBound_[i]; } } return maxValue; } else { return 0; } } dataType getRangeMin() const { if(lowerBound_.size()) { dataType minValue = lowerBound_[0]; for (size_t i = 1; i < lowerBound_.size(); i++) { if (lowerBound_[i] < minValue) { minValue = lowerBound_[i]; } } return minValue; } else { return 0; } } inline int flush() { numberOfVertices_ = 0; upperBound_.clear(); lowerBound_.clear(); return 0; } inline dataType* getLowerBoundPointer() { return lowerBound_.data(); } inline dataType* getUpperBoundPointer() { return upperBound_.data(); } inline int setNumberOfVertices(const SimplexId number) { numberOfVertices_ = number; return 0; } protected: SimplexId numberOfVertices_; std::vector<dataType> upperBound_; std::vector<dataType> lowerBound_; }; class PDFHistograms : public Debug { public: PDFHistograms() { numberOfBins_ = 0; numberOfInputs_ = 0; numberOfVertices_ = 0; rangeMax_ = 0; rangeMin_ = 0; } ~PDFHistograms() { flush(); } template <class dataType> int evaluateRealization(const dataType *inputData) { #ifndef TTK_ENABLE_KAMIKAZE if(!(rangeMin_<rangeMax_)) { return -1; // Range error } if(!(numberOfBins_>0)) { return -2; // Number of bins not defined } if(!(numberOfVertices_>0)) { return -3; // Number of vertices not defined } #endif if(numberOfInputs_ == 0) { /* Initialize */ probability_.resize(numberOfBins_); double dx = (rangeMax_-rangeMin_) / static_cast<double>(numberOfBins_); for (size_t i=0 ; i < numberOfBins_ ; i++) { probability_[i].resize(numberOfVertices_); binValue_[i] = rangeMin_+(dx/2.0)+(static_cast<double>(i)*dx); } } /* Add input datas */ for(SimplexId i=0 ; i<numberOfVertices_ ; i++) { int bin = static_cast<int>(floor((inputData[i]-rangeMin_)*numberOfBins_/(rangeMax_-rangeMin_))); bin = (bin == numberOfBins_) ? 
numberOfBins_-1 : bin;
        probability_[bin][i] += 1.0;
      }
      numberOfInputs_++;
      return 0;
    }

    // Reset the histogram to an empty state (all counters and ranges zeroed).
    inline int flush() {
      binValue_.clear();
      numberOfBins_ = 0;
      numberOfInputs_ = 0;
      numberOfVertices_ = 0;
      rangeMax_ = 0;
      rangeMin_ = 0;
      // selection_.clear(); // TODO : selection support
      return 0;
    }

    // Raw pointer to the per-vertex probability array of one bin,
    // or NULL when binId is out of range.
    inline double* getBinFieldPointer(const int binId) {
      if(binId < numberOfBins_) {
        return probability_[binId].data();
      } else {
        return NULL;
      }
    }

    // Gather, for a single vertex, its probability value in every bin.
    // Bins whose storage was not allocated for all vertices yield 0.
    void getVertexHistogram(const SimplexId vertexId,
      std::vector<double> &histogram) const {
      histogram.resize(numberOfBins_);
      if(vertexId < numberOfVertices_) {
#ifdef TTK_ENABLE_OPENMP
#ifdef _WIN32
#pragma omp parallel for num_threads(threadNumber_)
#else
#pragma omp parallel for num_threads(threadNumber_) \
  schedule(static, numberOfBins_/threadNumber_)
#endif
#endif
        for(int i=0 ; i< (int) numberOfBins_ ; i++) {
          if((SimplexId)probability_[i].size()==numberOfVertices_) {
            histogram[i] = probability_[i][vertexId];
          } else {
            histogram[i] = 0.0;
          }
        }
      } else {
        fill(histogram.begin(), histogram.end(), 0.0);
      }
    }

    // Turn raw bin counts into probabilities by dividing by the number of
    // observed input fields.
    int normalize() {
      const double normalization = 1.0 / static_cast<double>(numberOfInputs_);
#ifdef TTK_ENABLE_OPENMP
#ifdef _WIN32
#pragma omp parallel for num_threads(threadNumber_)
#else
#pragma omp parallel for num_threads(threadNumber_) collapse(2) \
  schedule(static, (numberOfBins_*numberOfVertices_)/threadNumber_)
#endif
#endif
      for(int i=0 ; i< (int) numberOfBins_ ; i++) {
        for(SimplexId j=0 ; j< (SimplexId) numberOfVertices_ ; j++) {
          probability_[i][j] *= normalization;
        }
      }
      return 0;
    }

    inline int setNumberOfBins(const int number) {
      numberOfBins_ = number;
      return 0;
    }

    inline int setNumberOfVertices(const SimplexId number) {
      numberOfVertices_ = number;
      return 0;
    }

    inline int setRange(const double min, const double max) {
      rangeMin_ = min;
      rangeMax_ = max;
      return 0;
    }

  protected:

    std::vector<double>               binValue_;
    std::vector<std::vector<double> > probability_;
    int                               numberOfBins_;
    int                               numberOfInputs_;
    SimplexId                         numberOfVertices_;
    double                            rangeMin_;
    double                            rangeMax_;
    // std::vector<int> selection_; // TODO : selection support
  };

  // Computes, over an ensemble of scalar fields defined on the same domain,
  // per-vertex lower/upper bounds, a per-vertex histogram of values, and the
  // mean field.
  class UncertainDataEstimator : public Debug{

    public:

      UncertainDataEstimator();

      ~UncertainDataEstimator();

      /// Execute the package.
      /// \return Returns 0 upon success, negative values otherwise.
      template <class dataType>
        int execute() const;

      /// Pass a pointer to an input array representing a scalarfield.
      /// The array is expected to be correctly allocated. idx in [0,numberOfInputs_[
      /// \param idx Index of the input scalar field.
      /// \param data Pointer to the data array.
      /// \return Returns 0 upon success, negative values otherwise.
      /// \sa setNumberOfInputs() and setVertexNumber().
      inline int setInputDataPointer(int idx, void *data){
        if(idx < numberOfInputs_){
          inputData_[idx] = data;
        }
        else{
          return -1;
        }
        return 0;
      }

      /// Pass a pointer to an output array representing the lower bound scalar field.
      /// The array is expected to be correctly allocated.
      /// \param data Pointer to the data array.
      /// \return Returns 0 upon success, negative values otherwise.
      /// \sa setVertexNumber()
      inline int setOutputLowerBoundField(void *data){
        outputLowerBoundField_ = data;
        return 0;
      }

      /// Pass a pointer to an output array representing the upper bound scalar field.
      /// The array is expected to be correctly allocated.
      /// \param data Pointer to the data array.
      /// \return Returns 0 upon success, negative values otherwise.
      /// \sa setVertexNumber()
      inline int setOutputUpperBoundField(void *data){
        outputUpperBoundField_ = data;
        return 0;
      }

      // Output array for the probability field of bin idx
      // (silently ignored when idx is out of range).
      inline int setOutputProbability(int idx, void *data){
        if(idx < binCount_){
          outputProbability_[idx] = data;
        }
        return 0;
      }

      inline int setOutputMeanField(void *data) {
        outputMeanField_ = data;
        return 0;
      }

      inline int setComputeLowerBound(const bool &state){
        computeLowerBound_ = state;
        return 0;
      }

      inline int setComputeUpperBound(const bool &state){
        computeUpperBound_ = state;
        return 0;
      }

      /// Set the number of vertices in the scalar field.
      /// \param vertexNumber Number of vertices in the data-set.
      /// \return Returns 0 upon success, negative values otherwise.
      inline int setVertexNumber(const SimplexId &vertexNumber){
        vertexNumber_ = vertexNumber;
        return 0;
      }

      // (Re)allocate the per-bin output slots and the bin-value table.
      // NOTE(review): malloc results are not checked here -- confirm the
      // project's allocation-failure policy.
      inline int setBinCount(const int &binCount){
        binCount_ = binCount;
        if(outputProbability_)
          free(outputProbability_);
        outputProbability_ = (void **) malloc(binCount*sizeof(void *));
        for(int b=0 ; b<binCount ; b++)
          outputProbability_[b] = NULL;
        if(binValues_)
          free(binValues_);
        binValues_ = (double *) malloc(binCount*sizeof(double));
        return 0;
      }

      /// Set the number of input scalar fields
      /// \param numberOfInputs Number of input scalar fields.
      /// \return Returns 0 upon success, negative values otherwise
      inline int setNumberOfInputs(int numberOfInputs){
        numberOfInputs_ = numberOfInputs;
        if(inputData_)
          free(inputData_);
        inputData_ = (void **) malloc(numberOfInputs*sizeof(void *));
        for(int i=0 ; i<numberOfInputs ; i++){
          inputData_[i] = NULL;
        }
        return 0;
      }

      // Center value of bin b (0.0 when b is out of range).
      inline double getBinValue(int b){
        if(b<binCount_)
          return binValues_[b];
        return 0.0;
      }

    protected:

      SimplexId vertexNumber_;
      int numberOfInputs_;
      int binCount_;
      double *binValues_; //TODO : std::vector<double>
      bool computeLowerBound_;
      bool computeUpperBound_;
      void **inputData_; //TODO : std::vector<void*>
      void *outputLowerBoundField_;
      void *outputUpperBoundField_;
      void **outputProbability_; //TODO : std::vector<void*>
      void *outputMeanField_;
  };
}

// if the package is a pure template class, uncomment the following line
// #include <UncertainDataEstimator.cpp>

// template functions
template <class dataType> int ttk::UncertainDataEstimator::execute() const{

  Timer t;

  // Check the consistency of the variables
#ifndef TTK_ENABLE_KAMIKAZE
  if(!numberOfInputs_)
    return -1;
  if(!vertexNumber_)
    return -2;
  if(!inputData_)
    return -3;
  for(int i=0 ; i<numberOfInputs_ ; i++){
    if(!inputData_[i])
      return -4;
  }
  if(!outputLowerBoundField_)
    return -5;
  if(!outputUpperBoundField_)
    return -6;
#endif

  SimplexId count = 0;

  // Pointers type casting
  dataType *outputLowerBoundField = (dataType *) outputLowerBoundField_;
  dataType *outputUpperBoundField = (dataType *) outputUpperBoundField_;
  double **outputProbability = (double **) outputProbability_;
  dataType **inputData = (dataType **) inputData_;
  double *outputMeanField = static_cast<double*>(outputMeanField_);

#ifdef TTK_ENABLE_OPENMP
  omp_lock_t writeLock;
  omp_init_lock(&writeLock);
#pragma omp parallel for num_threads(threadNumber_)
#endif
  for(SimplexId v = 0; v < (SimplexId) vertexNumber_; v++){

    // Avoid any processing if the abort signal is sent
    if((!wrapper_)||((wrapper_)&&(!wrapper_->needsToAbort()))){

      // For the lower bound scalar field
      if(computeLowerBound_){
        // Initialisation : values of the first input
        outputLowerBoundField[v] = inputData[0][v];
        // Loop over the inputs
        for(int inp=1 ; inp < numberOfInputs_ ; inp++){
          // Minimum value
          if(computeLowerBound_)
            if(inputData[inp][v] < outputLowerBoundField[v])
              outputLowerBoundField[v] = inputData[inp][v];
        }
      }

      // For the upper bound scalar field
      if(computeUpperBound_){
        // Initialisation : values of the first input
        outputUpperBoundField[v] = inputData[0][v];
        // Loop over the inputs
        for(int inp=1 ; inp < numberOfInputs_ ; inp++){
          // Maximum value
          if(computeUpperBound_)
            if(inputData[inp][v] > outputUpperBoundField[v])
              outputUpperBoundField[v] = inputData[inp][v];
        }
      }

      // Update the progress bar of the wrapping code -- to adapt
      // NOTE(review): (vertexNumber_)/10 is 0 when vertexNumber_ < 10, which
      // would make the modulo below divide by zero -- confirm callers always
      // supply at least 10 vertices or guard this.
      if(debugLevel_ > advancedInfoMsg){
#ifdef TTK_ENABLE_OPENMP
        omp_set_lock(&writeLock);
#endif
        if((wrapper_)
          &&(!(count % ((vertexNumber_)/10)))){
          wrapper_->updateProgress((count + 1.0)
            /vertexNumber_);
        }
        count++;
#ifdef TTK_ENABLE_OPENMP
        omp_unset_lock(&writeLock);
#endif
      }
    }
  }

  // Histogram
  if(computeUpperBound_ && computeLowerBound_){

    // Range
    double range[2];
    range[0] = outputLowerBoundField[0];
    range[1] = outputUpperBoundField[0];

    for(SimplexId v=0 ; v<vertexNumber_ ; v++){
      if(outputLowerBoundField[v] < range[0])
        range[0] = outputLowerBoundField[v];
      if(outputUpperBoundField[v] > range[1])
        range[1] = outputUpperBoundField[v];
    }

    // Interval between bins
    double dx = (range[1]-range[0]) / (double)binCount_;

    // Bin values
    for(int b=0 ; b<binCount_ ; b++){
      binValues_[b] = range[0]+(dx/2.0) + (double)b * dx;
    }

    int idx;
    double increment = 1.0 / (double)numberOfInputs_;
#ifdef TTK_ENABLE_OPENMP
#pragma omp parallel for private(idx) num_threads(threadNumber_)
#endif
    for(SimplexId v=0 ; v<vertexNumber_ ; v++){
      for(int i=0 ; i<numberOfInputs_ ; i++){
        // Map the value to a bin; clamp the max value into the last bin.
        idx = (int) floor((inputData[i][v]-range[0])*binCount_/(range[1]-range[0]));
        idx = (idx==binCount_) ? binCount_-1 : idx;
        outputProbability[idx][v] += increment;
      }
    }
  }

  // Mean field
  for(SimplexId v=0 ; v<vertexNumber_ ; v++) {
    double sum = 0.0;
    for(int i=0 ; i<numberOfInputs_ ; i++) {
      sum += static_cast<double>(inputData[i][v]);
    }
    outputMeanField[v] = sum / static_cast<double>(numberOfInputs_);
  }

#ifdef TTK_ENABLE_OPENMP
  omp_destroy_lock(&writeLock);
#endif

  {
    std::stringstream msg;
    msg << "[UncertainDataEstimator] Data-set (" << vertexNumber_
      << " points) processed in "
      << t.getElapsedTime() << " s. ("
      << threadNumber_
      << " thread(s))."
      << std::endl;
    dMsg(std::cout, msg.str(), timeMsg);
  }

  return 0;
}

#endif // UNCERTAINDATAESTIMATOR_H
convolution_pack4.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Generic pack4 convolution for MIPS MSA: 4 input channels and 4 output
// channels are processed per SIMD vector. top_blob is assumed to be already
// allocated to the output shape; weight_data_pack4 holds the kernel repacked
// as 4x4 blocks per spatial tap.
static void convolution_pack4_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack4, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets
    // Precompute, for each of the maxk kernel taps, its element offset within
    // the input row window, accounting for dilation.
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const float* bias_data_ptr = bias_data;

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        float* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Accumulator starts at the bias (or zero if no bias).
                v4f32 _sum = (v4f32)__msa_fill_w(0);

                if (bias_data_ptr)
                {
                    _sum = (v4f32)__msa_ld_w(bias_data_ptr + p * 4, 0);
                }

                const float* kptr = (const float*)weight_data_pack4.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const float* sptr = m.row(i * stride_h) + j * stride_w * 4;

                    for (int k = 0; k < maxk; k++) // 29.23
                    {
                        const float* slptr = sptr + space_ofs[k] * 4;

                        // Broadcast each of the 4 packed input lanes and
                        // multiply-accumulate against the matching 4x4 weight
                        // block (16 floats per tap).
                        v4f32 _val0 = __msa_fill_w_f32(slptr[0]);
                        v4f32 _val1 = __msa_fill_w_f32(slptr[1]);
                        v4f32 _val2 = __msa_fill_w_f32(slptr[2]);
                        v4f32 _val3 = __msa_fill_w_f32(slptr[3]);

                        v4f32 _w0 = (v4f32)__msa_ld_w(kptr, 0);
                        v4f32 _w1 = (v4f32)__msa_ld_w(kptr + 4, 0);
                        v4f32 _w2 = (v4f32)__msa_ld_w(kptr + 8, 0);
                        v4f32 _w3 = (v4f32)__msa_ld_w(kptr + 12, 0);

                        _sum = __msa_fmadd_w(_sum, _val0, _w0);
                        _sum = __msa_fmadd_w(_sum, _val1, _w1);
                        _sum = __msa_fmadd_w(_sum, _val2, _w2);
                        _sum = __msa_fmadd_w(_sum, _val3, _w3);

                        kptr += 16;
                    }
                }

                _sum = activation_ps(_sum, activation_type, activation_params);

                __msa_st_w((v4i32)_sum, outptr + j * 4, 0);
            }

            outptr += outw * 4;
        }
    }
}
two_flip_move_generator.h
/*****************************************************************************/
// Copyright (c) 2020-2021 Yuji KOGUMA
// Released under the MIT license
// https://opensource.org/licenses/mit-license.php
/*****************************************************************************/
#ifndef PRINTEMPS_NEIGHBORHOOD_TWO_FLIP_MOVE_GENERATOR_H__
#define PRINTEMPS_NEIGHBORHOOD_TWO_FLIP_MOVE_GENERATOR_H__

#include "abstract_move_generator.h"

namespace printemps {
namespace neighborhood {
/*****************************************************************************/
// Generates "two-flip" neighborhood moves: for each flippable pair of binary
// variables, one move sets (first, second) to (1, 0) and a mirrored move sets
// them to (0, 1).
template <class T_Variable, class T_Expression>
class TwoFlipMoveGenerator
    : public AbstractMoveGenerator<T_Variable, T_Expression> {
   private:
   public:
    /*************************************************************************/
    TwoFlipMoveGenerator(void) {
        /// nothing to do
    }

    /*************************************************************************/
    virtual ~TwoFlipMoveGenerator(void) {
        /// nothing to do
    }

    /*************************************************************************/
    void setup(const std::vector<
               std::pair<model_component::Variable<T_Variable, T_Expression> *,
                         model_component::Variable<T_Variable, T_Expression> *>>
                   &a_FLIPPABLE_VARIABLE_PTR_PAIRS) {
        /**
         * Setup move objects.
         */
        const int PAIRS_SIZE = a_FLIPPABLE_VARIABLE_PTR_PAIRS.size();

        // Two moves per pair: (1,0) at index 2*i and its mirror (0,1) at
        // index 2*i+1.
        this->m_moves.resize(2 * PAIRS_SIZE);
        this->m_flags.resize(2 * PAIRS_SIZE);

        for (auto i = 0; i < PAIRS_SIZE; i++) {
            auto &move = this->m_moves[2 * i];
            move.sense = MoveSense::TwoFlip;
            move.alterations.emplace_back(
                a_FLIPPABLE_VARIABLE_PTR_PAIRS[i].first, 1);
            move.alterations.emplace_back(
                a_FLIPPABLE_VARIABLE_PTR_PAIRS[i].second, 0);
            move.is_univariable_move = false;
            move.is_selection_move   = false;

            // The move relates to every constraint touched by either variable.
            utility::update_union_set(&(move.related_constraint_ptrs),
                                      a_FLIPPABLE_VARIABLE_PTR_PAIRS[i]
                                          .first->related_constraint_ptrs());

            utility::update_union_set(&(move.related_constraint_ptrs),
                                      a_FLIPPABLE_VARIABLE_PTR_PAIRS[i]
                                          .second->related_constraint_ptrs());

            move.is_special_neighborhood_move = true;
            move.is_available                 = true;
            move.overlap_rate                 = 0.0;

            // Mirrored move: same metadata, flipped target values.
            this->m_moves[2 * i + 1]                       = move;
            this->m_moves[2 * i + 1].alterations[0].second = 0;
            this->m_moves[2 * i + 1].alterations[1].second = 1;
        }

        /**
         * Setup the move updater, which recomputes the availability flag of
         * every move (1 = candidate, 0 = filtered out).
         */
        auto move_updater =  //
            [this](auto *     a_moves_ptr,                      //
                   auto *     a_flags,                          //
                   const bool a_ACCEPT_ALL,                     //
                   const bool a_ACCEPT_OBJECTIVE_IMPROVABLE,    //
                   const bool a_ACCEPT_FEASIBILITY_IMPROVABLE,  //
                   [[maybe_unused]] const bool a_IS_ENABLED_PARALLEL) {
                const int MOVES_SIZE = a_moves_ptr->size();
#ifdef _OPENMP
#pragma omp parallel for if (a_IS_ENABLED_PARALLEL) schedule(static)
#endif
                for (auto i = 0; i < MOVES_SIZE; i++) {
                    (*a_flags)[i] = 1;
                    if (!(*a_moves_ptr)[i].is_available) {
                        (*a_flags)[i] = 0;
                        continue;
                    }
                    if (neighborhood::has_selection_variable(
                            (*a_moves_ptr)[i])) {
                        (*a_flags)[i] = 0;
                        continue;
                    }
                    if (neighborhood::has_fixed_variable((*a_moves_ptr)[i])) {
                        (*a_flags)[i] = 0;
                        continue;
                    }
                    // Reject moves that would not change any variable.
                    for (const auto &alteration :
                         (*a_moves_ptr)[i].alterations) {
                        if (alteration.first->value() == alteration.second) {
                            (*a_flags)[i] = 0;
                            break;
                        }
                    }
                    if ((*a_flags)[i] == 0) {
                        continue;
                    }
                    if (a_ACCEPT_ALL) {
                        /** nothing to do */
                    } else {
                        // Keep only moves that can improve the objective or
                        // feasibility, as requested.
                        if (a_ACCEPT_OBJECTIVE_IMPROVABLE &&
                            neighborhood::has_objective_improvable_variable(
                                (*a_moves_ptr)[i])) {
                            continue;
                        }
                        if (a_ACCEPT_FEASIBILITY_IMPROVABLE &&
                            neighborhood::has_feasibility_improvable_variable(
                                (*a_moves_ptr)[i])) {
                            continue;
                        }
                        (*a_flags)[i] = 0;
                    }
                }
            };
        this->m_move_updater = move_updater;
    }
};
}  // namespace neighborhood
}  // namespace printemps
#endif
/*****************************************************************************/
// END
/*****************************************************************************/
GB_binop__islt_uint16.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):       GB (_AaddB__islt_uint16)
// A.*B function (eWiseMult):     GB (_AemultB_01__islt_uint16)
// A.*B function (eWiseMult):     GB (_AemultB_02__islt_uint16)
// A.*B function (eWiseMult):     GB (_AemultB_03__islt_uint16)
// A.*B function (eWiseMult):     GB (_AemultB_bitmap__islt_uint16)
// A*D function (colscale):       GB (_AxD__islt_uint16)
// D*A function (rowscale):       GB (_DxB__islt_uint16)
// C+=B function (dense accum):   GB (_Cdense_accumB__islt_uint16)
// C+=b function (dense accum):   GB (_Cdense_accumb__islt_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_uint16)
// C=scalar+B                     GB (_bind1st__islt_uint16)
// C=scalar+B'                    GB (_bind1st_tran__islt_uint16)
// C=A+scalar                     GB (_bind2nd__islt_uint16)
// C=A'+scalar                    GB (_bind2nd_tran__islt_uint16)

// C type:   uint16_t
// A type:   uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij < bij)

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint16_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint16_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x < y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISLT || GxB_NO_UINT16 || GxB_NO_ISLT_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__islt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__islt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__islt_uint16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable (the block above always returns); harmless
    // artifact of the code generator -- do not hand-edit generated files.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__islt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__islt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *restrict Cx = (uint16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__islt_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__islt_uint16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__islt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__islt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__islt_uint16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__islt_uint16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        uint16_t bij = GBX (Bx, p, false) ;
        Cx [p] = (x < bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__islt_uint16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint16_t *Cx = (uint16_t *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        uint16_t aij = GBX (Ax, p, false) ;
        Cx [p] = (aij < y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x < aij) ;                       \
}

GrB_Info GB (_bind1st_tran__islt_uint16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint16_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij < y) ;                       \
}

GrB_Info GB (_bind2nd_tran__islt_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
window.c
/********************************************************************[libaroma]*
 * Copyright (C) 2011-2015 Ahmad Amarullah (http://amarullz.com/)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *______________________________________________________________________________
 *
 * Filename    : window.c
 * Description : window
 *
 * + This is part of libaroma, an embedded ui toolkit.
 * + 06/04/15 - Author(s): Ahmad Amarullah
 *
 */
#ifndef __libaroma_window_c__
#define __libaroma_window_c__
#include <aroma_internal.h>
#include "ui_internal.h"
#ifdef __cplusplus
extern "C" {
#endif

/* check wm macro - bail out of the enclosing function with RETVAL when the
 * window manager singleton is not initialized yet */
#define __CHECK_WM(RETVAL) \
  if (libaroma_wm()==NULL){ \
    ALOGW("window manager uninitialized"); \
    return RETVAL; \
  }

/*
 * Variable    : _libaroma_window_measurement_dp
 * Type        : byte
 * Descriptions: default measurement
 *
 * 1 = coordinates given to window/control APIs are density-independent
 * pixels (dp); 0 = raw pixels.
 */
static byte _libaroma_window_measurement_dp=1;

/*
 * Function    : libaroma_window_usedp
 * Return Value: byte
 * Descriptions: use dp for measurement
 *
 * isdp: 1 selects dp measurement, 0 selects raw pixels; any other value
 * leaves the mode unchanged, so the call doubles as a getter.
 * Returns the mode now in effect.
 */
byte libaroma_window_usedp(byte isdp){
  if (isdp==1){
    _libaroma_window_measurement_dp=1;
  }
  else if (!isdp){
    _libaroma_window_measurement_dp=0;
  }
  return _libaroma_window_measurement_dp;
} /* End of libaroma_window_usedp */

/*
 * Function    : libaroma_window_measure_point
 * Return Value: int
 * Descriptions: mesure point
 *
 * Converts x from dp to px when dp measurement is active, otherwise
 * returns x unchanged.
 */
int libaroma_window_measure_point(int x){
  if (_libaroma_window_measurement_dp){
    return libaroma_dp(x);
  }
  return x;
} /* End of libaroma_window_measure_point */

/*
 * Function    : _libaroma_window_measure_save
 * Return Value: void
 * Descriptions: save measurement value
 *
 * Mirrors the resolved pixel geometry (x/y/w/h) back into the public
 * left/top/width/height fields, converting px->dp when dp measurement is
 * active. Either argument may be NULL to skip it.
 */
void _libaroma_window_measure_save(LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl){
  if (win!=NULL){
    if (_libaroma_window_measurement_dp){
      win->left = libaroma_px(win->x);
      win->top = libaroma_px(win->y);
      win->width= libaroma_px(win->w);
      win->height= libaroma_px(win->h);
    }
    else{
      win->left = win->x;
      win->top = win->y;
      win->width= win->w;
      win->height= win->h;
    }
  }
  if (ctl!=NULL){
    if (_libaroma_window_measurement_dp){
      ctl->left = libaroma_px(ctl->x);
      ctl->top = libaroma_px(ctl->y);
      ctl->width= libaroma_px(ctl->w);
      ctl->height= libaroma_px(ctl->h);
    }
    else{
      ctl->left = ctl->x;
      ctl->top = ctl->y;
      ctl->width= ctl->w;
      ctl->height= ctl->h;
    }
  }
} /* End of _libaroma_window_measure_save */

/*
 * Function    : libaroma_window_measure_calculate
 * Return Value: int
 * Descriptions: calculate measurement
 *
 * cv      : already-converted value; returned unchanged when pos is not a
 *           sentinel
 * pos     : raw requested value; LIBAROMA_POS_* / LIBAROMA_SIZE_* sentinel
 *           constants (all <=0) select fractions of max
 * max     : parent extent the fractions are taken from
 * is_size : nonzero when resolving a size, zero when resolving a position
 * x       : origin offset subtracted from fractional sizes so that
 *           position + size stays within max
 *
 * For an unrecognized non-positive pos the switch default returns abs(pos).
 */
int libaroma_window_measure_calculate(
    int cv, int pos, int max, int is_size, int x){
  if (is_size){
    if (pos<=0){
      switch (pos){
        case LIBAROMA_POS_HALF:
          return (max / 2)-x;
          break;
        case LIBAROMA_POS_1P3:
          return (max / 3)-x;
          break;
        case LIBAROMA_POS_2P3:
          return (max * 2 / 3)-x;
          break;
        case LIBAROMA_POS_1P4:
          return (max / 4)-x;
          break;
        case LIBAROMA_POS_3P4:
          return (max * 3 / 4)-x;
          break;
        case LIBAROMA_SIZE_FULL:
          return max;
          break;
        case LIBAROMA_SIZE_HALF:
          return max / 2;
          break;
        case LIBAROMA_SIZE_THIRD:
          return max / 3;
          break;
        case LIBAROMA_SIZE_QUARTER:
          return max / 4;
          break;
        default:
          return abs(pos);
      }
    }
  }
  else{
    if (pos<0){
      switch (pos){
        case LIBAROMA_POS_HALF:
          return max / 2;
          break;
        case LIBAROMA_POS_1P3:
          return max / 3;
          break;
        case LIBAROMA_POS_2P3:
          return max * 2 / 3;
          break;
        case LIBAROMA_POS_1P4:
          return max / 4;
          break;
        case LIBAROMA_POS_3P4:
          return max * 3 / 4;
          break;
        default:
          return abs(pos);
      }
    }
  }
  return cv;
} /* End of libaroma_window_measure_calculate */

/*
 * Function    : libaroma_window_measure_size
 * Return Value: byte
 * Descriptions: measure window size
 */
byte libaroma_window_measure_size(LIBAROMA_WINDOWP win){
  if (win){
    if
(win->parent!=NULL){ ALOGW("window_resize cannot be used for child window"); return 0; } if (_libaroma_window_measurement_dp){ win->x = libaroma_dp(win->rx); win->y = libaroma_dp(win->ry); win->w = libaroma_dp(win->rw); win->h = libaroma_dp(win->rh); } else{ win->x = win->rx; win->y = win->ry; win->w = win->rw; win->h = win->rh; } win->ax=win->x; win->ay=win->y; win->x=libaroma_window_measure_calculate( win->x, win->rx, libaroma_wm()->w, 0, 0 ); win->y=libaroma_window_measure_calculate( win->y, win->ry, libaroma_wm()->h, 0, 0 ); win->w=libaroma_window_measure_calculate( win->w, win->rw, libaroma_wm()->w, 1, win->x ); win->h=libaroma_window_measure_calculate( win->h, win->rh, libaroma_wm()->h, 1, win->y ); if (win->w+win->x>libaroma_wm()->w){ win->w = libaroma_wm()->w-win->x; } if (win->h+win->y>libaroma_wm()->h){ win->h = libaroma_wm()->h-win->y; } _libaroma_window_measure_save(win,NULL); LIBAROMA_MSG _msg; libaroma_window_process_event(win,libaroma_wm_compose( &_msg, LIBAROMA_MSG_WIN_MEASURED, NULL, 0, 0) ); return 1; } return 0; } /* End of libaroma_window_measure */ /* * Function : _libaroma_window_ui_thread * Return Value: byte * Descriptions: window ui thread */ byte _libaroma_window_ui_thread(LIBAROMA_WINDOWP win) { int i; byte need_sync = 0; if (win->active==1){ LIBAROMA_CONTROLP toast_ctl=NULL; #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<win->childn;i++){ LIBAROMA_CONTROLP c=win->childs[i]; if (c->handler->thread!=NULL){ if (!libaroma_ctl_is_toast(c)){ //if not a toast, draw now if (c->handler->thread(c)){ if (libaroma_control_draw(c,0)){ libaroma_wm_updatesync( c->x+win->x, c->y+win->y, c->w, c->h, 0 ); need_sync=1; } } } else toast_ctl=c; //else, save it for draw at last } } if (toast_ctl!=NULL){ if (libaroma_control_draw(toast_ctl, 0)){ libaroma_wm_updatesync( toast_ctl->x+win->x, toast_ctl->y+win->y, toast_ctl->w, toast_ctl->h, 0 ); libaroma_png_save(win->dc, "/tmp/dc.png"); if (!need_sync) need_sync=1; } } } return 
need_sync; } /* End of _libaroma_window_ui_thread */ /* * Function : libaroma_window * Return Value: LIBAROMA_WINDOWP * Descriptions: creates a new window */ LIBAROMA_WINDOWP libaroma_window( char * bg_theme_name, int x, int y, int w, int h ){ __CHECK_WM(NULL); LIBAROMA_WINDOWP win = (LIBAROMA_WINDOWP) calloc(sizeof(LIBAROMA_WINDOW),1); if (!win){ ALOGW("libaroma_window alloc window data failed"); return NULL; } if (bg_theme_name){ snprintf(win->theme_bg,256,"%s",bg_theme_name); } else{ win->theme_bg[0]=0; } win->rx = x; win->ry = y; win->rw = w; win->rh = h; win->onpool=1; win->prev_screen = libaroma_fb_snapshoot_canvas(); win->ui_thread = _libaroma_window_ui_thread; libaroma_window_measure_size(win); return win; } /* End of libaroma_window */ /* * Function : libaroma_window_free * Return Value: byte * Descriptions: free window */ byte libaroma_window_free( LIBAROMA_WINDOWP win ){ __CHECK_WM(0); if (win==NULL){ return 0; } /* inactivate it */ if (win->parent==NULL){ if (libaroma_wm_get_active_window()==win){ /* detach active window from window manager */ libaroma_wm_set_active_window(NULL); } LIBAROMA_MSG _msg; libaroma_window_process_event(win, libaroma_wm_compose(&_msg, LIBAROMA_MSG_WIN_INACTIVE, NULL, 0, 0)); } if (win->handler!=NULL){ if (win->handler->prefree!=NULL){ win->handler->prefree(win); } } /* delete childs */ int i; if (win->childn>0){ #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<win->childn;i++){ libaroma_control_free(win->childs[i]); } free(win->childs); } if (win->bg){ libaroma_canvas_free(win->bg); win->bg=NULL; } if (win->dc){ libaroma_canvas_free(win->dc); win->dc=NULL; } if (win->handler!=NULL){ if (win->handler->postfree!=NULL){ win->handler->postfree(win); } } free(win); return 1; } /* End of libaroma_window_free */ /* * Function : _libaroma_window_updatebg * Return Value: byte * Descriptions: update window background */ byte _libaroma_window_updatebg(LIBAROMA_WINDOWP win){ if (win==NULL){ 
ALOGW("window_recalculate win is NULL"); return 0; } if (win->handler!=NULL){ if (win->handler->updatebg!=NULL){ if (win->handler->updatebg(win)){ if (win->onupdatebg){ win->onupdatebg(win,win->bg); } return 1; } return 0; } } if (win->parent!=NULL){ return 0; } int w = win->w; int h = win->h; /* draw background */ if (win->bg!=NULL){ if ((win->bg->w==w)&&(win->bg->h==h)){ /* not need recreate background */ return 1; } libaroma_canvas_free(win->bg); } win->bg = libaroma_canvas(w,h); /* default canvas color */ libaroma_canvas_setcolor( win->bg, libaroma_colorget(NULL,win)->window_bg, 0xff ); /* from theme canvas */ if (win->theme_bg[0]!=0){ libaroma_wm_draw_theme( win->bg, win->theme_bg, 0, 0, win->bg->w, win->bg->h, NULL ); } /* from updatebg callback */ if (win->onupdatebg!=NULL){ win->onupdatebg(win,win->bg); } return 1; } /* End of _libaroma_window_updatebg */ /* * Function : _libaroma_window_recalculate * Return Value: byte * Descriptions: recalculate client size */ byte _libaroma_window_recalculate(LIBAROMA_WINDOWP win){ if (win==NULL){ ALOGW("window_recalculate win is NULL"); return 0; } if (libaroma_window_isactive(win)){ _libaroma_window_updatebg(win); libaroma_window_invalidate(win, 1); } return 1; } /* End of _libaroma_window_recalculate */ /* * Function : _libaroma_window_ready * Return Value: byte * Descriptions: window is ready */ byte _libaroma_window_ready(LIBAROMA_WINDOWP win){ __CHECK_WM(0); if (win==NULL){ ALOGW("window_resize win is NULL"); return 0; } int x = win->x; int y = win->y; int w = win->w; int h = win->h; if (w==0){ w = libaroma_wm()->w; x = 0; } if (h==0){ h = libaroma_wm()->h; y = 0; } /* set position */ if (win->dc!=NULL){ libaroma_canvas_free(win->dc); win->dc=NULL; } win->dc= libaroma_wm_canvas(x, y, w, h); if (win->dc==NULL){ ALOGW("window_ready cannot allocate workspace drawing canvas"); return 0; }/* if (libaroma_window_isactive(win)){ libaroma_wm_clean_workspace(); }*/ win->x = x; win->y = y; win->w = win->dc->w; win->h = 
win->dc->h; _libaroma_window_measure_save(win,NULL); _libaroma_window_recalculate(win); return 1; } /* End of _libaroma_window_ready */ /* * Function : libaroma_window_resize * Return Value: byte * Descriptions: resize window */ byte libaroma_window_resize( LIBAROMA_WINDOWP win, int x, int y, int w, int h ){ if (!win){ return 0; } if (win->parent!=NULL){ ALOGW("window_resize cannot be used for child window"); return 0; } win->rx = x; win->ry = y; win->rw = w; win->rh = h; if (libaroma_window_measure_size(win)){ return _libaroma_window_ready(win); } return 0; } /* End of libaroma_window_resize */ /* * Function : libaroma_window_isactive * Return Value: byte * Descriptions: check if window is active */ byte libaroma_window_isactive(LIBAROMA_WINDOWP win){ if (win!=NULL){ LIBAROMA_WINDOWP w = win; while(w->parent){ w=w->parent; } return ((w==libaroma_wm_get_active_window())?1:0); } return 0; } /* End of libaroma_window_isactive */ /* * Function : libaroma_window_add * Return Value: byte * Descriptions: add control into window */ byte libaroma_window_add( LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl ){ __CHECK_WM(0); if (win==NULL){ ALOGW("window_add win is NULL"); return 0; } if (ctl==NULL){ ALOGW("window_add ctl is NULL"); return 0; } if (ctl->window != NULL){ ALOGW("window_add ctl already have window"); return 0; } libaroma_window_measure(win, ctl); if (win->childn==0){ win->childs = (LIBAROMA_CONTROLP *) malloc(sizeof(LIBAROMA_CONTROLP)); if (!win->childs){ ALOGW("window_add malloc failed"); win->childs=NULL; return 0; } win->childs[0]=ctl; } else{ LIBAROMA_CONTROLP * newchilds = (LIBAROMA_CONTROLP *) realloc(win->childs, sizeof(LIBAROMA_CONTROLP)*(win->childn+1)); if (!newchilds){ ALOGW("window_add realloc failed"); return 0; } win->childs = newchilds; win->childs[win->childn] = ctl; } ctl->window = win; win->childn++; _libaroma_window_recalculate(win); return 1; } /* End of libaroma_window_add */ /* * Function : libaroma_window_del * Return Value: byte * 
Descriptions: delete control from window */ byte libaroma_window_del( LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl ){ __CHECK_WM(0); if (ctl==NULL){ ALOGW("window_del ctl is null"); return 0; } if (win==NULL){ ALOGW("window_del win is null"); return 0; } if (win != ctl->window){ return 0; } if (win->childn<=0){ ALOGW("window_del window data corrupt doesn't have childs??"); return 0; } else if (win->childn==1){ if (win->childs[0]==ctl){ ctl->window = NULL; win->childn=0; free(win->childs); win->childs=NULL; _libaroma_window_recalculate(win); return 1; } else{ ALOGW("window_del ctl not found in window"); return 0; } } LIBAROMA_CONTROLP * newchilds = (LIBAROMA_CONTROLP *) malloc(sizeof(LIBAROMA_CONTROLP)*(win->childn-1)); if (!newchilds){ ALOGW("window_del malloc temp childs failed"); return 0; } int j = 0; int i; for (i=0;i<win->childn;i++){ if (win->childs[i]!=ctl){ newchilds[j++]=win->childs[i]; if (j==win->childn-2){ /* current ctl not found */ free(newchilds); ALOGW("window_del ctl not found in window"); return 0; } } } free(win->childs); win->childs=newchilds; win->childn--; _libaroma_window_recalculate(win); return 1; } /* End of libaroma_window_del */ /* * Function : libaroma_window_measure * Return Value: byte * Descriptions: measure control size */ byte libaroma_window_measure(LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl){ if (win&&ctl){ if (_libaroma_window_measurement_dp){ ctl->x = libaroma_dp(ctl->rx); ctl->y = libaroma_dp(ctl->ry); ctl->w = libaroma_dp(ctl->rw); ctl->h = libaroma_dp(ctl->rh); } else{ ctl->x = ctl->rx; ctl->y = ctl->ry; ctl->w = ctl->rw; ctl->h = ctl->rh; } ctl->x=libaroma_window_measure_calculate( ctl->x, ctl->rx, win->w, 0, 0 ); ctl->y=libaroma_window_measure_calculate( ctl->y, ctl->ry, win->h, 0, 0 ); ctl->w=libaroma_window_measure_calculate( ctl->w,ctl->rw, win->w, 1, ctl->x ); ctl->h=libaroma_window_measure_calculate( ctl->h,ctl->rh, win->h, 1, ctl->y ); if (ctl->w+ctl->x>win->w){ ctl->w = win->w-ctl->x; } if (ctl->h+ctl->y>win->h){ 
ctl->h = win->h-ctl->y; } if (ctl->w<ctl->minw){ ctl->w=ctl->minw; } if (ctl->h<ctl->minh){ ctl->h=ctl->minh; } _libaroma_window_measure_save(NULL,ctl); if (ctl->handler->message){ LIBAROMA_MSG _msg; ctl->handler->message(ctl, libaroma_wm_compose( &_msg, LIBAROMA_MSG_WIN_MEASURED, NULL, 0, 0) ); return 1; } } return 0; } /* End of libaroma_window_measure */ /* * Function : libaroma_window_attach * Return Value: LIBAROMA_CONTROLP * Descriptions: attach control into window */ LIBAROMA_CONTROLP libaroma_window_attach( LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl){ /* attach into window */ if (win){ if (libaroma_window_add(win,ctl)){ return ctl; } ALOGW("window_attach cannot attach into window"); libaroma_control_free(ctl); return NULL; } return ctl; } /* End of libaroma_window_attach */ /* * Function : libaroma_window_getid * Return Value: LIBAROMA_CONTROLP * Descriptions: get control by id */ LIBAROMA_CONTROLP libaroma_window_getid( LIBAROMA_WINDOWP win, word id){ __CHECK_WM(NULL); if (win==NULL){ ALOGW("window_control_id win is null"); return NULL; } int i; for (i=0;i<win->childn;i++){ if (win->childs[i]->id==id){ return win->childs[i]; } } return NULL; /* not found */ } /* End of libaroma_window_getid */ /* * Function : libaroma_window_setfocus * Return Value: LIBAROMA_CONTROLP * Descriptions: set control focus */ LIBAROMA_CONTROLP libaroma_window_setfocus( LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl){ if (win==NULL){ ALOGW("window_setfocus window is null"); return NULL; } if (ctl!=NULL){ /* set */ if (win!=ctl->window){ ALOGW("window_setfocus control is not window child"); return NULL; } if (ctl->handler->focus!=NULL){ if (win->focused==ctl){ return ctl; } if (ctl->handler->focus(ctl,1)){ if (win->focused){ win->focused->handler->focus(win->focused,0); } win->focused=ctl; return ctl; } } return NULL; } else{ /* find focus */ if (win->focused){ return win->focused; } int i; for (i=0;i<win->childn;i++){ if (win->childs[i]->handler->focus!=NULL){ return 
libaroma_window_setfocus(win,win->childs[i]); } } } return NULL; } /* End of libaroma_window_setfocus */ /* * Function : libaroma_window_sync * Return Value: byte * Descriptions: sync window canvas */ byte libaroma_window_sync(LIBAROMA_WINDOWP win, int x, int y, int w, int h){ __CHECK_WM(0); if (win==NULL){ ALOGW("libaroma_window_sync win is null"); return 0; } if (win->handler!=NULL){ if (win->handler->sync!=NULL){ return win->handler->sync(win,x,y,w,h); } } if (win->parent!=NULL){ return 0; } if (!win->lock_sync){ if (!libaroma_window_isactive(win)){ ALOGW("libaroma_window_sync win is not active window"); return 0; } if (win->dc==NULL){ ALOGW("window_invalidate dc is null"); return 0; } /* sync workspace */ libaroma_wm_sync(win->x+x,win->y+y,w,h); } return 1; } /* End of libaroma_window_sync */ /* * Function : libaroma_window_invalidate * Return Value: byte * Descriptions: invalidate window drawing */ byte libaroma_window_invalidate(LIBAROMA_WINDOWP win, byte sync){ __CHECK_WM(0); if (win==NULL){ ALOGW("window_invalidate win is null"); return 0; } if (win->handler!=NULL){ if (win->handler->invalidate!=NULL){ return win->handler->invalidate(win,sync); } } if (win->parent!=NULL){ return 0; } if (!libaroma_window_isactive(win)){ ALOGW("window_invalidate win is not active window"); return 0; } if (win->dc==NULL){ ALOGW("window_invalidate dc is null"); return 0; } if ((!win->lock_sync)||(sync==10)){ /* draw bg */ libaroma_draw( win->dc, win->bg, 0, 0, 1); /* draw childs */ int i; #ifdef LIBAROMA_CONFIG_OPENMP #pragma omp parallel for #endif for (i=0;i<win->childn;i++){ /* draw no sync */ libaroma_control_draw(win->childs[i], 0); } /* sync */ if (sync){ libaroma_window_sync(win, 0, 0, win->w, win->h); } } return 1; } /* End of libaroma_window_invalidate */ /* * Function : libaroma_window_hideshow_animated * Return Value: byte * Descriptions: hide/show window - animated */ byte libaroma_window_hideshow_animated(LIBAROMA_WINDOWP win, byte anim, int duration, byte close){ 
if ((!anim)||(duration<50)){ if (close) { byte ret=libaroma_wm_set_active_window(NULL); libaroma_window_free(win); return ret; } else return libaroma_wm_set_active_window(win); } /* lock sync */ win->lock_sync = 1; byte is_active; if (close) is_active=1; else is_active=libaroma_wm_set_active_window(win); if (is_active){ if (!close) win->active=2; if (win->prev_screen==NULL) win->prev_screen=libaroma_canvas(win->w, win->h); //TODO: MOVE THIS TO WM CODE //if (!win->prev_screen->alpha && anim==LIBAROMA_WINDOW_SHOW_ANIMATION_CIRCLE) //libaroma_canvas_fillalpha(win->prev_screen, 0, 0, win->w, win->h, 0xFF); //init alpha for prev screen if needed /* draw window into temp canvas */ LIBAROMA_CANVASP wmc = win->dc; //window had a canvas area of wm, let's grab it LIBAROMA_CANVASP tdc = libaroma_canvas(wmc->w,wmc->h); if (close) libaroma_draw(tdc,wmc,0,0,0); win->dc=tdc; /* switch dc to temporary */ //if closing, deactivate window (otherwise ripple animations are played while animate-closing) if (close) libaroma_wm_set_active_window(NULL); else libaroma_window_invalidate(win, 10); //otherwise draw real window image at temp dc long start = libaroma_tick(); int delta = 0; while ((delta=libaroma_tick()-start)<duration){ float state = (((float) delta)/((float) duration)); switch (anim){ case LIBAROMA_WINDOW_SHOW_ANIMATION_PAGE_TOP: { if (state>=1.0){ break; } float swift_out_state = close? libaroma_cubic_bezier_easeout(state): libaroma_cubic_bezier_easein(state); int y; if (close) y = (swift_out_state * win->h); else y = win->h - (swift_out_state * win->h); int h = win->h - y; if (h>0){ libaroma_draw_ex( wmc, win->prev_screen, 0, 0, 0, 0, win->w, win->h-h, 0, 0xFF ); libaroma_draw_ex( wmc, win->dc, 0, y, 0, 0, win->w, h, 0, 0xff ); libaroma_wm_sync(win->x,win->y,win->w, win->h); } } break; case LIBAROMA_WINDOW_SHOW_ANIMATION_PAGE_LEFT: { if (state>=1.0){ break; } float swift_out_state = close? 
libaroma_cubic_bezier_easeout(state): libaroma_cubic_bezier_easein(state); int x; if (close) x = swift_out_state * win->w; else x = win->w - (swift_out_state * win->w); int w = win->w - x; if (w>0){ libaroma_draw_ex( wmc, win->prev_screen, 0, 0, 0, 0, win->w-w, win->h, 0, 0xFF ); libaroma_draw_ex( wmc, win->dc, x, 0, 0, 0, w, win->h, 0, 0xff ); libaroma_wm_sync(win->x,win->y,win->w, win->h); } } break; case LIBAROMA_WINDOW_SHOW_ANIMATION_PAGE_RIGHT: { if (state>=1.0){ break; } float swift_out_state = close? libaroma_cubic_bezier_easeout(state): libaroma_cubic_bezier_easein(state); int x; if (close) x = swift_out_state * win->w; else x = win->w - (swift_out_state * win->w); int w = win->w - x; //printf("X=%d, W=%d\n", x, w); if (w>0){ //libaroma_canvas_setcolor(wmc, RGB(0), 0xFF); libaroma_draw_ex( wmc, win->prev_screen, w, 0, w, 0, win->w-w, win->h, 0, 0xFF ); libaroma_draw_ex( wmc, win->dc, 0, 0, x, 0, w, win->h, 0, 0xff ); libaroma_wm_sync(win->x,win->y, win->w, win->h); } } break; case LIBAROMA_WINDOW_SHOW_ANIMATION_SWAP_LEFT: { if (state>=1.0){ break; } float swift_out_state = close? libaroma_cubic_bezier_easeout(state): libaroma_cubic_bezier_easein(state); int x; if (close) x = swift_out_state * win->w; else x = win->w - (swift_out_state * win->w); int w = win->w - x; //printf("X=%d, W=%d\n", x, w); if (w>0){ libaroma_canvas_setcolor(wmc, RGB(0), 0xFF); libaroma_draw_ex( wmc, win->prev_screen, 0, 0, w, 0, win->w-w, win->h, 0, 0xFF ); libaroma_draw_ex( wmc, win->dc, 0, 0, x, 0, w, win->h, 0, 0xff ); libaroma_wm_sync(win->x,win->y, win->w, win->h); } } break; case LIBAROMA_WINDOW_SHOW_ANIMATION_SWAP_RIGHT: { if (state>=1.0){ break; } float swift_out_state = close? 
libaroma_cubic_bezier_easeout(state): libaroma_cubic_bezier_easein(state); int x; if (close) x = swift_out_state * win->w; else x = win->w - (swift_out_state * win->w); int w = win->w - x; //printf("X=%d, W=%d\n", x, w); if (w>0){ libaroma_canvas_setcolor(wmc, RGB(0), 0xFF); libaroma_draw_ex( wmc, win->prev_screen, w, 0, 0, 0, win->w-w, win->h, 0, 0xFF ); libaroma_draw_ex( wmc, win->dc, x, 0, 0, 0, w, win->h, 0, 0xff ); libaroma_wm_sync(win->x,win->y, win->w, win->h); } } break; default:{ state = close? libaroma_cubic_bezier_easeout(state): libaroma_cubic_bezier_easein(state); if (close) state = (1.0 - state); if ((!close && state>=1.0) || (close && state <=0.0)){ break; } //ALOGV("Playing %s animation with state %1.2f", close?"close":"open", state); libaroma_art_draw_switch_animation(libaroma_ani_win_to_art(anim), wmc, win->prev_screen, win->dc, //this is needed because snapshots are taken //using fb size, not wm workspace size libaroma_wm()->x, libaroma_wm()->y, win->prev_screen->w, win->prev_screen->h, 0, 0, win->w, win->h, state); libaroma_wm_sync(win->x, win->y, win->w, win->h); } break; } libaroma_sleep(12); } if (!close) libaroma_draw(wmc,win->dc,0,0,0); //copy real window image to original canvas win->dc=wmc; /* switch dc to wm canvas area */ libaroma_canvas_free(tdc); } win->lock_sync = 0; /* sync view now */ if (close){ //libaroma_wm_set_active_window(NULL); libaroma_wm_sync(win->x,win->y,win->w,win->h); libaroma_window_free(win); } else { win->active=1; libaroma_wm_sync(win->x,win->y,win->w,win->h); /* send activate */ LIBAROMA_MSG _msg; libaroma_window_process_event(win,libaroma_wm_compose( &_msg, LIBAROMA_MSG_WIN_ACTIVE, NULL, 10, 0) ); } return 1; } /* * Function : libaroma_window_calculate_pos * Return Value: void * Descriptions: calculate screen position to window/control position */ void libaroma_window_calculate_pos( LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl, int * x, int * y ){ if (win!=NULL){ *x-=win->x; *y-=win->y; } else if 
((ctl!=NULL)&&(ctl->window!=NULL)){ *x-=ctl->window->x; *y-=ctl->window->y; } if (ctl!=NULL){ *x-=ctl->x; *y-=ctl->y; } /* *x-=libaroma_wm()->x; *y-=libaroma_wm()->y; */ } /* End of libaroma_window_calculate_pos */ /* * Function : libaroma_window_calculate_pos_abs * Return Value: void * Descriptions: calculate absolute screen position to top window position */ void libaroma_window_calculate_pos_abs( LIBAROMA_WINDOWP win, LIBAROMA_CONTROLP ctl, int * x, int * y ){ if (ctl!=NULL){ *x-=ctl->x; *y-=ctl->y; win=ctl->window; } while (win!=NULL){ *x-=win->ax; *y-=win->ay; win=win->parent; } } /* End of libaroma_window_calculate_pos_abs */ /* * Function : _libaroma_window_is_inside * Return Value: byte * Descriptions: check position coordinate */ byte _libaroma_window_is_inside(LIBAROMA_CONTROLP ctl, int x, int y) { int wx = ctl->x; int wx2 = wx + ctl->w; int wy = ctl->y; int wy2 = wy + ctl->h; if ((x >= wx) && (x < wx2) && (y >= wy) && (y < wy2)) { return 1; } return 0; } /* End of _libaroma_window_is_inside */ /* * Function : libaroma_window_post_command * Return Value: byte * Descriptions: post direct command */ byte libaroma_window_post_command(dword cmd){ return libaroma_msg_post( LIBAROMA_MSG_WIN_DIRECTMSG, 0, 0, (int) cmd, 0, NULL ); } /* End of libaroma_window_post_command */ /* * Function : libaroma_window_post_command_ex * Return Value: byte * Descriptions: post direct command extended */ byte libaroma_window_post_command_ex(dword cmd, byte state, int key, int y, voidp d){ return libaroma_msg_post( LIBAROMA_MSG_WIN_DIRECTMSG, state, key, (int) cmd, y, d ); } /* End of libaroma_window_post_command */ /* * Function : libaroma_window_process_event * Return Value: dword * Descriptions: process message */ dword libaroma_window_process_event(LIBAROMA_WINDOWP win, LIBAROMA_MSGP msg){ __CHECK_WM(0); if (win==NULL){ ALOGW("window_event win is null"); return 0; } if (win->parent!=NULL){ ALOGW("window_event cannot used for child window..."); return 0; } dword ret = 0; if 
(win->handler){ if (win->handler->message_hooker){ if (win->handler->message_hooker(win,msg,&ret)){ return ret; } } } switch (msg->msg){ case LIBAROMA_MSG_WIN_ACTIVE: { /* set current window size */ win->focused=NULL; win->touched=NULL; if (msg->x!=10){ _libaroma_window_ready(win); } if ((!win->lock_sync)||(msg->x==10)){ if ((!win->active)||(msg->x==10)){ int i; win->active=1; /* signal child */ for (i=0;i<win->childn;i++){ if (win->childs[i]->handler->message){ win->childs[i]->handler->message(win->childs[i], msg); } } } } } break; case LIBAROMA_MSG_WIN_RESIZE: { int i; _libaroma_window_ready(win); for (i=0;i<win->childn;i++){ if (win->childs[i]->handler->message){ win->childs[i]->handler->message(win->childs[i], msg); } } } break; case LIBAROMA_MSG_WIN_INACTIVE: { if (win->active){ /* stop thread manager */ win->active=0; /* send inactive message to child */ int i; for (i=0;i<win->childn;i++){ if (win->childs[i]->handler->message){ win->childs[i]->handler->message(win->childs[i], msg); } } win->focused=NULL; win->touched=NULL; } } break; case LIBAROMA_MSG_WIN_MEASURED: { /* remeasured all childs */ int i; for (i=0;i<win->childn;i++){ libaroma_window_measure(win,win->childs[i]); } } break; case LIBAROMA_MSG_WIN_DIRECTMSG: { return (dword) msg->x; } break; case LIBAROMA_MSG_WIN_INVALIDATE: { libaroma_window_invalidate(win, 1); } break; case LIBAROMA_MSG_TOUCH: { /* touch handler */ if (msg->state==LIBAROMA_HID_EV_STATE_DOWN){ win->touched = NULL; int x = msg->x; int y = msg->y; libaroma_window_calculate_pos(win,NULL,&x,&y); int i; for (i=0;i<win->childn;i++){ if (_libaroma_window_is_inside(win->childs[i],x,y)){ win->touched = win->childs[i]; break; } } if (win->touched!=NULL){ if (win->touched->handler->message){ ret=win->touched->handler->message(win->touched, msg); } } } else if (win->touched!=NULL){ if (msg->state==LIBAROMA_HID_EV_STATE_MOVE){ if (win->touched->handler->message){ ret=win->touched->handler->message(win->touched, msg); } } else if 
(msg->state==LIBAROMA_HID_EV_STATE_UP){ if (win->touched->handler->message){ ret=win->touched->handler->message(win->touched, msg); } win->touched=NULL; } } } break; } return ret; } /* End of libaroma_window_process_event */ /* * Function : libaroma_window_pool * Return Value: dword * Descriptions: poll window messages */ dword libaroma_window_pool( LIBAROMA_WINDOWP win, LIBAROMA_MSGP msg){ if (!win){ return 0; } if (win->parent!=NULL){ ALOGW("cannot pool child window..."); return 0; } LIBAROMA_MSG _msg; LIBAROMA_MSGP cmsg=(msg!=NULL)?msg:&_msg; byte ret = libaroma_wm_getmessage(cmsg); if (ret){ dword command = libaroma_window_process_event(win,cmsg); if (command && cmsg->d){ byte cmd = LIBAROMA_CMD(command); if (cmd == LIBAROMA_CMD_CLICK || cmd == LIBAROMA_CMD_HOLD){ LIBAROMA_CONTROLP ctl = (LIBAROMA_CONTROLP) cmsg->d; if (!ctl) { return command; } if (cmd == LIBAROMA_CMD_CLICK && ctl->onclick){ ctl->onclick(ctl); } else if (cmd == LIBAROMA_CMD_HOLD && ctl->onhold){ ctl->onhold(ctl); } } } return command; } return 0; } /* End of libaroma_window_pool */ #undef __CHECK_WM #ifdef __cplusplus } #endif #endif /* __libaroma_window_c__ */
convolution_1x1_pack8_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv1x1s1_sgemm_transform_kernel_pack8_fp16sa_neon(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch) { // interleave // src = inch-outch // dst = 8b-8a-inch/8a-outch/8b kernel_tm_pack8.create(1, inch / 8, outch / 8, (size_t)2u * 64, 64); int q = 0; for (; q + 7 < outch; q += 8) { const float* k0 = (const float*)kernel + (q + 0) * inch; const float* k1 = (const float*)kernel + (q + 1) * inch; const float* k2 = (const float*)kernel + (q + 2) * inch; const float* k3 = (const float*)kernel + (q + 3) * inch; const float* k4 = (const float*)kernel + (q + 4) * inch; const float* k5 = (const float*)kernel + (q + 5) * inch; const float* k6 = (const float*)kernel + (q + 6) * inch; const float* k7 = (const float*)kernel + (q + 7) * inch; __fp16* g0 = kernel_tm_pack8.channel(q / 8); for (int p = 0; p + 7 < inch; p += 8) { for (int i = 0; i < 8; i++) { g0[0] = (__fp16)k0[i]; g0[1] = (__fp16)k1[i]; g0[2] = (__fp16)k2[i]; g0[3] = (__fp16)k3[i]; g0[4] = (__fp16)k4[i]; g0[5] = (__fp16)k5[i]; g0[6] = (__fp16)k6[i]; g0[7] = (__fp16)k7[i]; g0 += 8; } k0 += 8; k1 += 8; k2 += 8; k3 += 8; k4 += 8; k5 += 8; k6 += 8; k7 += 8; } } } static void conv1x1s1_sgemm_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) 
{ int w = bottom_blob.w; int h = bottom_blob.h; int inch = bottom_blob.c; int outch = top_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; const int size = w * h; const __fp16* bias = _bias; // interleave Mat tmp; if (size >= 12) tmp.create(12, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, elemsize, elempack, opt.workspace_allocator); else if (size >= 8) tmp.create(8, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, elemsize, elempack, opt.workspace_allocator); else if (size >= 4) tmp.create(4, inch, size / 4 + (size % 4) / 2 + size % 2, elemsize, elempack, opt.workspace_allocator); else if (size >= 2) tmp.create(2, inch, size / 2 + size % 2, elemsize, elempack, opt.workspace_allocator); else // if (size >= 1) tmp.create(1, inch, size, elemsize, elempack, opt.workspace_allocator); { int nn_size; int remain_size_start; nn_size = size / 12; remain_size_start = nn_size * 12; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 12; const __fp16* img0 = bottom_blob.channel(0); img0 += i * 8; __fp16* tmpptr = tmp.channel(i / 12); for (int q = 0; q < inch; q++) { // transpose 12x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0], #64 \n" "ld4 {v16.8h, v17.8h, v18.8h, v19.8h}, [%0] \n" "sub %0, %0, #128 \n" "uzp1 v20.8h, v0.8h, v4.8h \n" // 0 "uzp1 v21.8h, v16.8h, v1.8h \n" // 1 "uzp1 v22.8h, v5.8h, v17.8h \n" // 2 "uzp1 v23.8h, v2.8h, v6.8h \n" // 3 "uzp1 v24.8h, v18.8h, v3.8h \n" // 4 "uzp1 v25.8h, v7.8h, v19.8h \n" // 5 "uzp2 v26.8h, v0.8h, v4.8h \n" // 6 "uzp2 v27.8h, v16.8h, v1.8h \n" // 7 "uzp2 v28.8h, v5.8h, v17.8h \n" // 8 "uzp2 v29.8h, v2.8h, v6.8h \n" // 9 "uzp2 v30.8h, v18.8h, v3.8h \n" // 10 "uzp2 v31.8h, v7.8h, v19.8h \n" // 11 "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" "st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 
\n" "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); img0 += bottom_blob.cstep * 8; } } nn_size = (size - remain_size_start) >> 3; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; const __fp16* img0 = bottom_blob.channel(0); img0 += i * 8; __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); for (int q = 0; q < inch; q++) { // transpose 8x8 asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n" "ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n" "sub %0, %0, #64 \n" "uzp1 v16.8h, v0.8h, v4.8h \n" "uzp2 v20.8h, v0.8h, v4.8h \n" "uzp1 v17.8h, v1.8h, v5.8h \n" "uzp2 v21.8h, v1.8h, v5.8h \n" "uzp1 v18.8h, v2.8h, v6.8h \n" "uzp2 v22.8h, v2.8h, v6.8h \n" "uzp1 v19.8h, v3.8h, v7.8h \n" "uzp2 v23.8h, v3.8h, v7.8h \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); img0 += bottom_blob.cstep * 8; } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; const __fp16* img0 = bottom_blob.channel(0); img0 += i * 8; __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n" "st1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", 
"v3"); img0 += bottom_blob.cstep * 8; } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; const __fp16* img0 = bottom_blob.channel(0); img0 += i * 8; __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.8h, v1.8h}, [%0] \n" "st1 {v0.8h, v1.8h}, [%1], #32 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1"); img0 += bottom_blob.cstep * 8; } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { const __fp16* img0 = bottom_blob.channel(0); img0 += i * 8; __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); for (int q = 0; q < inch; q++) { asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.8h}, [%0] \n" "st1 {v0.8h}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); img0 += bottom_blob.cstep * 8; } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16* outptr0 = top_blob.channel(p); const __fp16 zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const __fp16* biasptr = bias ? 
bias + p * 8 : zeros; int i = 0; for (; i + 11 < size; i += 12) { __fp16* tmpptr = tmp.channel(i / 12); const __fp16* kptr0 = kernel.channel(p); int nn = inch; // inch always > 0 asm volatile( "ld1 {v20.8h}, [%8] \n" "mov v21.16b, v20.16b \n" "mov v22.16b, v20.16b \n" "mov v23.16b, v20.16b \n" "mov v24.16b, v20.16b \n" "mov v25.16b, v20.16b \n" "mov v26.16b, v20.16b \n" "mov v27.16b, v20.16b \n" "mov v28.16b, v20.16b \n" "mov v29.16b, v20.16b \n" "mov v30.16b, v20.16b \n" "mov v31.16b, v20.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123 "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w0123 "fmla v20.8h, v12.8h, v0.h[0] \n" "fmla v21.8h, v12.8h, v0.h[1] \n" "fmla v22.8h, v12.8h, v0.h[2] \n" "fmla v23.8h, v12.8h, v0.h[3] \n" "fmla v24.8h, v12.8h, v0.h[4] \n" "fmla v25.8h, v12.8h, v0.h[5] \n" "fmla v26.8h, v12.8h, v0.h[6] \n" "fmla v27.8h, v12.8h, v0.h[7] \n" "fmla v28.8h, v12.8h, v1.h[0] \n" "fmla v29.8h, v12.8h, v1.h[1] \n" "fmla v30.8h, v12.8h, v1.h[2] \n" "fmla v31.8h, v12.8h, v1.h[3] \n" "fmla v20.8h, v13.8h, v1.h[4] \n" "fmla v21.8h, v13.8h, v1.h[5] \n" "fmla v22.8h, v13.8h, v1.h[6] \n" "fmla v23.8h, v13.8h, v1.h[7] \n" "fmla v24.8h, v13.8h, v2.h[0] \n" "fmla v25.8h, v13.8h, v2.h[1] \n" "fmla v26.8h, v13.8h, v2.h[2] \n" "fmla v27.8h, v13.8h, v2.h[3] \n" "fmla v28.8h, v13.8h, v2.h[4] \n" "fmla v29.8h, v13.8h, v2.h[5] \n" "fmla v30.8h, v13.8h, v2.h[6] \n" "fmla v31.8h, v13.8h, v2.h[7] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%2], #64 \n" // r4567 "fmla v20.8h, v14.8h, v3.h[0] \n" "fmla v21.8h, v14.8h, v3.h[1] \n" "fmla v22.8h, v14.8h, v3.h[2] \n" "fmla v23.8h, v14.8h, v3.h[3] \n" "fmla v24.8h, v14.8h, v3.h[4] \n" "fmla v25.8h, v14.8h, v3.h[5] \n" "fmla v26.8h, v14.8h, v3.h[6] \n" "fmla v27.8h, v14.8h, v3.h[7] \n" "fmla v28.8h, v14.8h, v4.h[0] \n" "fmla v29.8h, v14.8h, v4.h[1] \n" "fmla v30.8h, v14.8h, v4.h[2] \n" "fmla v31.8h, v14.8h, 
v4.h[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%3], #64 \n" // w4567 "fmla v20.8h, v15.8h, v4.h[4] \n" "fmla v21.8h, v15.8h, v4.h[5] \n" "fmla v22.8h, v15.8h, v4.h[6] \n" "fmla v23.8h, v15.8h, v4.h[7] \n" "fmla v24.8h, v15.8h, v5.h[0] \n" "fmla v25.8h, v15.8h, v5.h[1] \n" "fmla v26.8h, v15.8h, v5.h[2] \n" "fmla v27.8h, v15.8h, v5.h[3] \n" "fmla v28.8h, v15.8h, v5.h[4] \n" "fmla v29.8h, v15.8h, v5.h[5] \n" "fmla v30.8h, v15.8h, v5.h[6] \n" "fmla v31.8h, v15.8h, v5.h[7] \n" "fmla v20.8h, v16.8h, v6.h[0] \n" "fmla v21.8h, v16.8h, v6.h[1] \n" "fmla v22.8h, v16.8h, v6.h[2] \n" "fmla v23.8h, v16.8h, v6.h[3] \n" "fmla v24.8h, v16.8h, v6.h[4] \n" "fmla v25.8h, v16.8h, v6.h[5] \n" "fmla v26.8h, v16.8h, v6.h[6] \n" "fmla v27.8h, v16.8h, v6.h[7] \n" "fmla v28.8h, v16.8h, v7.h[0] \n" "fmla v29.8h, v16.8h, v7.h[1] \n" "fmla v30.8h, v16.8h, v7.h[2] \n" "fmla v31.8h, v16.8h, v7.h[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%2], #64 \n" // r891011 "fmla v20.8h, v17.8h, v7.h[4] \n" "fmla v21.8h, v17.8h, v7.h[5] \n" "fmla v22.8h, v17.8h, v7.h[6] \n" "fmla v23.8h, v17.8h, v7.h[7] \n" "fmla v24.8h, v17.8h, v8.h[0] \n" "fmla v25.8h, v17.8h, v8.h[1] \n" "fmla v26.8h, v17.8h, v8.h[2] \n" "fmla v27.8h, v17.8h, v8.h[3] \n" "fmla v28.8h, v17.8h, v8.h[4] \n" "fmla v29.8h, v17.8h, v8.h[5] \n" "fmla v30.8h, v17.8h, v8.h[6] \n" "fmla v31.8h, v17.8h, v8.h[7] \n" "fmla v20.8h, v18.8h, v9.h[0] \n" "fmla v21.8h, v18.8h, v9.h[1] \n" "fmla v22.8h, v18.8h, v9.h[2] \n" "fmla v23.8h, v18.8h, v9.h[3] \n" "fmla v24.8h, v18.8h, v9.h[4] \n" "fmla v25.8h, v18.8h, v9.h[5] \n" "fmla v26.8h, v18.8h, v9.h[6] \n" "fmla v27.8h, v18.8h, v9.h[7] \n" "fmla v28.8h, v18.8h, v10.h[0] \n" "fmla v29.8h, v18.8h, v10.h[1] \n" "fmla v30.8h, v18.8h, v10.h[2] \n" "fmla v31.8h, v18.8h, v10.h[3] \n" "subs %w0, %w0, #1 \n" "fmla v20.8h, v19.8h, v10.h[4] \n" "fmla v21.8h, v19.8h, v10.h[5] \n" "fmla v22.8h, v19.8h, v10.h[6] \n" "fmla v23.8h, v19.8h, v10.h[7] \n" 
"fmla v24.8h, v19.8h, v11.h[0] \n" "fmla v25.8h, v19.8h, v11.h[1] \n" "fmla v26.8h, v19.8h, v11.h[2] \n" "fmla v27.8h, v19.8h, v11.h[3] \n" "fmla v28.8h, v19.8h, v11.h[4] \n" "fmla v29.8h, v19.8h, v11.h[5] \n" "fmla v30.8h, v19.8h, v11.h[6] \n" "fmla v31.8h, v19.8h, v11.h[7] \n" "bne 0b \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" "st1 {v24.8h, v25.8h, v26.8h, v27.8h}, [%1], #64 \n" "st1 {v28.8h, v29.8h, v30.8h, v31.8h}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < size; i += 8) { __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); const __fp16* kptr0 = kernel.channel(p); int nn = inch; // inch always > 0 asm volatile( "ld1 {v16.8h}, [%8] \n" "mov v17.16b, v16.16b \n" "mov v18.16b, v16.16b \n" "mov v19.16b, v16.16b \n" "mov v20.16b, v16.16b \n" "mov v21.16b, v16.16b \n" "mov v22.16b, v16.16b \n" "mov v23.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v17.8h, v8.8h, v0.h[1] \n" "fmla v18.8h, v8.8h, v0.h[2] \n" "fmla v19.8h, v8.8h, v0.h[3] \n" "fmla v20.8h, v8.8h, v0.h[4] \n" "fmla v21.8h, v8.8h, v0.h[5] \n" "fmla v22.8h, v8.8h, v0.h[6] \n" "fmla v23.8h, v8.8h, v0.h[7] \n" "fmla v16.8h, v9.8h, v1.h[0] \n" "fmla v17.8h, v9.8h, v1.h[1] \n" "fmla v18.8h, v9.8h, v1.h[2] \n" "fmla v19.8h, v9.8h, v1.h[3] \n" "fmla v20.8h, v9.8h, v1.h[4] \n" "fmla v21.8h, v9.8h, v1.h[5] \n" "fmla v22.8h, v9.8h, v1.h[6] \n" "fmla v23.8h, v9.8h, v1.h[7] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, 
[%2], #64 \n" // r4567 "fmla v16.8h, v10.8h, v2.h[0] \n" "fmla v17.8h, v10.8h, v2.h[1] \n" "fmla v18.8h, v10.8h, v2.h[2] \n" "fmla v19.8h, v10.8h, v2.h[3] \n" "fmla v20.8h, v10.8h, v2.h[4] \n" "fmla v21.8h, v10.8h, v2.h[5] \n" "fmla v22.8h, v10.8h, v2.h[6] \n" "fmla v23.8h, v10.8h, v2.h[7] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v11.8h, v3.h[0] \n" "fmla v17.8h, v11.8h, v3.h[1] \n" "fmla v18.8h, v11.8h, v3.h[2] \n" "fmla v19.8h, v11.8h, v3.h[3] \n" "fmla v20.8h, v11.8h, v3.h[4] \n" "fmla v21.8h, v11.8h, v3.h[5] \n" "fmla v22.8h, v11.8h, v3.h[6] \n" "fmla v23.8h, v11.8h, v3.h[7] \n" "fmla v16.8h, v12.8h, v4.h[0] \n" "fmla v17.8h, v12.8h, v4.h[1] \n" "fmla v18.8h, v12.8h, v4.h[2] \n" "fmla v19.8h, v12.8h, v4.h[3] \n" "fmla v20.8h, v12.8h, v4.h[4] \n" "fmla v21.8h, v12.8h, v4.h[5] \n" "fmla v22.8h, v12.8h, v4.h[6] \n" "fmla v23.8h, v12.8h, v4.h[7] \n" "fmla v16.8h, v13.8h, v5.h[0] \n" "fmla v17.8h, v13.8h, v5.h[1] \n" "fmla v18.8h, v13.8h, v5.h[2] \n" "fmla v19.8h, v13.8h, v5.h[3] \n" "fmla v20.8h, v13.8h, v5.h[4] \n" "fmla v21.8h, v13.8h, v5.h[5] \n" "fmla v22.8h, v13.8h, v5.h[6] \n" "fmla v23.8h, v13.8h, v5.h[7] \n" "fmla v16.8h, v14.8h, v6.h[0] \n" "fmla v17.8h, v14.8h, v6.h[1] \n" "fmla v18.8h, v14.8h, v6.h[2] \n" "fmla v19.8h, v14.8h, v6.h[3] \n" "fmla v20.8h, v14.8h, v6.h[4] \n" "fmla v21.8h, v14.8h, v6.h[5] \n" "fmla v22.8h, v14.8h, v6.h[6] \n" "fmla v23.8h, v14.8h, v6.h[7] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, v15.8h, v7.h[0] \n" "fmla v17.8h, v15.8h, v7.h[1] \n" "fmla v18.8h, v15.8h, v7.h[2] \n" "fmla v19.8h, v15.8h, v7.h[3] \n" "fmla v20.8h, v15.8h, v7.h[4] \n" "fmla v21.8h, v15.8h, v7.h[5] \n" "fmla v22.8h, v15.8h, v7.h[6] \n" "fmla v23.8h, v15.8h, v7.h[7] \n" "bne 0b \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" "st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), 
"2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i + 3 < size; i += 4) { __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const __fp16* kptr0 = kernel.channel(p); int nn = inch; // inch always > 0 asm volatile( "ld1 {v16.8h}, [%8] \n" "mov v17.16b, v16.16b \n" "mov v18.16b, v16.16b \n" "mov v19.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%2], #64 \n" // r0123 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v17.8h, v8.8h, v1.h[0] \n" "fmla v18.8h, v8.8h, v2.h[0] \n" "fmla v19.8h, v8.8h, v3.h[0] \n" "fmla v16.8h, v9.8h, v0.h[1] \n" "fmla v17.8h, v9.8h, v1.h[1] \n" "fmla v18.8h, v9.8h, v2.h[1] \n" "fmla v19.8h, v9.8h, v3.h[1] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v10.8h, v0.h[2] \n" "fmla v17.8h, v10.8h, v1.h[2] \n" "fmla v18.8h, v10.8h, v2.h[2] \n" "fmla v19.8h, v10.8h, v3.h[2] \n" "fmla v16.8h, v11.8h, v0.h[3] \n" "fmla v17.8h, v11.8h, v1.h[3] \n" "fmla v18.8h, v11.8h, v2.h[3] \n" "fmla v19.8h, v11.8h, v3.h[3] \n" "fmla v16.8h, v12.8h, v0.h[4] \n" "fmla v17.8h, v12.8h, v1.h[4] \n" "fmla v18.8h, v12.8h, v2.h[4] \n" "fmla v19.8h, v12.8h, v3.h[4] \n" "fmla v16.8h, v13.8h, v0.h[5] \n" "fmla v17.8h, v13.8h, v1.h[5] \n" "fmla v18.8h, v13.8h, v2.h[5] \n" "fmla v19.8h, v13.8h, v3.h[5] \n" "fmla v16.8h, v14.8h, v0.h[6] \n" "fmla v17.8h, v14.8h, v1.h[6] \n" "fmla v18.8h, v14.8h, v2.h[6] \n" "fmla v19.8h, v14.8h, v3.h[6] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, v15.8h, v0.h[7] \n" "fmla v17.8h, v15.8h, v1.h[7] \n" "fmla v18.8h, v15.8h, v2.h[7] \n" "fmla v19.8h, v15.8h, v3.h[7] \n" "bne 0b \n" "st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n" : "=r"(nn), // %0 
"=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } for (; i + 1 < size; i += 2) { __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const __fp16* kptr0 = kernel.channel(p); int nn = inch; // inch always > 0 asm volatile( "ld1 {v16.8h}, [%8] \n" "mov v17.16b, v16.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.8h, v1.8h}, [%2], #32 \n" // r01 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v17.8h, v8.8h, v1.h[0] \n" "fmla v16.8h, v9.8h, v0.h[1] \n" "fmla v17.8h, v9.8h, v1.h[1] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v10.8h, v0.h[2] \n" "fmla v17.8h, v10.8h, v1.h[2] \n" "fmla v16.8h, v11.8h, v0.h[3] \n" "fmla v17.8h, v11.8h, v1.h[3] \n" "fmla v16.8h, v12.8h, v0.h[4] \n" "fmla v17.8h, v12.8h, v1.h[4] \n" "fmla v16.8h, v13.8h, v0.h[5] \n" "fmla v17.8h, v13.8h, v1.h[5] \n" "fmla v16.8h, v14.8h, v0.h[6] \n" "fmla v17.8h, v14.8h, v1.h[6] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, v15.8h, v0.h[7] \n" "fmla v17.8h, v15.8h, v1.h[7] \n" "bne 0b \n" "st1 {v16.8h, v17.8h}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17"); } for (; i < size; i++) { __fp16* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const __fp16* kptr0 = kernel.channel(p); int nn = inch; // inch always > 0 asm volatile( "ld1 {v16.8h}, [%8] \n" "0: \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.8h}, [%2], #16 \n" // r0 "prfm pldl1keep, [%3, #512] \n" "ld1 
{v8.8h, v9.8h, v10.8h, v11.8h}, [%3], #64 \n" // w0123 "fmla v16.8h, v8.8h, v0.h[0] \n" "fmla v16.8h, v9.8h, v0.h[1] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%3], #64 \n" // w4567 "fmla v16.8h, v10.8h, v0.h[2] \n" "fmla v16.8h, v11.8h, v0.h[3] \n" "fmla v16.8h, v12.8h, v0.h[4] \n" "fmla v16.8h, v13.8h, v0.h[5] \n" "subs %w0, %w0, #1 \n" "fmla v16.8h, v14.8h, v0.h[6] \n" "fmla v16.8h, v15.8h, v0.h[7] \n" "bne 0b \n" "st1 {v16.8h}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16"); } } // // NOTE sgemm // for (; p<outch; p++) // { // Mat out0 = top_blob.channel(p); // // const __fp16 bias0 = bias ? bias[p] : 0.f; // // __fp16* outptr0 = out0; // // for (int i=0; i<size; i++) // { // __fp16 sum = bias0; // // const __fp16* kptr = _kernel.channel(p); // // for (int q=0; q<inch; q++) // { // const __fp16* img0 = bottom_blob.channel(q); // // sum += img0[i] * kptr[0]; // kptr ++; // } // // outptr0[i] = sum; // } // } } static void conv1x1s2_pack8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; size_t elemsize = bottom_blob.elemsize; int elempack = bottom_blob.elempack; int outw = top_blob.w; int outh = top_blob.h; const int tailstep = (w - 2 * outw + w) * 8; Mat bottom_blob_shrinked; bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator); #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < channels; p++) { const __fp16* r0 = bottom_blob.channel(p); __fp16* outptr = bottom_blob_shrinked.channel(p); for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { float16x8_t _v0 = vld1q_f16(r0); float16x8_t _v1 = vld1q_f16(r0 + 16); float16x8_t _v2 = vld1q_f16(r0 
+ 32); float16x8_t _v3 = vld1q_f16(r0 + 48); vst1q_f16(outptr, _v0); vst1q_f16(outptr + 8, _v1); vst1q_f16(outptr + 16, _v2); vst1q_f16(outptr + 24, _v3); r0 += 64; outptr += 32; } for (; j + 1 < outw; j += 2) { float16x8_t _v0 = vld1q_f16(r0); float16x8_t _v1 = vld1q_f16(r0 + 16); vst1q_f16(outptr, _v0); vst1q_f16(outptr + 8, _v1); r0 += 32; outptr += 16; } for (; j < outw; j++) { float16x8_t _v = vld1q_f16(r0); vst1q_f16(outptr, _v); r0 += 16; outptr += 8; } r0 += tailstep; } } conv1x1s1_sgemm_pack8_fp16sa_neon(bottom_blob_shrinked, top_blob, kernel, _bias, opt); }
dds.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD DDDD SSSSS % % D D D D SS % % D D D D SSS % % D D D D SS % % DDDD DDDD SSSSS % % % % % % Read/Write Microsoft Direct Draw Surface Image Format % % % % Software Design % % Bianca van Schaik % % March 2008 % % Dirk Lemstra % % September 2013 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/module.h" #include "MagickCore/transform.h" /* Definitions */ #define DDSD_CAPS 0x00000001 #define DDSD_HEIGHT 0x00000002 #define DDSD_WIDTH 0x00000004 #define DDSD_PITCH 0x00000008 #define DDSD_PIXELFORMAT 0x00001000 #define DDSD_MIPMAPCOUNT 0x00020000 #define DDSD_LINEARSIZE 0x00080000 #define DDSD_DEPTH 0x00800000 #define DDPF_ALPHAPIXELS 0x00000001 #define DDPF_FOURCC 0x00000004 #define DDPF_RGB 0x00000040 #define DDPF_LUMINANCE 0x00020000 #define FOURCC_DXT1 0x31545844 #define FOURCC_DXT3 0x33545844 #define FOURCC_DXT5 0x35545844 #define FOURCC_DX10 0x30315844 #define DDSCAPS_COMPLEX 0x00000008 #define DDSCAPS_TEXTURE 0x00001000 #define DDSCAPS_MIPMAP 0x00400000 #define DDSCAPS2_CUBEMAP 0x00000200 #define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400 #define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800 #define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000 #define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000 #define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000 #define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000 #define DDSCAPS2_VOLUME 0x00200000 #define DDSEXT_DIMENSION_TEX2D 0x00000003 #define 
DDSEXTFLAGS_CUBEMAP 0x00000004 typedef enum DXGI_FORMAT { DXGI_FORMAT_UNKNOWN, DXGI_FORMAT_R32G32B32A32_TYPELESS, DXGI_FORMAT_R32G32B32A32_FLOAT, DXGI_FORMAT_R32G32B32A32_UINT, DXGI_FORMAT_R32G32B32A32_SINT, DXGI_FORMAT_R32G32B32_TYPELESS, DXGI_FORMAT_R32G32B32_FLOAT, DXGI_FORMAT_R32G32B32_UINT, DXGI_FORMAT_R32G32B32_SINT, DXGI_FORMAT_R16G16B16A16_TYPELESS, DXGI_FORMAT_R16G16B16A16_FLOAT, DXGI_FORMAT_R16G16B16A16_UNORM, DXGI_FORMAT_R16G16B16A16_UINT, DXGI_FORMAT_R16G16B16A16_SNORM, DXGI_FORMAT_R16G16B16A16_SINT, DXGI_FORMAT_R32G32_TYPELESS, DXGI_FORMAT_R32G32_FLOAT, DXGI_FORMAT_R32G32_UINT, DXGI_FORMAT_R32G32_SINT, DXGI_FORMAT_R32G8X24_TYPELESS, DXGI_FORMAT_D32_FLOAT_S8X24_UINT, DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS, DXGI_FORMAT_X32_TYPELESS_G8X24_UINT, DXGI_FORMAT_R10G10B10A2_TYPELESS, DXGI_FORMAT_R10G10B10A2_UNORM, DXGI_FORMAT_R10G10B10A2_UINT, DXGI_FORMAT_R11G11B10_FLOAT, DXGI_FORMAT_R8G8B8A8_TYPELESS, DXGI_FORMAT_R8G8B8A8_UNORM, DXGI_FORMAT_R8G8B8A8_UNORM_SRGB, DXGI_FORMAT_R8G8B8A8_UINT, DXGI_FORMAT_R8G8B8A8_SNORM, DXGI_FORMAT_R8G8B8A8_SINT, DXGI_FORMAT_R16G16_TYPELESS, DXGI_FORMAT_R16G16_FLOAT, DXGI_FORMAT_R16G16_UNORM, DXGI_FORMAT_R16G16_UINT, DXGI_FORMAT_R16G16_SNORM, DXGI_FORMAT_R16G16_SINT, DXGI_FORMAT_R32_TYPELESS, DXGI_FORMAT_D32_FLOAT, DXGI_FORMAT_R32_FLOAT, DXGI_FORMAT_R32_UINT, DXGI_FORMAT_R32_SINT, DXGI_FORMAT_R24G8_TYPELESS, DXGI_FORMAT_D24_UNORM_S8_UINT, DXGI_FORMAT_R24_UNORM_X8_TYPELESS, DXGI_FORMAT_X24_TYPELESS_G8_UINT, DXGI_FORMAT_R8G8_TYPELESS, DXGI_FORMAT_R8G8_UNORM, DXGI_FORMAT_R8G8_UINT, DXGI_FORMAT_R8G8_SNORM, DXGI_FORMAT_R8G8_SINT, DXGI_FORMAT_R16_TYPELESS, DXGI_FORMAT_R16_FLOAT, DXGI_FORMAT_D16_UNORM, DXGI_FORMAT_R16_UNORM, DXGI_FORMAT_R16_UINT, DXGI_FORMAT_R16_SNORM, DXGI_FORMAT_R16_SINT, DXGI_FORMAT_R8_TYPELESS, DXGI_FORMAT_R8_UNORM, DXGI_FORMAT_R8_UINT, DXGI_FORMAT_R8_SNORM, DXGI_FORMAT_R8_SINT, DXGI_FORMAT_A8_UNORM, DXGI_FORMAT_R1_UNORM, DXGI_FORMAT_R9G9B9E5_SHAREDEXP, DXGI_FORMAT_R8G8_B8G8_UNORM, DXGI_FORMAT_G8R8_G8B8_UNORM, 
DXGI_FORMAT_BC1_TYPELESS, DXGI_FORMAT_BC1_UNORM, DXGI_FORMAT_BC1_UNORM_SRGB, DXGI_FORMAT_BC2_TYPELESS, DXGI_FORMAT_BC2_UNORM, DXGI_FORMAT_BC2_UNORM_SRGB, DXGI_FORMAT_BC3_TYPELESS, DXGI_FORMAT_BC3_UNORM, DXGI_FORMAT_BC3_UNORM_SRGB, DXGI_FORMAT_BC4_TYPELESS, DXGI_FORMAT_BC4_UNORM, DXGI_FORMAT_BC4_SNORM, DXGI_FORMAT_BC5_TYPELESS, DXGI_FORMAT_BC5_UNORM, DXGI_FORMAT_BC5_SNORM, DXGI_FORMAT_B5G6R5_UNORM, DXGI_FORMAT_B5G5R5A1_UNORM, DXGI_FORMAT_B8G8R8A8_UNORM, DXGI_FORMAT_B8G8R8X8_UNORM, DXGI_FORMAT_R10G10B10_XR_BIAS_A2_UNORM, DXGI_FORMAT_B8G8R8A8_TYPELESS, DXGI_FORMAT_B8G8R8A8_UNORM_SRGB, DXGI_FORMAT_B8G8R8X8_TYPELESS, DXGI_FORMAT_B8G8R8X8_UNORM_SRGB, DXGI_FORMAT_BC6H_TYPELESS, DXGI_FORMAT_BC6H_UF16, DXGI_FORMAT_BC6H_SF16, DXGI_FORMAT_BC7_TYPELESS, DXGI_FORMAT_BC7_UNORM, DXGI_FORMAT_BC7_UNORM_SRGB, DXGI_FORMAT_AYUV, DXGI_FORMAT_Y410, DXGI_FORMAT_Y416, DXGI_FORMAT_NV12, DXGI_FORMAT_P010, DXGI_FORMAT_P016, DXGI_FORMAT_420_OPAQUE, DXGI_FORMAT_YUY2, DXGI_FORMAT_Y210, DXGI_FORMAT_Y216, DXGI_FORMAT_NV11, DXGI_FORMAT_AI44, DXGI_FORMAT_IA44, DXGI_FORMAT_P8, DXGI_FORMAT_A8P8, DXGI_FORMAT_B4G4R4A4_UNORM, DXGI_FORMAT_P208, DXGI_FORMAT_V208, DXGI_FORMAT_V408, DXGI_FORMAT_SAMPLER_FEEDBACK_MIN_MIP_OPAQUE, DXGI_FORMAT_SAMPLER_FEEDBACK_MIP_REGION_USED_OPAQUE, DXGI_FORMAT_FORCE_UINT } DXGI_FORMAT; #ifndef SIZE_MAX #define SIZE_MAX ((size_t) -1) #endif /* Structure declarations. 
*/ typedef struct _DDSPixelFormat { size_t flags, fourcc, rgb_bitcount, r_bitmask, g_bitmask, b_bitmask, alpha_bitmask; } DDSPixelFormat; typedef struct _DDSInfo { size_t flags, height, width, pitchOrLinearSize, depth, mipmapcount, ddscaps1, ddscaps2, extFormat, extDimension, extFlags, extArraySize, extFlags2; DDSPixelFormat pixelformat; } DDSInfo; typedef struct _DDSColors { unsigned char r[4], g[4], b[4], a[4]; } DDSColors; typedef struct _DDSVector4 { float x, y, z, w; } DDSVector4; typedef struct _DDSVector3 { float x, y, z; } DDSVector3; typedef struct _DDSSourceBlock { unsigned char start, end, error; } DDSSourceBlock; typedef struct _DDSSingleColorLookup { DDSSourceBlock sources[2]; } DDSSingleColorLookup; typedef MagickBooleanType DDSDecoder(const ImageInfo *,Image *,DDSInfo *,const MagickBooleanType, ExceptionInfo *); typedef MagickBooleanType DDSPixelDecoder(Image *,DDSInfo *,ExceptionInfo *); static const DDSSingleColorLookup DDSLookup_5_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 1 } } }, { { { 0, 0, 2 }, { 0, 1, 0 } } }, { { { 0, 0, 3 }, { 0, 1, 1 } } }, { { { 0, 0, 4 }, { 0, 2, 1 } } }, { { { 1, 0, 3 }, { 0, 2, 0 } } }, { { { 1, 0, 2 }, { 0, 2, 1 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 1, 2, 1 } } }, { { { 1, 0, 2 }, { 1, 2, 0 } } }, { { { 1, 0, 3 }, { 0, 4, 0 } } }, { { { 1, 0, 4 }, { 0, 5, 1 } } }, { { { 2, 0, 3 }, { 0, 5, 0 } } }, { { { 2, 0, 2 }, { 0, 5, 1 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 2, 3, 1 } } }, { { { 2, 0, 2 }, { 2, 3, 0 } } }, { { { 2, 0, 3 }, { 0, 7, 0 } } }, { { { 2, 0, 4 }, { 1, 6, 1 } } }, { { { 3, 0, 3 }, { 1, 6, 0 } } }, { { { 3, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 2 }, { 0, 10, 1 } } }, { { { 3, 0, 3 }, { 0, 10, 0 } } }, { { { 3, 0, 4 }, { 2, 7, 1 } } }, { { { 4, 0, 4 }, { 2, 7, 0 } } }, { { { 
4, 0, 3 }, { 0, 11, 0 } } }, { { { 4, 0, 2 }, { 1, 10, 1 } } }, { { { 4, 0, 1 }, { 1, 10, 0 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 1 } } }, { { { 4, 0, 2 }, { 0, 13, 0 } } }, { { { 4, 0, 3 }, { 0, 13, 1 } } }, { { { 4, 0, 4 }, { 0, 14, 1 } } }, { { { 5, 0, 3 }, { 0, 14, 0 } } }, { { { 5, 0, 2 }, { 2, 11, 1 } } }, { { { 5, 0, 1 }, { 2, 11, 0 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 1, 14, 1 } } }, { { { 5, 0, 2 }, { 1, 14, 0 } } }, { { { 5, 0, 3 }, { 0, 16, 0 } } }, { { { 5, 0, 4 }, { 0, 17, 1 } } }, { { { 6, 0, 3 }, { 0, 17, 0 } } }, { { { 6, 0, 2 }, { 0, 17, 1 } } }, { { { 6, 0, 1 }, { 0, 18, 1 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 2, 15, 1 } } }, { { { 6, 0, 2 }, { 2, 15, 0 } } }, { { { 6, 0, 3 }, { 0, 19, 0 } } }, { { { 6, 0, 4 }, { 1, 18, 1 } } }, { { { 7, 0, 3 }, { 1, 18, 0 } } }, { { { 7, 0, 2 }, { 0, 20, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 21, 1 } } }, { { { 7, 0, 2 }, { 0, 22, 1 } } }, { { { 7, 0, 3 }, { 0, 22, 0 } } }, { { { 7, 0, 4 }, { 2, 19, 1 } } }, { { { 8, 0, 4 }, { 2, 19, 0 } } }, { { { 8, 0, 3 }, { 0, 23, 0 } } }, { { { 8, 0, 2 }, { 1, 22, 1 } } }, { { { 8, 0, 1 }, { 1, 22, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 1 } } }, { { { 8, 0, 2 }, { 0, 25, 0 } } }, { { { 8, 0, 3 }, { 0, 25, 1 } } }, { { { 8, 0, 4 }, { 0, 26, 1 } } }, { { { 9, 0, 3 }, { 0, 26, 0 } } }, { { { 9, 0, 2 }, { 2, 23, 1 } } }, { { { 9, 0, 1 }, { 2, 23, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 1, 26, 1 } } }, { { { 9, 0, 2 }, { 1, 26, 0 } } }, { { { 9, 0, 3 }, { 0, 28, 0 } } }, { { { 9, 0, 4 }, { 0, 29, 1 } } }, { { { 10, 0, 3 }, { 0, 29, 0 } } }, { { { 10, 0, 2 }, { 0, 29, 1 } } }, { { { 10, 0, 1 }, { 0, 30, 1 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 2, 27, 1 } } }, { { { 10, 0, 2 }, { 2, 27, 0 } } }, { { { 10, 0, 3 }, { 0, 31, 0 } } }, { { { 10, 0, 4 }, { 1, 30, 1 } } }, { 
{ { 11, 0, 3 }, { 1, 30, 0 } } }, { { { 11, 0, 2 }, { 4, 24, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 0 }, { 1, 31, 0 } } }, { { { 11, 0, 1 }, { 1, 31, 1 } } }, { { { 11, 0, 2 }, { 2, 30, 1 } } }, { { { 11, 0, 3 }, { 2, 30, 0 } } }, { { { 11, 0, 4 }, { 2, 31, 1 } } }, { { { 12, 0, 4 }, { 2, 31, 0 } } }, { { { 12, 0, 3 }, { 4, 27, 0 } } }, { { { 12, 0, 2 }, { 3, 30, 1 } } }, { { { 12, 0, 1 }, { 3, 30, 0 } } }, { { { 12, 0, 0 }, { 4, 28, 0 } } }, { { { 12, 0, 1 }, { 3, 31, 1 } } }, { { { 12, 0, 2 }, { 3, 31, 0 } } }, { { { 12, 0, 3 }, { 3, 31, 1 } } }, { { { 12, 0, 4 }, { 4, 30, 1 } } }, { { { 13, 0, 3 }, { 4, 30, 0 } } }, { { { 13, 0, 2 }, { 6, 27, 1 } } }, { { { 13, 0, 1 }, { 6, 27, 0 } } }, { { { 13, 0, 0 }, { 4, 31, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 1 } } }, { { { 13, 0, 2 }, { 5, 30, 0 } } }, { { { 13, 0, 3 }, { 8, 24, 0 } } }, { { { 13, 0, 4 }, { 5, 31, 1 } } }, { { { 14, 0, 3 }, { 5, 31, 0 } } }, { { { 14, 0, 2 }, { 5, 31, 1 } } }, { { { 14, 0, 1 }, { 6, 30, 1 } } }, { { { 14, 0, 0 }, { 6, 30, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 1 } } }, { { { 14, 0, 2 }, { 6, 31, 0 } } }, { { { 14, 0, 3 }, { 8, 27, 0 } } }, { { { 14, 0, 4 }, { 7, 30, 1 } } }, { { { 15, 0, 3 }, { 7, 30, 0 } } }, { { { 15, 0, 2 }, { 8, 28, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 0 }, { 7, 31, 0 } } }, { { { 15, 0, 1 }, { 7, 31, 1 } } }, { { { 15, 0, 2 }, { 8, 30, 1 } } }, { { { 15, 0, 3 }, { 8, 30, 0 } } }, { { { 15, 0, 4 }, { 10, 27, 1 } } }, { { { 16, 0, 4 }, { 10, 27, 0 } } }, { { { 16, 0, 3 }, { 8, 31, 0 } } }, { { { 16, 0, 2 }, { 9, 30, 1 } } }, { { { 16, 0, 1 }, { 9, 30, 0 } } }, { { { 16, 0, 0 }, { 12, 24, 0 } } }, { { { 16, 0, 1 }, { 9, 31, 1 } } }, { { { 16, 0, 2 }, { 9, 31, 0 } } }, { { { 16, 0, 3 }, { 9, 31, 1 } } }, { { { 16, 0, 4 }, { 10, 30, 1 } } }, { { { 17, 0, 3 }, { 10, 30, 0 } } }, { { { 17, 0, 2 }, { 10, 31, 1 } } }, { { { 17, 0, 1 }, { 10, 31, 0 } } }, { { { 17, 0, 0 }, { 12, 27, 0 } } }, { { { 17, 0, 1 }, { 11, 30, 1 } } }, { { { 17, 0, 
2 }, { 11, 30, 0 } } }, { { { 17, 0, 3 }, { 12, 28, 0 } } }, { { { 17, 0, 4 }, { 11, 31, 1 } } }, { { { 18, 0, 3 }, { 11, 31, 0 } } }, { { { 18, 0, 2 }, { 11, 31, 1 } } }, { { { 18, 0, 1 }, { 12, 30, 1 } } }, { { { 18, 0, 0 }, { 12, 30, 0 } } }, { { { 18, 0, 1 }, { 14, 27, 1 } } }, { { { 18, 0, 2 }, { 14, 27, 0 } } }, { { { 18, 0, 3 }, { 12, 31, 0 } } }, { { { 18, 0, 4 }, { 13, 30, 1 } } }, { { { 19, 0, 3 }, { 13, 30, 0 } } }, { { { 19, 0, 2 }, { 16, 24, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 0 }, { 13, 31, 0 } } }, { { { 19, 0, 1 }, { 13, 31, 1 } } }, { { { 19, 0, 2 }, { 14, 30, 1 } } }, { { { 19, 0, 3 }, { 14, 30, 0 } } }, { { { 19, 0, 4 }, { 14, 31, 1 } } }, { { { 20, 0, 4 }, { 14, 31, 0 } } }, { { { 20, 0, 3 }, { 16, 27, 0 } } }, { { { 20, 0, 2 }, { 15, 30, 1 } } }, { { { 20, 0, 1 }, { 15, 30, 0 } } }, { { { 20, 0, 0 }, { 16, 28, 0 } } }, { { { 20, 0, 1 }, { 15, 31, 1 } } }, { { { 20, 0, 2 }, { 15, 31, 0 } } }, { { { 20, 0, 3 }, { 15, 31, 1 } } }, { { { 20, 0, 4 }, { 16, 30, 1 } } }, { { { 21, 0, 3 }, { 16, 30, 0 } } }, { { { 21, 0, 2 }, { 18, 27, 1 } } }, { { { 21, 0, 1 }, { 18, 27, 0 } } }, { { { 21, 0, 0 }, { 16, 31, 0 } } }, { { { 21, 0, 1 }, { 17, 30, 1 } } }, { { { 21, 0, 2 }, { 17, 30, 0 } } }, { { { 21, 0, 3 }, { 20, 24, 0 } } }, { { { 21, 0, 4 }, { 17, 31, 1 } } }, { { { 22, 0, 3 }, { 17, 31, 0 } } }, { { { 22, 0, 2 }, { 17, 31, 1 } } }, { { { 22, 0, 1 }, { 18, 30, 1 } } }, { { { 22, 0, 0 }, { 18, 30, 0 } } }, { { { 22, 0, 1 }, { 18, 31, 1 } } }, { { { 22, 0, 2 }, { 18, 31, 0 } } }, { { { 22, 0, 3 }, { 20, 27, 0 } } }, { { { 22, 0, 4 }, { 19, 30, 1 } } }, { { { 23, 0, 3 }, { 19, 30, 0 } } }, { { { 23, 0, 2 }, { 20, 28, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 0 }, { 19, 31, 0 } } }, { { { 23, 0, 1 }, { 19, 31, 1 } } }, { { { 23, 0, 2 }, { 20, 30, 1 } } }, { { { 23, 0, 3 }, { 20, 30, 0 } } }, { { { 23, 0, 4 }, { 22, 27, 1 } } }, { { { 24, 0, 4 }, { 22, 27, 0 } } }, { { { 24, 0, 3 }, { 20, 31, 0 } } }, { { { 24, 0, 2 
}, { 21, 30, 1 } } }, { { { 24, 0, 1 }, { 21, 30, 0 } } }, { { { 24, 0, 0 }, { 24, 24, 0 } } }, { { { 24, 0, 1 }, { 21, 31, 1 } } }, { { { 24, 0, 2 }, { 21, 31, 0 } } }, { { { 24, 0, 3 }, { 21, 31, 1 } } }, { { { 24, 0, 4 }, { 22, 30, 1 } } }, { { { 25, 0, 3 }, { 22, 30, 0 } } }, { { { 25, 0, 2 }, { 22, 31, 1 } } }, { { { 25, 0, 1 }, { 22, 31, 0 } } }, { { { 25, 0, 0 }, { 24, 27, 0 } } }, { { { 25, 0, 1 }, { 23, 30, 1 } } }, { { { 25, 0, 2 }, { 23, 30, 0 } } }, { { { 25, 0, 3 }, { 24, 28, 0 } } }, { { { 25, 0, 4 }, { 23, 31, 1 } } }, { { { 26, 0, 3 }, { 23, 31, 0 } } }, { { { 26, 0, 2 }, { 23, 31, 1 } } }, { { { 26, 0, 1 }, { 24, 30, 1 } } }, { { { 26, 0, 0 }, { 24, 30, 0 } } }, { { { 26, 0, 1 }, { 26, 27, 1 } } }, { { { 26, 0, 2 }, { 26, 27, 0 } } }, { { { 26, 0, 3 }, { 24, 31, 0 } } }, { { { 26, 0, 4 }, { 25, 30, 1 } } }, { { { 27, 0, 3 }, { 25, 30, 0 } } }, { { { 27, 0, 2 }, { 28, 24, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 0 }, { 25, 31, 0 } } }, { { { 27, 0, 1 }, { 25, 31, 1 } } }, { { { 27, 0, 2 }, { 26, 30, 1 } } }, { { { 27, 0, 3 }, { 26, 30, 0 } } }, { { { 27, 0, 4 }, { 26, 31, 1 } } }, { { { 28, 0, 4 }, { 26, 31, 0 } } }, { { { 28, 0, 3 }, { 28, 27, 0 } } }, { { { 28, 0, 2 }, { 27, 30, 1 } } }, { { { 28, 0, 1 }, { 27, 30, 0 } } }, { { { 28, 0, 0 }, { 28, 28, 0 } } }, { { { 28, 0, 1 }, { 27, 31, 1 } } }, { { { 28, 0, 2 }, { 27, 31, 0 } } }, { { { 28, 0, 3 }, { 27, 31, 1 } } }, { { { 28, 0, 4 }, { 28, 30, 1 } } }, { { { 29, 0, 3 }, { 28, 30, 0 } } }, { { { 29, 0, 2 }, { 30, 27, 1 } } }, { { { 29, 0, 1 }, { 30, 27, 0 } } }, { { { 29, 0, 0 }, { 28, 31, 0 } } }, { { { 29, 0, 1 }, { 29, 30, 1 } } }, { { { 29, 0, 2 }, { 29, 30, 0 } } }, { { { 29, 0, 3 }, { 29, 30, 1 } } }, { { { 29, 0, 4 }, { 29, 31, 1 } } }, { { { 30, 0, 3 }, { 29, 31, 0 } } }, { { { 30, 0, 2 }, { 29, 31, 1 } } }, { { { 30, 0, 1 }, { 30, 30, 1 } } }, { { { 30, 0, 0 }, { 30, 30, 0 } } }, { { { 30, 0, 1 }, { 30, 31, 1 } } }, { { { 30, 0, 2 }, { 30, 31, 0 } } }, { { { 30, 0, 3 
}, { 30, 31, 1 } } }, { { { 30, 0, 4 }, { 31, 30, 1 } } }, { { { 31, 0, 3 }, { 31, 30, 0 } } }, { { { 31, 0, 2 }, { 31, 30, 1 } } }, { { { 31, 0, 1 }, { 31, 31, 1 } } }, { { { 31, 0, 0 }, { 31, 31, 0 } } } }; static const DDSSingleColorLookup DDSLookup_6_4[] = { { { { 0, 0, 0 }, { 0, 0, 0 } } }, { { { 0, 0, 1 }, { 0, 1, 0 } } }, { { { 0, 0, 2 }, { 0, 2, 0 } } }, { { { 1, 0, 1 }, { 0, 3, 1 } } }, { { { 1, 0, 0 }, { 0, 3, 0 } } }, { { { 1, 0, 1 }, { 0, 4, 0 } } }, { { { 1, 0, 2 }, { 0, 5, 0 } } }, { { { 2, 0, 1 }, { 0, 6, 1 } } }, { { { 2, 0, 0 }, { 0, 6, 0 } } }, { { { 2, 0, 1 }, { 0, 7, 0 } } }, { { { 2, 0, 2 }, { 0, 8, 0 } } }, { { { 3, 0, 1 }, { 0, 9, 1 } } }, { { { 3, 0, 0 }, { 0, 9, 0 } } }, { { { 3, 0, 1 }, { 0, 10, 0 } } }, { { { 3, 0, 2 }, { 0, 11, 0 } } }, { { { 4, 0, 1 }, { 0, 12, 1 } } }, { { { 4, 0, 0 }, { 0, 12, 0 } } }, { { { 4, 0, 1 }, { 0, 13, 0 } } }, { { { 4, 0, 2 }, { 0, 14, 0 } } }, { { { 5, 0, 1 }, { 0, 15, 1 } } }, { { { 5, 0, 0 }, { 0, 15, 0 } } }, { { { 5, 0, 1 }, { 0, 16, 0 } } }, { { { 5, 0, 2 }, { 1, 15, 0 } } }, { { { 6, 0, 1 }, { 0, 17, 0 } } }, { { { 6, 0, 0 }, { 0, 18, 0 } } }, { { { 6, 0, 1 }, { 0, 19, 0 } } }, { { { 6, 0, 2 }, { 3, 14, 0 } } }, { { { 7, 0, 1 }, { 0, 20, 0 } } }, { { { 7, 0, 0 }, { 0, 21, 0 } } }, { { { 7, 0, 1 }, { 0, 22, 0 } } }, { { { 7, 0, 2 }, { 4, 15, 0 } } }, { { { 8, 0, 1 }, { 0, 23, 0 } } }, { { { 8, 0, 0 }, { 0, 24, 0 } } }, { { { 8, 0, 1 }, { 0, 25, 0 } } }, { { { 8, 0, 2 }, { 6, 14, 0 } } }, { { { 9, 0, 1 }, { 0, 26, 0 } } }, { { { 9, 0, 0 }, { 0, 27, 0 } } }, { { { 9, 0, 1 }, { 0, 28, 0 } } }, { { { 9, 0, 2 }, { 7, 15, 0 } } }, { { { 10, 0, 1 }, { 0, 29, 0 } } }, { { { 10, 0, 0 }, { 0, 30, 0 } } }, { { { 10, 0, 1 }, { 0, 31, 0 } } }, { { { 10, 0, 2 }, { 9, 14, 0 } } }, { { { 11, 0, 1 }, { 0, 32, 0 } } }, { { { 11, 0, 0 }, { 0, 33, 0 } } }, { { { 11, 0, 1 }, { 2, 30, 0 } } }, { { { 11, 0, 2 }, { 0, 34, 0 } } }, { { { 12, 0, 1 }, { 0, 35, 0 } } }, { { { 12, 0, 0 }, { 0, 36, 0 } } }, { { { 12, 0, 1 }, { 3, 
31, 0 } } }, { { { 12, 0, 2 }, { 0, 37, 0 } } }, { { { 13, 0, 1 }, { 0, 38, 0 } } }, { { { 13, 0, 0 }, { 0, 39, 0 } } }, { { { 13, 0, 1 }, { 5, 30, 0 } } }, { { { 13, 0, 2 }, { 0, 40, 0 } } }, { { { 14, 0, 1 }, { 0, 41, 0 } } }, { { { 14, 0, 0 }, { 0, 42, 0 } } }, { { { 14, 0, 1 }, { 6, 31, 0 } } }, { { { 14, 0, 2 }, { 0, 43, 0 } } }, { { { 15, 0, 1 }, { 0, 44, 0 } } }, { { { 15, 0, 0 }, { 0, 45, 0 } } }, { { { 15, 0, 1 }, { 8, 30, 0 } } }, { { { 15, 0, 2 }, { 0, 46, 0 } } }, { { { 16, 0, 2 }, { 0, 47, 0 } } }, { { { 16, 0, 1 }, { 1, 46, 0 } } }, { { { 16, 0, 0 }, { 0, 48, 0 } } }, { { { 16, 0, 1 }, { 0, 49, 0 } } }, { { { 16, 0, 2 }, { 0, 50, 0 } } }, { { { 17, 0, 1 }, { 2, 47, 0 } } }, { { { 17, 0, 0 }, { 0, 51, 0 } } }, { { { 17, 0, 1 }, { 0, 52, 0 } } }, { { { 17, 0, 2 }, { 0, 53, 0 } } }, { { { 18, 0, 1 }, { 4, 46, 0 } } }, { { { 18, 0, 0 }, { 0, 54, 0 } } }, { { { 18, 0, 1 }, { 0, 55, 0 } } }, { { { 18, 0, 2 }, { 0, 56, 0 } } }, { { { 19, 0, 1 }, { 5, 47, 0 } } }, { { { 19, 0, 0 }, { 0, 57, 0 } } }, { { { 19, 0, 1 }, { 0, 58, 0 } } }, { { { 19, 0, 2 }, { 0, 59, 0 } } }, { { { 20, 0, 1 }, { 7, 46, 0 } } }, { { { 20, 0, 0 }, { 0, 60, 0 } } }, { { { 20, 0, 1 }, { 0, 61, 0 } } }, { { { 20, 0, 2 }, { 0, 62, 0 } } }, { { { 21, 0, 1 }, { 8, 47, 0 } } }, { { { 21, 0, 0 }, { 0, 63, 0 } } }, { { { 21, 0, 1 }, { 1, 62, 0 } } }, { { { 21, 0, 2 }, { 1, 63, 0 } } }, { { { 22, 0, 1 }, { 10, 46, 0 } } }, { { { 22, 0, 0 }, { 2, 62, 0 } } }, { { { 22, 0, 1 }, { 2, 63, 0 } } }, { { { 22, 0, 2 }, { 3, 62, 0 } } }, { { { 23, 0, 1 }, { 11, 47, 0 } } }, { { { 23, 0, 0 }, { 3, 63, 0 } } }, { { { 23, 0, 1 }, { 4, 62, 0 } } }, { { { 23, 0, 2 }, { 4, 63, 0 } } }, { { { 24, 0, 1 }, { 13, 46, 0 } } }, { { { 24, 0, 0 }, { 5, 62, 0 } } }, { { { 24, 0, 1 }, { 5, 63, 0 } } }, { { { 24, 0, 2 }, { 6, 62, 0 } } }, { { { 25, 0, 1 }, { 14, 47, 0 } } }, { { { 25, 0, 0 }, { 6, 63, 0 } } }, { { { 25, 0, 1 }, { 7, 62, 0 } } }, { { { 25, 0, 2 }, { 7, 63, 0 } } }, { { { 26, 0, 1 }, { 16, 45, 0 } } }, { 
{ { 26, 0, 0 }, { 8, 62, 0 } } }, { { { 26, 0, 1 }, { 8, 63, 0 } } }, { { { 26, 0, 2 }, { 9, 62, 0 } } }, { { { 27, 0, 1 }, { 16, 48, 0 } } }, { { { 27, 0, 0 }, { 9, 63, 0 } } }, { { { 27, 0, 1 }, { 10, 62, 0 } } }, { { { 27, 0, 2 }, { 10, 63, 0 } } }, { { { 28, 0, 1 }, { 16, 51, 0 } } }, { { { 28, 0, 0 }, { 11, 62, 0 } } }, { { { 28, 0, 1 }, { 11, 63, 0 } } }, { { { 28, 0, 2 }, { 12, 62, 0 } } }, { { { 29, 0, 1 }, { 16, 54, 0 } } }, { { { 29, 0, 0 }, { 12, 63, 0 } } }, { { { 29, 0, 1 }, { 13, 62, 0 } } }, { { { 29, 0, 2 }, { 13, 63, 0 } } }, { { { 30, 0, 1 }, { 16, 57, 0 } } }, { { { 30, 0, 0 }, { 14, 62, 0 } } }, { { { 30, 0, 1 }, { 14, 63, 0 } } }, { { { 30, 0, 2 }, { 15, 62, 0 } } }, { { { 31, 0, 1 }, { 16, 60, 0 } } }, { { { 31, 0, 0 }, { 15, 63, 0 } } }, { { { 31, 0, 1 }, { 24, 46, 0 } } }, { { { 31, 0, 2 }, { 16, 62, 0 } } }, { { { 32, 0, 2 }, { 16, 63, 0 } } }, { { { 32, 0, 1 }, { 17, 62, 0 } } }, { { { 32, 0, 0 }, { 25, 47, 0 } } }, { { { 32, 0, 1 }, { 17, 63, 0 } } }, { { { 32, 0, 2 }, { 18, 62, 0 } } }, { { { 33, 0, 1 }, { 18, 63, 0 } } }, { { { 33, 0, 0 }, { 27, 46, 0 } } }, { { { 33, 0, 1 }, { 19, 62, 0 } } }, { { { 33, 0, 2 }, { 19, 63, 0 } } }, { { { 34, 0, 1 }, { 20, 62, 0 } } }, { { { 34, 0, 0 }, { 28, 47, 0 } } }, { { { 34, 0, 1 }, { 20, 63, 0 } } }, { { { 34, 0, 2 }, { 21, 62, 0 } } }, { { { 35, 0, 1 }, { 21, 63, 0 } } }, { { { 35, 0, 0 }, { 30, 46, 0 } } }, { { { 35, 0, 1 }, { 22, 62, 0 } } }, { { { 35, 0, 2 }, { 22, 63, 0 } } }, { { { 36, 0, 1 }, { 23, 62, 0 } } }, { { { 36, 0, 0 }, { 31, 47, 0 } } }, { { { 36, 0, 1 }, { 23, 63, 0 } } }, { { { 36, 0, 2 }, { 24, 62, 0 } } }, { { { 37, 0, 1 }, { 24, 63, 0 } } }, { { { 37, 0, 0 }, { 32, 47, 0 } } }, { { { 37, 0, 1 }, { 25, 62, 0 } } }, { { { 37, 0, 2 }, { 25, 63, 0 } } }, { { { 38, 0, 1 }, { 26, 62, 0 } } }, { { { 38, 0, 0 }, { 32, 50, 0 } } }, { { { 38, 0, 1 }, { 26, 63, 0 } } }, { { { 38, 0, 2 }, { 27, 62, 0 } } }, { { { 39, 0, 1 }, { 27, 63, 0 } } }, { { { 39, 0, 0 }, { 32, 53, 0 } } }, { { { 
39, 0, 1 }, { 28, 62, 0 } } }, { { { 39, 0, 2 }, { 28, 63, 0 } } }, { { { 40, 0, 1 }, { 29, 62, 0 } } }, { { { 40, 0, 0 }, { 32, 56, 0 } } }, { { { 40, 0, 1 }, { 29, 63, 0 } } }, { { { 40, 0, 2 }, { 30, 62, 0 } } }, { { { 41, 0, 1 }, { 30, 63, 0 } } }, { { { 41, 0, 0 }, { 32, 59, 0 } } }, { { { 41, 0, 1 }, { 31, 62, 0 } } }, { { { 41, 0, 2 }, { 31, 63, 0 } } }, { { { 42, 0, 1 }, { 32, 61, 0 } } }, { { { 42, 0, 0 }, { 32, 62, 0 } } }, { { { 42, 0, 1 }, { 32, 63, 0 } } }, { { { 42, 0, 2 }, { 41, 46, 0 } } }, { { { 43, 0, 1 }, { 33, 62, 0 } } }, { { { 43, 0, 0 }, { 33, 63, 0 } } }, { { { 43, 0, 1 }, { 34, 62, 0 } } }, { { { 43, 0, 2 }, { 42, 47, 0 } } }, { { { 44, 0, 1 }, { 34, 63, 0 } } }, { { { 44, 0, 0 }, { 35, 62, 0 } } }, { { { 44, 0, 1 }, { 35, 63, 0 } } }, { { { 44, 0, 2 }, { 44, 46, 0 } } }, { { { 45, 0, 1 }, { 36, 62, 0 } } }, { { { 45, 0, 0 }, { 36, 63, 0 } } }, { { { 45, 0, 1 }, { 37, 62, 0 } } }, { { { 45, 0, 2 }, { 45, 47, 0 } } }, { { { 46, 0, 1 }, { 37, 63, 0 } } }, { { { 46, 0, 0 }, { 38, 62, 0 } } }, { { { 46, 0, 1 }, { 38, 63, 0 } } }, { { { 46, 0, 2 }, { 47, 46, 0 } } }, { { { 47, 0, 1 }, { 39, 62, 0 } } }, { { { 47, 0, 0 }, { 39, 63, 0 } } }, { { { 47, 0, 1 }, { 40, 62, 0 } } }, { { { 47, 0, 2 }, { 48, 46, 0 } } }, { { { 48, 0, 2 }, { 40, 63, 0 } } }, { { { 48, 0, 1 }, { 41, 62, 0 } } }, { { { 48, 0, 0 }, { 41, 63, 0 } } }, { { { 48, 0, 1 }, { 48, 49, 0 } } }, { { { 48, 0, 2 }, { 42, 62, 0 } } }, { { { 49, 0, 1 }, { 42, 63, 0 } } }, { { { 49, 0, 0 }, { 43, 62, 0 } } }, { { { 49, 0, 1 }, { 48, 52, 0 } } }, { { { 49, 0, 2 }, { 43, 63, 0 } } }, { { { 50, 0, 1 }, { 44, 62, 0 } } }, { { { 50, 0, 0 }, { 44, 63, 0 } } }, { { { 50, 0, 1 }, { 48, 55, 0 } } }, { { { 50, 0, 2 }, { 45, 62, 0 } } }, { { { 51, 0, 1 }, { 45, 63, 0 } } }, { { { 51, 0, 0 }, { 46, 62, 0 } } }, { { { 51, 0, 1 }, { 48, 58, 0 } } }, { { { 51, 0, 2 }, { 46, 63, 0 } } }, { { { 52, 0, 1 }, { 47, 62, 0 } } }, { { { 52, 0, 0 }, { 47, 63, 0 } } }, { { { 52, 0, 1 }, { 48, 61, 0 } } }, { { { 
52, 0, 2 }, { 48, 62, 0 } } }, { { { 53, 0, 1 }, { 56, 47, 0 } } }, { { { 53, 0, 0 }, { 48, 63, 0 } } }, { { { 53, 0, 1 }, { 49, 62, 0 } } }, { { { 53, 0, 2 }, { 49, 63, 0 } } }, { { { 54, 0, 1 }, { 58, 46, 0 } } }, { { { 54, 0, 0 }, { 50, 62, 0 } } }, { { { 54, 0, 1 }, { 50, 63, 0 } } }, { { { 54, 0, 2 }, { 51, 62, 0 } } }, { { { 55, 0, 1 }, { 59, 47, 0 } } }, { { { 55, 0, 0 }, { 51, 63, 0 } } }, { { { 55, 0, 1 }, { 52, 62, 0 } } }, { { { 55, 0, 2 }, { 52, 63, 0 } } }, { { { 56, 0, 1 }, { 61, 46, 0 } } }, { { { 56, 0, 0 }, { 53, 62, 0 } } }, { { { 56, 0, 1 }, { 53, 63, 0 } } }, { { { 56, 0, 2 }, { 54, 62, 0 } } }, { { { 57, 0, 1 }, { 62, 47, 0 } } }, { { { 57, 0, 0 }, { 54, 63, 0 } } }, { { { 57, 0, 1 }, { 55, 62, 0 } } }, { { { 57, 0, 2 }, { 55, 63, 0 } } }, { { { 58, 0, 1 }, { 56, 62, 1 } } }, { { { 58, 0, 0 }, { 56, 62, 0 } } }, { { { 58, 0, 1 }, { 56, 63, 0 } } }, { { { 58, 0, 2 }, { 57, 62, 0 } } }, { { { 59, 0, 1 }, { 57, 63, 1 } } }, { { { 59, 0, 0 }, { 57, 63, 0 } } }, { { { 59, 0, 1 }, { 58, 62, 0 } } }, { { { 59, 0, 2 }, { 58, 63, 0 } } }, { { { 60, 0, 1 }, { 59, 62, 1 } } }, { { { 60, 0, 0 }, { 59, 62, 0 } } }, { { { 60, 0, 1 }, { 59, 63, 0 } } }, { { { 60, 0, 2 }, { 60, 62, 0 } } }, { { { 61, 0, 1 }, { 60, 63, 1 } } }, { { { 61, 0, 0 }, { 60, 63, 0 } } }, { { { 61, 0, 1 }, { 61, 62, 0 } } }, { { { 61, 0, 2 }, { 61, 63, 0 } } }, { { { 62, 0, 1 }, { 62, 62, 1 } } }, { { { 62, 0, 0 }, { 62, 62, 0 } } }, { { { 62, 0, 1 }, { 62, 63, 0 } } }, { { { 62, 0, 2 }, { 63, 62, 0 } } }, { { { 63, 0, 1 }, { 63, 63, 1 } } }, { { { 63, 0, 0 }, { 63, 63, 0 } } } }; static const DDSSingleColorLookup* DDS_LOOKUP[] = { DDSLookup_5_4, DDSLookup_6_4, DDSLookup_5_4 }; /* Macros */ #define C565_r(x) (((x) & 0xF800) >> 11) #define C565_g(x) (((x) & 0x07E0) >> 5) #define C565_b(x) ((x) & 0x001F) #define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2)) #define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4)) #define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2)) #define 
DIV2(x) ((x) > 1 ? ((x) >> 1) : 1) #define FixRange(min, max, steps) \ if (min > max) \ min = max; \ if ((ssize_t) max - min < steps) \ max = MagickMin(min + steps, 255); \ if ((ssize_t) max - min < steps) \ min = MagickMax(0, (ssize_t) max - steps) #define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z) #define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \ = value #define VectorInit3(vector, value) vector.x = vector.y = vector.z = value #define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \ g && mask.b_bitmask == b && mask.alpha_bitmask == a) /* Forward declarations */ static MagickBooleanType WriteDDSImage(const ImageInfo *,Image *,ExceptionInfo *); static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x + right.x; destination->y = left.y + right.y; destination->z = left.z + right.z; destination->w = left.w + right.w; } static inline void VectorClamp(DDSVector4 *value) { value->x = MagickMin(1.0f,MagickMax(0.0f,value->x)); value->y = MagickMin(1.0f,MagickMax(0.0f,value->y)); value->z = MagickMin(1.0f,MagickMax(0.0f,value->z)); value->w = MagickMin(1.0f,MagickMax(0.0f,value->w)); } static inline void VectorClamp3(DDSVector3 *value) { value->x = MagickMin(1.0f,MagickMax(0.0f,value->x)); value->y = MagickMin(1.0f,MagickMax(0.0f,value->y)); value->z = MagickMin(1.0f,MagickMax(0.0f,value->z)); } static inline void VectorCopy43(const DDSVector4 source, DDSVector3 *destination) { destination->x = source.x; destination->y = source.y; destination->z = source.z; } static inline void VectorCopy44(const DDSVector4 source, DDSVector4 *destination) { destination->x = source.x; destination->y = source.y; destination->z = source.z; destination->w = source.w; } static inline void VectorNegativeMultiplySubtract(const DDSVector4 a, const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination) { destination->x = c.x - (a.x * b.x); 
destination->y = c.y - (a.y * b.y); destination->z = c.z - (a.z * b.z); destination->w = c.w - (a.w * b.w); } static inline void VectorMultiply(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x * right.x; destination->y = left.y * right.y; destination->z = left.z * right.z; destination->w = left.w * right.w; } static inline void VectorMultiply3(const DDSVector3 left, const DDSVector3 right, DDSVector3 *destination) { destination->x = left.x * right.x; destination->y = left.y * right.y; destination->z = left.z * right.z; } static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination) { destination->x = (a.x * b.x) + c.x; destination->y = (a.y * b.y) + c.y; destination->z = (a.z * b.z) + c.z; destination->w = (a.w * b.w) + c.w; } static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b, const DDSVector3 c, DDSVector3 *destination) { destination->x = (a.x * b.x) + c.x; destination->y = (a.y * b.y) + c.y; destination->z = (a.z * b.z) + c.z; } static inline void VectorReciprocal(const DDSVector4 value, DDSVector4 *destination) { destination->x = 1.0f / value.x; destination->y = 1.0f / value.y; destination->z = 1.0f / value.z; destination->w = 1.0f / value.w; } static inline void VectorSubtract(const DDSVector4 left, const DDSVector4 right, DDSVector4 *destination) { destination->x = left.x - right.x; destination->y = left.y - right.y; destination->z = left.z - right.z; destination->w = left.w - right.w; } static inline void VectorSubtract3(const DDSVector3 left, const DDSVector3 right, DDSVector3 *destination) { destination->x = left.x - right.x; destination->y = left.y - right.y; destination->z = left.z - right.z; } static inline void VectorTruncate(DDSVector4 *value) { value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x); value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y); value->z = value->z > 0.0f ? 
floor(value->z) : ceil(value->z); value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w); } static inline void VectorTruncate3(DDSVector3 *value) { value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x); value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y); value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z); } static inline size_t ClampToLimit(const float value, const size_t limit) { size_t result = (int) (value + 0.5f); if (result < 0.0f) return(0); if (result > limit) return(limit); return result; } static inline size_t ColorTo565(const DDSVector3 point) { size_t r = ClampToLimit(31.0f*point.x,31); size_t g = ClampToLimit(63.0f*point.y,63); size_t b = ClampToLimit(31.0f*point.z,31); return (r << 11) | (g << 5) | b; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s D D S % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsDDS() returns MagickTrue if the image format type, identified by the % magick string, is DDS. % % The format of the IsDDS method is: % % MagickBooleanType IsDDS(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((char *) magick,"DDS ", 4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d D D S I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadDDSImage() reads a DirectDraw Surface image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. 
%
%  The format of the ReadDDSImage method is:
%
%      Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image_info: The image info.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Parse the 124-byte DDS header (and, when present, the DX10 extension
  header) into *dds_info.  All fields are little-endian.  Returns
  MagickFalse when the header sizes or the required flags are malformed.
*/
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
  size_t
    hdr_size,
    required;

  /* Seek to start of header */
  (void) SeekBlob(image, 4, SEEK_SET);
  /* Check header field */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 124)
    return MagickFalse;
  /* Fill in DDS info struct */
  dds_info->flags = ReadBlobLSBLong(image);
  /* Check required flags */
  required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
  if ((dds_info->flags & required) != required)
    return MagickFalse;
  dds_info->height = ReadBlobLSBLong(image);
  dds_info->width = ReadBlobLSBLong(image);
  dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
  dds_info->depth = ReadBlobLSBLong(image);
  dds_info->mipmapcount = ReadBlobLSBLong(image);
  (void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */
  /* Read pixel format structure */
  hdr_size = ReadBlobLSBLong(image);
  if (hdr_size != 32)
    return MagickFalse;
  dds_info->pixelformat.flags = ReadBlobLSBLong(image);
  dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
  dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
  dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
  dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);
  dds_info->ddscaps1 = ReadBlobLSBLong(image);
  dds_info->ddscaps2 = ReadBlobLSBLong(image);
  (void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */
  /* Read optional DX10 header if available */
  if ((dds_info->pixelformat.flags & DDPF_FOURCC) &&
      (dds_info->pixelformat.fourcc == FOURCC_DX10))
    {
      dds_info->extFormat = ReadBlobLSBLong(image);
      dds_info->extDimension = ReadBlobLSBLong(image);
      dds_info->extFlags = ReadBlobLSBLong(image);
      dds_info->extArraySize = ReadBlobLSBLong(image);
      dds_info->extFlags2 = ReadBlobLSBLong(image);
    }
  else
    {
      /* No DX10 header: zero the extension fields so later format checks
         (e.g. extFormat comparisons) are well defined. */
      dds_info->extFormat = 0;
      dds_info->extDimension = 0;
      dds_info->extFlags = 0;
      dds_info->extArraySize = 0;
      dds_info->extFlags2 = 0;
    }
  return MagickTrue;
}

/*
  Expand one decoded DXT1 4x4 block into the pixel patch at (x,y).  Returns
  MagickFalse when a non-zero alpha value is encountered on an image whose
  alpha trait is still undefined -- the caller uses this as a signal to
  enable alpha and re-run the block.
*/
static MagickBooleanType SetDXT1Pixels(Image *image,ssize_t x,ssize_t y,
  DDSColors colors,size_t bits,Quantum *q)
{
  ssize_t
    i;

  ssize_t
    j;

  unsigned char
    code;

  for (j = 0; j < 4; j++)
  {
    for (i = 0; i < 4; i++)
    {
      /* Clip the 4x4 block against the image edges. */
      if ((x + i) < (ssize_t) image->columns &&
          (y + j) < (ssize_t) image->rows)
        {
          /* Two bits per texel select one of the four palette entries. */
          code=(unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
          SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
          SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
          SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
          SetPixelOpacity(image,ScaleCharToQuantum(colors.a[code]),q);
          if ((colors.a[code] != 0) &&
              (image->alpha_trait == UndefinedPixelTrait))
            return(MagickFalse);
          q+=GetPixelChannels(image);
        }
    }
  }
  return(MagickTrue);
}

/*
  Decode the mipmap chain that follows the base image, appending one image
  per mipmap level via 'decoder'.  Mipmaps are only present for textures
  and cube maps.
*/
static MagickBooleanType ReadMipmaps(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,DDSPixelDecoder decoder,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  /* EOF at this point means the mipmap data is truncated. */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /* Mipmapcount includes the main image, so start from one */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        AcquireNextImage(image_info,image,exception);
        if (image->next == (Image *) NULL)
          return(MagickFalse);
        image->next->alpha_trait=image->alpha_trait;
        image=SyncNextImageInList(image);
        status=SetImageExtent(image,w,h,exception);
        if (status == MagickFalse)
          break;
status=decoder(image,dds_info,exception);
        if (status == MagickFalse)
          break;
        if ((w == 1) && (h == 1))
          break;
        /* Each mipmap level halves both dimensions (minimum 1). */
        w=DIV2(w);
        h=DIV2(h);
      }
    }
  return(status);
}

/*
  Expand the two 5:6:5 block endpoints c0/c1 into the four-entry DXT color
  palette.  When c0 <= c1 (and alpha is not ignored) the block is in
  3-color + transparent mode: entry 2 is the midpoint and entry 3 is
  transparent black.
*/
static void CalculateColors(unsigned short c0, unsigned short c1,
  DDSColors *c, MagickBooleanType ignoreAlpha)
{
  c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;

  c->r[0] = (unsigned char) C565_red(c0);
  c->g[0] = (unsigned char) C565_green(c0);
  c->b[0] = (unsigned char) C565_blue(c0);

  c->r[1] = (unsigned char) C565_red(c1);
  c->g[1] = (unsigned char) C565_green(c1);
  c->b[1] = (unsigned char) C565_blue(c1);

  if (ignoreAlpha != MagickFalse || c0 > c1)
    {
      /* 4-color mode: entries 2 and 3 interpolate at 1/3 and 2/3. */
      c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
      c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
      c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);

      c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
      c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
      c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
    }
  else
    {
      c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
      c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
      c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);

      c->r[3] = c->g[3] = c->b[3] = 0;
      c->a[3] = 255;
    }
}

/*
  Decode a DXT1 (BC1) compressed image: one 8-byte block per 4x4 texel
  patch.  If a block turns out to use transparency on an image without an
  alpha trait, alpha is enabled and the block is written again.
*/
static MagickBooleanType ReadDXT1Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  Quantum
    *q;

  ssize_t
    x;

  size_t
    bits;

  ssize_t
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
        MagickMin(4,image->rows-y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read 8 bytes of data from the image */
      c0=ReadBlobLSBShort(image);
      c1=ReadBlobLSBShort(image);
      bits=ReadBlobLSBLong(image);
      CalculateColors(c0,c1,&colors,MagickFalse);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels */
      if (SetDXT1Pixels(image,x,y,colors,bits,q) == MagickFalse)
        {
          /* Correct alpha */
          SetImageAlpha(image,QuantumRange,exception);
          q=QueueAuthenticPixels(image,x,y,MagickMin(4,image->columns-x),
            MagickMin(4,image->rows-y),exception);
          if (q != (Quantum *) NULL)
            SetDXT1Pixels(image,x,y,colors,bits,q);
        }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/*
  Skip the mipmap images for compressed (DXTn) dds files
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info,
  int texel_size,ExceptionInfo *exception)
{
  /* Only skip mipmaps for textures and cube maps */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageWarning,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;

      ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);
      /* Mipmapcount includes the main image, so start from one */
      for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        /* Each level stores ceil(w/4)*ceil(h/4) texel blocks. */
        offset=(MagickOffsetType)((w+3)/4)*((h+3)/4)*texel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}

/*
  Decode a DXT1 image: base level, then either decode or skip the mipmap
  chain (8 bytes per 4x4 block).
*/
static MagickBooleanType ReadDXT1(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT1Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);

  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadDXT1Pixels,exception));
  else
    return(SkipDXTMipmaps(image,dds_info,8,exception));
}

/*
  Decode a DXT3 (BC2) compressed image: 8 bytes of explicit 4-bit alpha
  followed by an 8-byte DXT color block per 4x4 patch.
*/
static MagickBooleanType ReadDXT3Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  Quantum
    *q;

  ssize_t
    i,
    x;

  unsigned char
    alpha;

  size_t
    a0,
    a1,
    bits,
    code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read alpha values (8 bytes) */
      a0 = ReadBlobLSBLong(image);
      a1 = ReadBlobLSBLong(image);
      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      /* BC2 always uses 4-color mode, so alpha from the color block is
         ignored. */
      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) image->columns &&
              (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /*
                Extract alpha value: multiply 0..15 by 17 to get range 0..255;
                rows 0-1 come from a0, rows 2-3 from a1.
              */
              if (j < 2)
                alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
              else
                alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

/*
  Decode a DXT3 image: base level, then either decode or skip the mipmap
  chain (16 bytes per 4x4 block).
*/
static MagickBooleanType ReadDXT3(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT3Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);

  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadDXT3Pixels,exception));
  else
    return(SkipDXTMipmaps(image,dds_info,16,exception));
}

/*
  Decode a DXT5 (BC3) compressed image: two alpha endpoints plus 48 bits
  of 3-bit interpolation codes, followed by an 8-byte DXT color block per
  4x4 patch.
*/
static MagickBooleanType ReadDXT5Pixels(Image *image,
  DDSInfo *magick_unused(dds_info),ExceptionInfo *exception)
{
  DDSColors
    colors;

  MagickSizeType
    alpha_bits;

  Quantum
    *q;

  ssize_t
    i,
    x;

  unsigned char
    a0,
    a1;

  size_t
    alpha,
    bits,
    code,
    alpha_code;

  ssize_t
    j,
    y;

  unsigned short
    c0,
    c1;

  magick_unreferenced(dds_info);
  for (y = 0; y < (ssize_t) image->rows; y += 4)
  {
    for (x = 0; x < (ssize_t) image->columns; x += 4)
    {
      /* Get 4x4 patch of pixels to write on */
      q = QueueAuthenticPixels(image, x, y, MagickMin(4, image->columns - x),
        MagickMin(4, image->rows - y),exception);
      if (q == (Quantum *) NULL)
        return(MagickFalse);
      /* Read alpha values (8 bytes): 2 endpoint bytes + 48 bits of codes */
      a0 = (unsigned char) ReadBlobByte(image);
      a1 = (unsigned char) ReadBlobByte(image);
      alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
      alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image)
        << 32);
      /* Read 8 bytes of data from the image */
      c0 = ReadBlobLSBShort(image);
      c1 = ReadBlobLSBShort(image);
      bits = ReadBlobLSBLong(image);
      CalculateColors(c0, c1, &colors, MagickTrue);
      if (EOFBlob(image) != MagickFalse)
        return(MagickFalse);
      /* Write the pixels */
      for (j = 0; j < 4; j++)
      {
        for (i = 0; i < 4; i++)
        {
          if ((x + i) < (ssize_t) image->columns &&
              (y + j) < (ssize_t) image->rows)
            {
              code = (bits >> ((4*j+i)*2)) & 0x3;
              SetPixelRed(image,ScaleCharToQuantum(colors.r[code]),q);
              SetPixelGreen(image,ScaleCharToQuantum(colors.g[code]),q);
              SetPixelBlue(image,ScaleCharToQuantum(colors.b[code]),q);
              /*
                Extract alpha value: 3-bit code selects an endpoint or an
                interpolated value (8-step mode when a0 > a1, otherwise
                6-step mode with explicit 0 and 255).
              */
              alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
              if (alpha_code == 0)
                alpha = a0;
              else if (alpha_code == 1)
                alpha = a1;
              else if (a0 > a1)
                alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
              else if (alpha_code == 6)
                alpha = 0;
              else if (alpha_code == 7)
                alpha = 255;
              else
                alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char) alpha),q);
              q+=GetPixelChannels(image);
            }
        }
      }
      if (SyncAuthenticPixels(image,exception) == MagickFalse)
        return(MagickFalse);
    }
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }
  return(MagickTrue);
}

static MagickBooleanType
ReadDXT5(const ImageInfo *image_info,Image *image,
  DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadDXT5Pixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);

  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadDXT5Pixels,exception));
  else
    return(SkipDXTMipmaps(image,dds_info,16,exception)); /* 16 bytes/block */
}

/* Decode uncompressed (DDPF_RGB) scanlines: 8-bit gray, 16-bit B5G6R5, or
   24/32-bit BGR(X). */
static MagickBooleanType ReadUncompressedRGBPixels(Image *image,
  DDSInfo *dds_info,ExceptionInfo *exception)
{
  Quantum
    *q;

  ssize_t
    x,
    y;

  unsigned short
    color;

  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);

    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 8 ||
          dds_info->extFormat == DXGI_FORMAT_R8_UNORM)
        SetPixelGray(image,ScaleCharToQuantum(ReadBlobByte(image)),q);
      else if (dds_info->pixelformat.rgb_bitcount == 16 ||
          dds_info->extFormat == DXGI_FORMAT_B5G6R5_UNORM)
        {
          /* Expand the 5/6/5 bit fields to 0..255 */
          color=ReadBlobShort(image);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            (((color >> 11)/31.0)*255)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 5) >> 10)/63.0)*255)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
        }
      else
        {
          /* 24- or 32-bit BGR(X); the X filler byte is read and discarded */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          if (dds_info->pixelformat.rgb_bitcount == 32 ||
              dds_info->extFormat == DXGI_FORMAT_B8G8R8X8_UNORM)
            (void) ReadBlobByte(image);
        }
      q+=GetPixelChannels(image);
    }

    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }

  return(MagickTrue);
}

/*
  Skip the mipmap images for uncompressed (RGB or RGBA) dds files
*/
static
MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info,
  int pixel_size,ExceptionInfo *exception)
{
  /*
    Only skip mipmaps for textures and cube maps
  */
  if (EOFBlob(image) != MagickFalse)
    {
      ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
        image->filename);
      return(MagickFalse);
    }
  if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
      && (dds_info->ddscaps1 & DDSCAPS_TEXTURE
          || dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
    {
      MagickOffsetType
        offset;

      ssize_t
        i;

      size_t
        h,
        w;

      w=DIV2(dds_info->width);
      h=DIV2(dds_info->height);

      /* Mipmapcount includes the main image, so start from one */
      for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
      {
        /* Each level is w*h*pixel_size bytes; seek past it */
        offset=(MagickOffsetType)w*h*pixel_size;
        if (SeekBlob(image,offset,SEEK_CUR) < 0)
          break;
        w=DIV2(w);
        h=DIV2(h);
        if ((w == 1) && (h == 1))
          break;
      }
    }
  return(MagickTrue);
}

static MagickBooleanType ReadUncompressedRGB(const ImageInfo *image_info,
  Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (dds_info->pixelformat.rgb_bitcount == 8 ||
      dds_info->extFormat == DXGI_FORMAT_R8_UNORM)
    (void) SetImageType(image,GrayscaleType,exception);
  else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
      dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
    ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
      image->filename);

  if (ReadUncompressedRGBPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);

  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBPixels,
      exception));
  else
    return(SkipRGBMipmaps(image,dds_info,3,exception));
}

/* Decode uncompressed pixel data that carries an alpha channel. */
static MagickBooleanType ReadUncompressedRGBAPixels(Image *image,
  DDSInfo *dds_info,ExceptionInfo *exception)
{
  Quantum
    *q;

  ssize_t
    alphaBits,
    x,
    y;

  unsigned short
    color;

  /* Infer the 16-bit packing (1555, L8A8 or 4444) from the channel masks */
  alphaBits=0;
  if (dds_info->pixelformat.rgb_bitcount == 16)
    {
      if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
        alphaBits=1;
      else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
        {
          alphaBits=2; /* 8-bit luminance + 8-bit alpha */
          (void) SetImageType(image,GrayscaleAlphaType,exception);
        }
      else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
        alphaBits=4;
      else
        ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
          image->filename);
    }
  if (dds_info->extFormat == DXGI_FORMAT_B5G5R5A1_UNORM)
    alphaBits=1;

  for (y = 0; y < (ssize_t) image->rows; y++)
  {
    q = QueueAuthenticPixels(image, 0, y, image->columns, 1,exception);
    if (q == (Quantum *) NULL)
      return(MagickFalse);

    for (x = 0; x < (ssize_t) image->columns; x++)
    {
      if (dds_info->pixelformat.rgb_bitcount == 16 ||
          dds_info->extFormat == DXGI_FORMAT_B5G5R5A1_UNORM)
        {
          color=ReadBlobShort(image);
          if (alphaBits == 1)
            {
              /* 1-5-5-5: top bit is alpha, 5 bits per color channel */
              SetPixelAlpha(image,(color & (1 << 15)) ? QuantumRange : 0,q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 1) >> 11)/31.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 6) >> 11)/31.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 11) >> 11)/31.0)*255)),q);
            }
          else if (alphaBits == 2)
            {
              /* 8-8 luminance/alpha */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (color >> 8)),q);
              SetPixelGray(image,ScaleCharToQuantum((unsigned char)color),q);
            }
          else
            {
              /* 4-4-4-4 */
              SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
                (((color >> 12)/15.0)*255)),q);
              SetPixelRed(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 4) >> 12)/15.0)*255)),q);
              SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 8) >> 12)/15.0)*255)),q);
              SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
                ((((unsigned short)(color << 12) >> 12)/15.0)*255)),q);
            }
        }
      else if (dds_info->extFormat == DXGI_FORMAT_R8G8B8A8_UNORM ||
          IsBitMask(dds_info->pixelformat,0x000000ff,0x0000ff00,0x00ff0000,0xff000000))
        {
          /* Byte order R, G, B, A */
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
        }
      else
        {
          /* Byte order B, G, R, A */
          SetPixelBlue(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelGreen(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelRed(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
          SetPixelAlpha(image,ScaleCharToQuantum((unsigned char)
            ReadBlobByte(image)),q);
        }
      q+=GetPixelChannels(image);
    }

    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      return(MagickFalse);
    if (EOFBlob(image) != MagickFalse)
      return(MagickFalse);
  }

  return(MagickTrue);
}

static MagickBooleanType ReadUncompressedRGBA(const ImageInfo *image_info,
  Image *image,DDSInfo *dds_info,const MagickBooleanType read_mipmaps,
  ExceptionInfo *exception)
{
  if (ReadUncompressedRGBAPixels(image,dds_info,exception) == MagickFalse)
    return(MagickFalse);

  if (read_mipmaps != MagickFalse)
    return(ReadMipmaps(image_info,image,dds_info,ReadUncompressedRGBAPixels,
      exception));
  else
    return(SkipRGBMipmaps(image,dds_info,4,exception));
}

/* Top-level DDS decoder: parse the header, select a pixel decoder from the
   pixel-format flags, then read each face/slice into an image list. */
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
  const char
    *option;

  CompressionType
    compression;

  DDSInfo
    dds_info;

  DDSDecoder
    *decoder;

  Image
    *image;

  MagickBooleanType
    status,
    cubemap,
    volume,
    read_mipmaps;

  PixelTrait
    alpha_trait;

  size_t
    n,
    num_images;

  /*
    Open image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cubemap=MagickFalse, volume=MagickFalse, read_mipmaps=MagickFalse;
  image=AcquireImage(image_info,exception);
  status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
  if (status == MagickFalse)
    {
      image=DestroyImageList(image);
      return((Image *) NULL);
    }

  /*
    Initialize image structure.
  */
  if (ReadDDSInfo(image, &dds_info) != MagickTrue)
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
    cubemap = MagickTrue;

  if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
    volume = MagickTrue;

  /*
    Determine pixel format
  */
  if (dds_info.pixelformat.flags & DDPF_RGB)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          alpha_trait = BlendPixelTrait;
          decoder = ReadUncompressedRGBA;
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
    {
      compression = NoCompression;
      if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
        {
          /* Not sure how to handle this */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      else
        {
          alpha_trait = UndefinedPixelTrait;
          decoder = ReadUncompressedRGB;
        }
    }
  else if (dds_info.pixelformat.flags & DDPF_FOURCC)
    {
      switch (dds_info.pixelformat.fourcc)
      {
        case FOURCC_DXT1:
        {
          alpha_trait = UndefinedPixelTrait;
          compression = DXT1Compression;
          decoder = ReadDXT1;
          break;
        }
        case FOURCC_DXT3:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT3Compression;
          decoder = ReadDXT3;
          break;
        }
        case FOURCC_DXT5:
        {
          alpha_trait = BlendPixelTrait;
          compression = DXT5Compression;
          decoder = ReadDXT5;
          break;
        }
        case FOURCC_DX10:
        {
          /* Extended DX10 header: only 2D textures are supported */
          if (dds_info.extDimension != DDSEXT_DIMENSION_TEX2D)
            {
              ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
            }
          /* Map the DXGI format onto the matching legacy decoder */
          switch (dds_info.extFormat)
          {
            case DXGI_FORMAT_R8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_B5G6R5_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_B5G5R5A1_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_B8G8R8A8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_R8G8B8A8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = BlendPixelTrait;
              decoder = ReadUncompressedRGBA;
              break;
            }
            case DXGI_FORMAT_B8G8R8X8_UNORM:
            {
              compression = NoCompression;
              alpha_trait = UndefinedPixelTrait;
              decoder = ReadUncompressedRGB;
              break;
            }
            case DXGI_FORMAT_BC1_UNORM:
            {
              alpha_trait = UndefinedPixelTrait;
              compression = DXT1Compression;
              decoder = ReadDXT1;
              break;
            }
            case DXGI_FORMAT_BC2_UNORM:
            {
              alpha_trait = BlendPixelTrait;
              compression = DXT3Compression;
              decoder = ReadDXT3;
              break;
            }
            case DXGI_FORMAT_BC3_UNORM:
            {
              alpha_trait = BlendPixelTrait;
              compression = DXT5Compression;
              decoder = ReadDXT5;
              break;
            }
            default:
            {
              /* Unknown format */
              ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
            }
          }
          if (dds_info.extFlags & DDSEXTFLAGS_CUBEMAP)
            cubemap = MagickTrue;
          num_images = dds_info.extArraySize;
          break;
        }
        default:
        {
          /* Unknown FOURCC */
          ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
        }
      }
    }
  else
    {
      /*
        Neither compressed nor uncompressed...
        thus unsupported
      */
      ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
    }

  /* NOTE(review): this unconditionally overwrites any num_images taken from
     dds_info.extArraySize in the DX10 path above — confirm whether array
     textures are meant to decode only their first element. */
  num_images = 1;
  if (cubemap)
    {
      /*
        Determine number of faces defined in the cubemap
      */
      num_images = 0;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
      if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
    }

  if (volume)
    num_images = dds_info.depth;

  /* Sanity-check the frame count against the blob size before allocating */
  if ((num_images == 0) || (num_images > GetBlobSize(image)))
    ThrowReaderException(CorruptImageError,"ImproperImageHeader");

  if (AcquireMagickResource(ListLengthResource,num_images) == MagickFalse)
    ThrowReaderException(ResourceLimitError,"ListLengthExceedsLimit");

  option=GetImageOption(image_info,"dds:skip-mipmaps");
  if (IsStringFalse(option) != MagickFalse)
    read_mipmaps=MagickTrue;

  for (n = 0; n < num_images; n++)
  {
    if (n != 0)
      {
        /* Start a new image */
        if (EOFBlob(image) != MagickFalse)
          ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
        AcquireNextImage(image_info,image,exception);
        if (GetNextImageInList(image) == (Image *) NULL)
          return(DestroyImageList(image));
        image=SyncNextImageInList(image);
      }

    image->alpha_trait=alpha_trait;
    image->compression=compression;
    image->columns=dds_info.width;
    image->rows=dds_info.height;
    image->storage_class=DirectClass;
    image->endian=LSBEndian;
    image->depth=8;
    if (image_info->ping != MagickFalse)
      {
        (void) CloseBlob(image);
        return(GetFirstImageInList(image));
      }
    status=SetImageExtent(image,image->columns,image->rows,exception);
    if (status == MagickFalse)
      return(DestroyImageList(image));
    (void) SetImageBackgroundColor(image,exception);
    status=(decoder)(image_info,image,&dds_info,read_mipmaps,exception);
    if (status == MagickFalse)
      {
        /* Decode failed: keep any frames already read, unless none exist */
        (void) CloseBlob(image);
        if (n == 0)
          return(DestroyImageList(image));
        return(GetFirstImageInList(image));
      }
  }

  (void) CloseBlob(image);
  return(GetFirstImageInList(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e g i s t e r D D S I m a g e                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RegisterDDSImage() adds attributes for the DDS image format to
%  the list of supported formats.  The attributes include the image format
%  tag, a method to read and/or write the format, whether the format
%  supports the saving of more than one frame to the same file or blob,
%  whether the format supports native in-memory I/O, and a brief
%  description of the format.
%
%  The format of the RegisterDDSImage method is:
%
%      RegisterDDSImage(void)
%
*/
ModuleExport size_t RegisterDDSImage(void)
{
  MagickInfo
    *entry;

  entry = AcquireMagickInfo("DDS","DDS","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  entry = AcquireMagickInfo("DDS","DXT1","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  entry = AcquireMagickInfo("DDS","DXT5","Microsoft DirectDraw Surface");
  entry->decoder = (DecodeImageHandler *) ReadDDSImage;
  entry->encoder = (EncodeImageHandler *) WriteDDSImage;
  entry->magick = (IsImageFormatHandler *) IsDDS;
  entry->flags|=CoderDecoderSeekableStreamFlag;
  (void) RegisterMagickInfo(entry);
  return(MagickImageCoderSignature);
}

/* Translate the 16 per-texel palette indices of a block through `map`;
   unmapped texels (map[i] == -1) get index 3 (transparent black in DXT1). */
static void RemapIndices(const ssize_t *map, const unsigned char *source,
  unsigned char *target)
{
  ssize_t
    i;

  for (i = 0; i < 16; i++)
  {
    if (map[i] == -1)
      target[i] = 3;
    else
      target[i] =
        source[map[i]];
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   U n r e g i s t e r D D S I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnregisterDDSImage() removes format registrations made by the
%  DDS module from the list of supported formats.
%
%  The format of the UnregisterDDSImage method is:
%
%      UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
  (void) UnregisterMagickInfo("DDS");
  (void) UnregisterMagickInfo("DXT1");
  (void) UnregisterMagickInfo("DXT5");
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e D D S I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteDDSImage() writes a DirectDraw Surface image file in the DXT5 format.
%
%  The format of the WriteDDSImage method is:
%
%      MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image)
%
%  A description of each parameter follows.
%
%    o image_info: the image info.
%
%    o image: The image.
%
*/

/* Quantize a block's 16 alpha values to the 8 codes reachable from the
   endpoints (min,max) with `steps` interpolants (5- or 7-alpha BC3 mode);
   fills `indices` and returns the total squared error. */
static size_t CompressAlpha(const size_t min, const size_t max,
  const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
  unsigned char
    codes[8];

  ssize_t
    i;

  size_t
    error,
    index,
    j,
    least,
    value;

  /* codes[6]/codes[7] are the explicit 0/255 of 5-alpha mode; in 7-alpha
     mode the interpolation loop below overwrites them */
  codes[0] = (unsigned char) min;
  codes[1] = (unsigned char) max;
  codes[6] = 0;
  codes[7] = 255;

  for (i=1; i < (ssize_t) steps; i++)
    codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps);

  error = 0;
  for (i=0; i<16; i++)
  {
    if (alphas[i] == -1)
      {
        indices[i] = 0;
        continue;
      }

    /* Pick the nearest code by squared distance */
    value = alphas[i];
    least = SIZE_MAX;
    index = 0;
    for (j=0; j<8; j++)
    {
      size_t
        dist;

      /* unsigned wrap-around is harmless: (-d)^2 mod 2^N == d^2 */
      dist = value - (size_t)codes[j];
      dist *= dist;

      if (dist < least)
        {
          least = dist;
          index = j;
        }
    }

    indices[i] = (unsigned char)index;
    error += least;
  }

  return error;
}

/* Sort the block's points along `axis`, recording the permutation in
   order[16*iteration]; returns MagickFalse if this ordering repeats an
   earlier iteration (cluster fit has converged).  Also recomputes the
   weighted point list and the weighted sum in xSumwSum. */
static MagickBooleanType ConstructOrdering(const size_t count,
  const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
  DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
  float
    dps[16],
    f;

  ssize_t
    i;

  size_t
    j;

  unsigned char
    c,
    *o,
    *p;

  o = order + (16*iteration);

  for (i=0; i < (ssize_t) count; i++)
  {
    dps[i] = Dot(points[i],axis);
    o[i] = (unsigned char)i;
  }

  /* Insertion sort of the projections, permuting o alongside */
  for (i=0; i < (ssize_t) count; i++)
  {
    for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
    {
      f = dps[j];
      dps[j] = dps[j - 1];
      dps[j - 1] = f;

      c = o[j];
      o[j] = o[j - 1];
      o[j - 1] = c;
    }
  }

  /* Bail out if an identical ordering was already tried */
  for (i=0; i < (ssize_t) iteration; i++)
  {
    MagickBooleanType
      same;

    p = order + (16*i);
    same = MagickTrue;

    for (j=0; j < count; j++)
    {
      if (o[j] != p[j])
        {
          same = MagickFalse;
          break;
        }
    }

    if (same != MagickFalse)
      return MagickFalse;
  }

  xSumwSum->x = 0;
  xSumwSum->y = 0;
  xSumwSum->z = 0;
  xSumwSum->w = 0;

  for (i=0; i < (ssize_t) count; i++)
  {
    DDSVector4
      v;

    j = (size_t) o[i];

    v.x = points[j].w * points[j].x;
    v.y = points[j].w * points[j].y;
    v.z = points[j].w * points[j].z;
    v.w = points[j].w * 1.0f;

    VectorCopy44(v,&pointsWeights[i]);
    VectorAdd(*xSumwSum,v,xSumwSum);
  }

  return MagickTrue;
}

/* Least-squares cluster fit of a block's colors onto a 4-entry BC1 palette
   (squish-style): try every 3-way partition of the points ordered along the
   principal axis and keep the endpoint pair with least error. */
static void CompressClusterFit(const size_t count, const DDSVector4 *points,
  const ssize_t *map, const
  DDSVector3 principle, const DDSVector4 metric, DDSVector3 *start,
  DDSVector3* end, unsigned char *indices)
{
  DDSVector3
    axis;

  DDSVector4
    grid,
    gridrcp,
    half,
    onethird_onethird2,
    pointsWeights[16],
    two,
    twonineths,
    twothirds_twothirds2,
    xSumwSum;

  float
    bestError = 1e+37f;

  size_t
    bestIteration = 0,
    besti = 0,
    bestj = 0,
    bestk = 0,
    iterationIndex;

  ssize_t
    i;

  unsigned char
    *o,
    order[128],
    unordered[16];

  /* Constants of the least-squares solution (thirds are the BC1
     interpolation weights; .w carries the squared weight) */
  VectorInit(half,0.5f);
  VectorInit(two,2.0f);
  VectorInit(onethird_onethird2,1.0f/3.0f);
  onethird_onethird2.w = 1.0f/9.0f;
  VectorInit(twothirds_twothirds2,2.0f/3.0f);
  twothirds_twothirds2.w = 4.0f/9.0f;
  VectorInit(twonineths,2.0f/9.0f);

  /* 5-6-5 quantization grid and its reciprocal */
  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;
  grid.w = 0.0f;

  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;
  gridrcp.w = 0.0f;

  xSumwSum.x = 0.0f;
  xSumwSum.y = 0.0f;
  xSumwSum.z = 0.0f;
  xSumwSum.w = 0.0f;

  ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);

  /* Iterate: solve along the current axis, then re-order along the new
     endpoint axis until the ordering repeats (max 8 iterations) */
  for (iterationIndex = 0;;)
  {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,1) \
    num_threads(GetMagickResourceLimit(ThreadResource))
#endif
    for (i=0; i < (ssize_t) count; i++)
    {
      DDSVector4
        part0,
        part1,
        part2;

      size_t
        ii,
        j,
        k,
        kmin;

      /* part0/part1/part2 accumulate the weighted sums of the three
         clusters delimited by (i,j,k) */
      VectorInit(part0,0.0f);
      for(ii=0; ii < (size_t) i; ii++)
        VectorAdd(pointsWeights[ii],part0,&part0);

      VectorInit(part1,0.0f);
      for (j=(size_t) i;;)
      {
        if (j == 0)
          {
            VectorCopy44(pointsWeights[0],&part2);
            kmin = 1;
          }
        else
          {
            VectorInit(part2,0.0f);
            kmin = j;
          }

        for (k=kmin;;)
        {
          DDSVector4
            a,
            alpha2_sum,
            alphax_sum,
            alphabeta_sum,
            b,
            beta2_sum,
            betax_sum,
            e1,
            e2,
            factor,
            part3;

          float
            error;

          VectorSubtract(xSumwSum,part2,&part3);
          VectorSubtract(part3,part1,&part3);
          VectorSubtract(part3,part0,&part3);

          VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
          VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
          VectorInit(alpha2_sum,alphax_sum.w);

          VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
          VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
          VectorInit(beta2_sum,betax_sum.w);

          VectorAdd(part1,part2,&alphabeta_sum);
          VectorInit(alphabeta_sum,alphabeta_sum.w);
          VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);

          /* Solve the 2x2 least-squares system for endpoints a and b */
          VectorMultiply(alpha2_sum,beta2_sum,&factor);
          VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
            &factor);
          VectorReciprocal(factor,&factor);

          VectorMultiply(alphax_sum,beta2_sum,&a);
          VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
          VectorMultiply(a,factor,&a);

          VectorMultiply(betax_sum,alpha2_sum,&b);
          VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
          VectorMultiply(b,factor,&b);

          /* Snap both endpoints to the 5-6-5 grid */
          VectorClamp(&a);
          VectorMultiplyAdd(grid,a,half,&a);
          VectorTruncate(&a);
          VectorMultiply(a,gridrcp,&a);

          VectorClamp(&b);
          VectorMultiplyAdd(grid,b,half,&b);
          VectorTruncate(&b);
          VectorMultiply(b,gridrcp,&b);

          /* Evaluate the (metric-weighted) residual error */
          VectorMultiply(b,b,&e1);
          VectorMultiply(e1,beta2_sum,&e1);
          VectorMultiply(a,a,&e2);
          VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);

          VectorMultiply(a,b,&e2);
          VectorMultiply(e2,alphabeta_sum,&e2);
          VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
          VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
          VectorMultiplyAdd(two,e2,e1,&e2);
          VectorMultiply(e2,metric,&e2);

          error = e2.x + e2.y + e2.z;

          /* Double-checked update of the best solution across threads */
          if (error < bestError)
            {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (DDS_CompressClusterFit)
#endif
              {
                if (error < bestError)
                  {
                    VectorCopy43(a,start);
                    VectorCopy43(b,end);
                    bestError = error;
                    besti = i;
                    bestj = j;
                    bestk = k;
                    bestIteration = iterationIndex;
                  }
              }
            }

          if (k == count)
            break;

          VectorAdd(pointsWeights[k],part2,&part2);
          k++;
        }

        if (j == count)
          break;

        VectorAdd(pointsWeights[j],part1,&part1);
        j++;
      }
    }

    /* Stop when the last iteration found no improvement */
    if (bestIteration != iterationIndex)
      break;

    iterationIndex++;
    if (iterationIndex == 8)
      break;

    /* Re-order the points along the new endpoint axis and retry */
    VectorSubtract3(*end,*start,&axis);
    if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
      iterationIndex) == MagickFalse)
      break;
  }

  /* Convert the winning partition (besti,bestj,bestk) into palette codes:
     cluster order along the axis is 0, 2, 3, 1 */
  o = order + (16*bestIteration);

  for (i=0; i < (ssize_t) besti; i++)
    unordered[o[i]] = 0;
  for (i=besti; i < (ssize_t) bestj; i++)
    unordered[o[i]] = 2;
  for (i=bestj; i < (ssize_t) bestk; i++)
    unordered[o[i]] = 3;
  for (i=bestk; i < (ssize_t) count; i++)
    unordered[o[i]] = 1;

  RemapIndices(map,unordered,indices);
}

/* Fast fallback fit: use the extreme points along the principal axis as
   endpoints, snap them to 5-6-5, then assign each point the nearest of the
   four palette entries. */
static void CompressRangeFit(const size_t count, const DDSVector4* points,
  const ssize_t *map, const DDSVector3 principle, const DDSVector4 metric,
  DDSVector3 *start, DDSVector3 *end, unsigned char *indices)
{
  float
    d,
    bestDist,
    max,
    min,
    val;

  DDSVector3
    codes[4],
    grid,
    gridrcp,
    half,
    dist;

  ssize_t
    i;

  size_t
    bestj,
    j;

  unsigned char
    closest[16];

  VectorInit3(half,0.5f);

  grid.x = 31.0f;
  grid.y = 63.0f;
  grid.z = 31.0f;

  gridrcp.x = 1.0f/31.0f;
  gridrcp.y = 1.0f/63.0f;
  gridrcp.z = 1.0f/31.0f;

  if (count > 0)
    {
      VectorCopy43(points[0],start);
      VectorCopy43(points[0],end);

      /* Endpoints are the extreme projections onto the principal axis */
      min = max = Dot(points[0],principle);
      for (i=1; i < (ssize_t) count; i++)
      {
        val = Dot(points[i],principle);
        if (val < min)
          {
            VectorCopy43(points[i],start);
            min = val;
          }
        else if (val > max)
          {
            VectorCopy43(points[i],end);
            max = val;
          }
      }
    }

  /* Snap both endpoints to the 5-6-5 grid */
  VectorClamp3(start);
  VectorMultiplyAdd3(grid,*start,half,start);
  VectorTruncate3(start);
  VectorMultiply3(*start,gridrcp,start);

  VectorClamp3(end);
  VectorMultiplyAdd3(grid,*end,half,end);
  VectorTruncate3(end);
  VectorMultiply3(*end,gridrcp,end);

  /* The four-entry BC1 palette: endpoints plus 1/3 and 2/3 interpolants */
  codes[0] = *start;
  codes[1] = *end;
  codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
  codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
  codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
  codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
  codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
  codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));

  for (i=0; i < (ssize_t) count; i++)
  {
    bestDist = 1e+37f;
    bestj = 0;
    for (j=0; j < 4; j++)
    {
      dist.x = (points[i].x - codes[j].x) * metric.x;
      dist.y = (points[i].y - codes[j].y) * metric.y;
      dist.z = (points[i].z - codes[j].z) * metric.z;

      d = Dot(dist,dist);
      if (d < bestDist)
        {
          bestDist =
            d;
          bestj = j;
        }
    }

    closest[i] = (unsigned char) bestj;
  }

  RemapIndices(map, closest, indices);
}

/* Single-color fit: look up optimal 5-6-5 endpoints for a solid-color block
   in the precomputed tables, preferring the lower-error source of the two
   interpolation modes. */
static void ComputeEndPoints(const DDSSingleColorLookup *lookup[],
  const unsigned char *color, DDSVector3 *start, DDSVector3 *end,
  unsigned char *index)
{
  ssize_t
    i;

  size_t
    c,
    maxError = SIZE_MAX;

  for (i=0; i < 2; i++)
  {
    const DDSSourceBlock*
      sources[3];

    size_t
      error = 0;

    for (c=0; c < 3; c++)
    {
      sources[c] = &lookup[c][color[c]].sources[i];
      error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
    }

    if (error > maxError)
      continue;

    start->x = (float) sources[0]->start / 31.0f;
    start->y = (float) sources[1]->start / 63.0f;
    start->z = (float) sources[2]->start / 31.0f;

    end->x = (float) sources[0]->end / 31.0f;
    end->y = (float) sources[1]->end / 63.0f;
    end->z = (float) sources[2]->end / 31.0f;

    *index = (unsigned char) (2*i);
    maxError = error;
  }
}

/* Dominant eigenvector of the symmetric 3x3 covariance matrix via a few
   power-method iterations. */
static void ComputePrincipleComponent(const float *covariance,
  DDSVector3 *principle)
{
  DDSVector4
    row0,
    row1,
    row2,
    v;

  ssize_t
    i;

  /* covariance stores the upper triangle: [0]=xx [1]=xy [2]=xz
     [3]=yy [4]=yz [5]=zz */
  row0.x = covariance[0];
  row0.y = covariance[1];
  row0.z = covariance[2];
  row0.w = 0.0f;

  row1.x = covariance[1];
  row1.y = covariance[3];
  row1.z = covariance[4];
  row1.w = 0.0f;

  row2.x = covariance[2];
  row2.y = covariance[4];
  row2.z = covariance[5];
  row2.w = 0.0f;

  VectorInit(v,1.0f);

  for (i=0; i < 8; i++)
  {
    DDSVector4
      w;

    float
      a;

    /* w = M * v */
    w.x = row0.x * v.x;
    w.y = row0.y * v.x;
    w.z = row0.z * v.x;
    w.w = row0.w * v.x;

    w.x = (row1.x * v.y) + w.x;
    w.y = (row1.y * v.y) + w.y;
    w.z = (row1.z * v.y) + w.z;
    w.w = (row1.w * v.y) + w.w;

    w.x = (row2.x * v.z) + w.x;
    w.y = (row2.y * v.z) + w.y;
    w.z = (row2.z * v.z) + w.z;
    w.w = (row2.w * v.z) + w.w;

    /* Normalize by the largest component to avoid overflow */
    a = (float) PerceptibleReciprocal(MagickMax(w.x,MagickMax(w.y,w.z)));

    v.x = w.x * a;
    v.y = w.y * a;
    v.z = w.z * a;
    v.w = w.w * a;
  }

  VectorCopy43(v,principle);
}

/* Weighted covariance of the block's color points about their weighted
   centroid; fills the 6-entry upper triangle. */
static void ComputeWeightedCovariance(const size_t count,
  const DDSVector4 *points, float *covariance)
{
  DDSVector3
    centroid;

  float
    total;

  size_t
    i;

  total = 0.0f;
  VectorInit3(centroid,0.0f);

  for (i=0; i < count;
       i++)
  {
    total += points[i].w;
    centroid.x += (points[i].x * points[i].w);
    centroid.y += (points[i].y * points[i].w);
    centroid.z += (points[i].z * points[i].w);
  }

  /* Guard against division by ~zero total weight (FLT_EPSILON) */
  if( total > 1.192092896e-07F)
    {
      centroid.x /= total;
      centroid.y /= total;
      centroid.z /= total;
    }

  for (i=0; i < 6; i++)
    covariance[i] = 0.0f;

  for (i = 0; i < count; i++)
  {
    DDSVector3
      a,
      b;

    a.x = points[i].x - centroid.x;
    a.y = points[i].y - centroid.y;
    a.z = points[i].z - centroid.z;

    b.x = points[i].w * a.x;
    b.y = points[i].w * a.y;
    b.z = points[i].w * a.z;

    covariance[0] += a.x*b.x;
    covariance[1] += a.x*b.y;
    covariance[2] += a.x*b.z;
    covariance[3] += a.y*b.y;
    covariance[4] += a.y*b.z;
    covariance[5] += a.z*b.z;
  }
}

/* Compress and emit one BC3 alpha block: try both the 5-interpolant and
   7-interpolant modes and write whichever has less error. */
static void WriteAlphas(Image *image, const ssize_t *alphas, size_t min5,
  size_t max5, size_t min7, size_t max7)
{
  ssize_t
    i;

  size_t
    err5,
    err7,
    j;

  unsigned char
    indices5[16],
    indices7[16];

  FixRange(min5,max5,5);
  err5 = CompressAlpha(min5,max5,5,alphas,indices5);

  FixRange(min7,max7,7);
  err7 = CompressAlpha(min7,max7,7,alphas,indices7);

  if (err7 < err5)
    {
      /* 7-alpha mode won: translate its codes into the swapped-endpoint
         numbering (a0 <-> a1, interpolants reversed) */
      for (i=0; i < 16; i++)
      {
        unsigned char
          index;

        index = indices7[i];
        if( index == 0 )
          indices5[i] = 1;
        else if (index == 1)
          indices5[i] = 0;
        else
          indices5[i] = 9 - index;
      }

      min5 = max7;
      max5 = min7;
    }

  (void) WriteBlobByte(image,(unsigned char) min5);
  (void) WriteBlobByte(image,(unsigned char) max5);

  /* Pack the 16 3-bit codes into two 24-bit little-endian groups */
  for(i=0; i < 2; i++)
  {
    size_t
      value = 0;

    for (j=0; j < 8; j++)
    {
      size_t index = (size_t) indices5[j + i*8];
      value |= ( index << 3*j );
    }

    for (j=0; j < 3; j++)
    {
      size_t byte = (value >> 8*j) & 0xff;
      (void) WriteBlobByte(image,(unsigned char) byte);
    }
  }
}

/* Emit a BC1 color block: the two 5-6-5 endpoints (larger first) followed
   by 32 bits of 2-bit palette codes. */
static void WriteIndices(Image *image, const DDSVector3 start,
  const DDSVector3 end, unsigned char *indices)
{
  ssize_t
    i;

  size_t
    a,
    b;

  unsigned char
    remapped[16];

  const unsigned char
    *ind;

  a = ColorTo565(start);
  b = ColorTo565(end);

  /* BC1 requires c0 > c1 for 4-color mode: swap endpoints and flip the
     low bit of each code when needed */
  for (i=0; i<16; i++)
  {
    if( a < b )
      remapped[i] = (indices[i] ^ 0x1) & 0x3;
    else if( a == b )
      remapped[i] = 0;
    else
      remapped[i] = indices[i];
  }

  if( a < b )
    Swap(a,b);

  (void)
    WriteBlobByte(image,(unsigned char) (a & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (a >> 8));
  (void) WriteBlobByte(image,(unsigned char) (b & 0xff));
  (void) WriteBlobByte(image,(unsigned char) (b >> 8));

  /* Four codes per byte, row by row */
  for (i=0; i<4; i++)
  {
    ind = remapped + 4*i;
    (void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) |
      (ind[3] << 6));
  }
}

/* Fit a BC1 palette to the block's distinct colors — cluster fit when
   requested and possible, otherwise range fit — and write the block. */
static void WriteCompressed(Image *image, const size_t count,
  DDSVector4 *points, const ssize_t *map, const MagickBooleanType clusterFit)
{
  float
    covariance[16];

  DDSVector3
    end,
    principle,
    start;

  DDSVector4
    metric;

  unsigned char
    indices[16];

  VectorInit(metric,1.0f);
  VectorInit3(start,0.0f);
  VectorInit3(end,0.0f);

  ComputeWeightedCovariance(count,points,covariance);
  ComputePrincipleComponent(covariance,&principle);

  if ((clusterFit == MagickFalse) || (count == 0))
    CompressRangeFit(count,points,map,principle,metric,&start,&end,indices);
  else
    CompressClusterFit(count,points,map,principle,metric,&start,&end,indices);

  WriteIndices(image,start,end,indices);
}

/* Encode a block whose 16 texels share one color via the lookup tables. */
static void WriteSingleColorFit(Image *image, const DDSVector4 *points,
  const ssize_t *map)
{
  DDSVector3
    start,
    end;

  ssize_t
    i;

  unsigned char
    color[3],
    index,
    indexes[16],
    indices[16];

  color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255);
  color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255);
  color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255);

  index=0;
  ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index);

  for (i=0; i< 16; i++)
    indexes[i]=index;
  RemapIndices(map,indexes,indices);
  WriteIndices(image,start,end,indices);
}

/* Compress the image 4x4 block at a time as DXT1 or DXT5 data. */
static void WriteFourCC(Image *image, const size_t compression,
  const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
  ExceptionInfo *exception)
{
  ssize_t
    x;

  ssize_t
    i,
    y,
    bx,
    by;

  const Quantum
    *p;

  for (y=0; y < (ssize_t) image->rows; y+=4)
  {
    for (x=0; x < (ssize_t) image->columns; x+=4)
    {
      MagickBooleanType
        match;

      DDSVector4
        point,
        points[16];

      size_t
        count = 0,
        max5 = 0,
        max7 = 0,
        min5 = 255,
        min7 = 255,
        columns = 4,
        rows = 4;

      ssize_t
        alphas[16],
        map[16];

      unsigned char
        alpha;

      /* Clip partial blocks at the right/bottom edges */
      if (x + columns >= image->columns)
        columns = image->columns - x;

      if (y + rows >= image->rows)
        rows = image->rows - y;

      p=GetVirtualPixels(image,x,y,columns,rows,exception);
      if (p == (const Quantum *) NULL)
        break;

      for (i=0; i<16; i++)
      {
        map[i] = -1;
        alphas[i] = -1;
      }

      /* Collect the block's distinct colors (points) and alpha range */
      for (by=0; by < (ssize_t) rows; by++)
      {
        for (bx=0; bx < (ssize_t) columns; bx++)
        {
          if (compression == FOURCC_DXT5)
            alpha = ScaleQuantumToChar(GetPixelAlpha(image,p));
          else
            alpha = 255;

          if (compression == FOURCC_DXT5)
            {
              /* Track ranges for both BC3 alpha modes: 7-mode uses the
                 full range, 5-mode excludes the explicit 0/255 codes */
              if (alpha < min7)
                min7 = alpha;
              if (alpha > max7)
                max7 = alpha;
              if (alpha != 0 && alpha < min5)
                min5 = alpha;
              if (alpha != 255 && alpha > max5)
                max5 = alpha;
            }

          alphas[4*by + bx] = (size_t)alpha;

          point.x = (float)ScaleQuantumToChar(GetPixelRed(image,p)) / 255.0f;
          point.y = (float)ScaleQuantumToChar(GetPixelGreen(image,p)) / 255.0f;
          point.z = (float)ScaleQuantumToChar(GetPixelBlue(image,p)) / 255.0f;
          point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f;
          p+=GetPixelChannels(image);

          /* Merge identical colors into one weighted point */
          match = MagickFalse;
          for (i=0; i < (ssize_t) count; i++)
          {
            if ((points[i].x == point.x) &&
                (points[i].y == point.y) &&
                (points[i].z == point.z) &&
                (alpha >= 128 || compression == FOURCC_DXT5))
              {
                points[i].w += point.w;
                map[4*by + bx] = i;
                match = MagickTrue;
                break;
              }
          }

          if (match != MagickFalse)
            continue;

          points[count].x = point.x;
          points[count].y = point.y;
          points[count].z = point.z;
          points[count].w = point.w;
          map[4*by + bx] = count;
          count++;
        }
      }

      /* Cluster fit expects sqrt weights */
      for (i=0; i < (ssize_t) count; i++)
        points[i].w = sqrt(points[i].w);

      if (compression == FOURCC_DXT5)
        WriteAlphas(image,alphas,min5,max5,min7,max7);

      if (count == 1)
        WriteSingleColorFit(image,points,map);
      else
        WriteCompressed(image,count,points,map,clusterFit);
    }
  }
}

/* Write raw BGR(A) scanlines for the uncompressed pixel format. */
static void WriteUncompressed(Image *image, ExceptionInfo *exception)
{
  const Quantum
    *p;

  ssize_t
    x;

  ssize_t
    y;

  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *)
NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(image,p))); (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(image,p))); if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(image,p))); p+=GetPixelChannels(image); } } } static void WriteImageData(Image *image, const size_t pixelFormat, const size_t compression,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha, ExceptionInfo *exception) { if (pixelFormat == DDPF_FOURCC) WriteFourCC(image,compression,clusterFit,weightByAlpha,exception); else WriteUncompressed(image,exception); } static MagickBooleanType WriteMipmaps(Image *image,const ImageInfo *image_info, const size_t pixelFormat,const size_t compression,const size_t mipmaps, const MagickBooleanType fromlist,const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,ExceptionInfo *exception) { const char *option; Image *mipmap_image, *resize_image; MagickBooleanType fast_mipmaps, status; ssize_t i; size_t columns, rows; columns=DIV2(image->columns); rows=DIV2(image->rows); option=GetImageOption(image_info,"dds:fast-mipmaps"); fast_mipmaps=IsStringTrue(option); mipmap_image=image; resize_image=image; status=MagickTrue; for (i=0; i < (ssize_t) mipmaps; i++) { if (fromlist == MagickFalse) { mipmap_image=ResizeImage(resize_image,columns,rows,TriangleFilter, exception); if (mipmap_image == (Image *) NULL) { status=MagickFalse; break; } } else { mipmap_image=mipmap_image->next; if ((mipmap_image->columns != columns) || (mipmap_image->rows != rows)) ThrowBinaryException(CoderError,"ImageColumnOrRowSizeIsNotSupported", image->filename); } DestroyBlob(mipmap_image); mipmap_image->blob=ReferenceBlob(image->blob); WriteImageData(mipmap_image,pixelFormat,compression,weightByAlpha, clusterFit,exception); if (fromlist == MagickFalse) { if 
(fast_mipmaps == MagickFalse) mipmap_image=DestroyImage(mipmap_image); else { if (resize_image != image) resize_image=DestroyImage(resize_image); resize_image=mipmap_image; } } columns=DIV2(columns); rows=DIV2(rows); } if (resize_image != image) resize_image=DestroyImage(resize_image); return(status); } static void WriteDDSInfo(Image *image, const size_t pixelFormat, const size_t compression, const size_t mipmaps) { char software[MagickPathExtent]; ssize_t i; unsigned int format, caps, flags; flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT); caps=(unsigned int) DDSCAPS_TEXTURE; format=(unsigned int) pixelFormat; if (format == DDPF_FOURCC) flags=flags | DDSD_LINEARSIZE; else flags=flags | DDSD_PITCH; if (mipmaps > 0) { flags=flags | (unsigned int) DDSD_MIPMAPCOUNT; caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX); } if (format != DDPF_FOURCC && image->alpha_trait != UndefinedPixelTrait) format=format | DDPF_ALPHAPIXELS; (void) WriteBlob(image,4,(unsigned char *) "DDS "); (void) WriteBlobLSBLong(image,124); (void) WriteBlobLSBLong(image,flags); (void) WriteBlobLSBLong(image,(unsigned int) image->rows); (void) WriteBlobLSBLong(image,(unsigned int) image->columns); if (pixelFormat == DDPF_FOURCC) { /* Compressed DDS requires linear compressed size of first image */ if (compression == FOURCC_DXT1) (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*8)); else /* DXT5 */ (void) WriteBlobLSBLong(image,(unsigned int) (MagickMax(1, (image->columns+3)/4)*MagickMax(1,(image->rows+3)/4)*16)); } else { /* Uncompressed DDS requires byte pitch of first image */ if (image->alpha_trait != UndefinedPixelTrait) (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 4)); else (void) WriteBlobLSBLong(image,(unsigned int) (image->columns * 3)); } (void) WriteBlobLSBLong(image,0x00); (void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1); (void) 
memset(software,0,sizeof(software)); (void) CopyMagickString(software,"IMAGEMAGICK",MagickPathExtent); (void) WriteBlob(image,44,(unsigned char *) software); (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,format); if (pixelFormat == DDPF_FOURCC) { (void) WriteBlobLSBLong(image,(unsigned int) compression); for(i=0;i < 5;i++) /* bitcount / masks */ (void) WriteBlobLSBLong(image,0x00); } else { (void) WriteBlobLSBLong(image,0x00); if (image->alpha_trait != UndefinedPixelTrait) { (void) WriteBlobLSBLong(image,32); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0xff000000); } else { (void) WriteBlobLSBLong(image,24); (void) WriteBlobLSBLong(image,0xff0000); (void) WriteBlobLSBLong(image,0xff00); (void) WriteBlobLSBLong(image,0xff); (void) WriteBlobLSBLong(image,0x00); } } (void) WriteBlobLSBLong(image,caps); for(i=0;i < 4;i++) /* ddscaps2 + reserved region */ (void) WriteBlobLSBLong(image,0x00); } static MagickBooleanType WriteDDSImage(const ImageInfo *image_info, Image *image, ExceptionInfo *exception) { const char *option; size_t compression, columns, maxMipmaps, mipmaps, pixelFormat, rows; MagickBooleanType clusterFit, fromlist, status, weightByAlpha; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); pixelFormat=DDPF_FOURCC; compression=FOURCC_DXT5; if (image->alpha_trait == UndefinedPixelTrait) compression=FOURCC_DXT1; if (LocaleCompare(image_info->magick,"dxt1") == 0) 
compression=FOURCC_DXT1; if (image_info->compression == DXT1Compression) compression=FOURCC_DXT1; else if (image_info->compression == NoCompression) pixelFormat=DDPF_RGB; option=GetImageOption(image_info,"dds:compression"); if (option != (char *) NULL) { if (LocaleCompare(option,"dxt1") == 0) compression=FOURCC_DXT1; if (LocaleCompare(option,"none") == 0) pixelFormat=DDPF_RGB; } clusterFit=MagickFalse; weightByAlpha=MagickFalse; if (pixelFormat == DDPF_FOURCC) { option=GetImageOption(image_info,"dds:cluster-fit"); if (IsStringTrue(option) != MagickFalse) { clusterFit=MagickTrue; if (compression != FOURCC_DXT1) { option=GetImageOption(image_info,"dds:weight-by-alpha"); if (IsStringTrue(option) != MagickFalse) weightByAlpha=MagickTrue; } } } mipmaps=0; fromlist=MagickFalse; option=GetImageOption(image_info,"dds:mipmaps"); if (option != (char *) NULL) { if (LocaleNCompare(option,"fromlist",8) == 0) { Image *next; fromlist=MagickTrue; next=image->next; while(next != (Image *) NULL) { mipmaps++; next=next->next; } } } if ((mipmaps == 0) && ((image->columns & (image->columns - 1)) == 0) && ((image->rows & (image->rows - 1)) == 0)) { maxMipmaps=SIZE_MAX; if (option != (char *) NULL) maxMipmaps=StringToUnsignedLong(option); if (maxMipmaps != 0) { columns=image->columns; rows=image->rows; while ((columns != 1 || rows != 1) && mipmaps != maxMipmaps) { columns=DIV2(columns); rows=DIV2(rows); mipmaps++; } } } option=GetImageOption(image_info,"dds:raw"); if (IsStringTrue(option) == MagickFalse) WriteDDSInfo(image,pixelFormat,compression,mipmaps); else mipmaps=0; WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha, exception); if ((mipmaps > 0) && (WriteMipmaps(image,image_info,pixelFormat,compression, mipmaps,fromlist,clusterFit,weightByAlpha,exception) == MagickFalse)) return(MagickFalse); (void) CloseBlob(image); return(MagickTrue); }
rnn_impl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file rnn_impl.h * \brief * \author Shu Zhang */ #ifndef MXNET_OPERATOR_RNN_IMPL_H_ #define MXNET_OPERATOR_RNN_IMPL_H_ #include <dmlc/logging.h> #include <dmlc/parameter.h> #include <mxnet/operator.h> #include <algorithm> #include <map> #include <vector> #include <string> #include <utility> #include "./math.h" #include "./math_functions-inl.h" #include "./operator_common.h" #include "./mshadow_op.h" #include "./linalg.h" namespace mxnet { namespace op { template<typename DType> inline DType sigmoid(DType x) { return 1.0f / (1.0f + exp(-x)); } template<typename DType> inline DType relu(DType x) { return x > 0.0f ? 
static_cast<float>(x) : 0.0f; } template<typename DType> void LstmForwardTrainingSingleLayer(DType* ws, DType* rs, bool state_outputs, bool bid, const index_t T, const index_t N, const index_t I, const int H, const Tensor<cpu, 2, DType> &x, const Tensor<cpu, 2, DType> &hx, const Tensor<cpu, 2, DType> &cx, const Tensor<cpu, 3, DType> &y, DType* w_ptr, DType* b_ptr, DType* hy_ptr, DType* cy_ptr) { using namespace mshadow; const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I)); const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H)); const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H)); const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H)); const Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, 4 * H)); const Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, 4 * H)); const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H)); const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H)); Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H)); DType *c_ptr = bid ? rs + T * N * H * 7 : rs; Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H)); Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4)); const int offset = bid ? H : 0; const DType alpha = 1.0; const DType beta = 0.0; const index_t cell_size = N * H; linalg_gemm(x, wx, yx_flat, alpha, beta, false, true); const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); for (index_t i = 0; i < T; ++i) { index_t t = bid ? T - 1 - i : i; linalg_gemm(i ? 
h : hx, wh, yh_flat, alpha, beta, false, true); #pragma omp parallel for num_threads(omp_threads) for (index_t jk = 0; jk < cell_size; ++jk) { index_t j = jk / H; index_t k = jk % H; DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]); DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]); DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]); DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]); DType ct = (i ? c[i-1][j][k] : cx[j][k]) * ft + it * gt; DType ht = ot * tanh(ct); h[j][k] = ht; // reserve y[t][j][k + offset] = ht; c[i][j][k] = ct; ifgo[i][j][k][0] = it; ifgo[i][j][k][1] = ft; ifgo[i][j][k][2] = gt; ifgo[i][j][k][3] = ot; if (i == T - 1 && state_outputs) { hy_ptr[jk] = ht; cy_ptr[jk] = ct; } } } } template <typename DType> void LstmForwardTraining(DType* ws, DType* rs, bool state_outputs, const int L, const int D, const index_t T, const index_t N, const index_t I, const int H, DType* x_ptr, DType* hx_ptr, DType* cx_ptr, DType* w_ptr, DType* b_ptr, DType* y_ptr, DType* hy_ptr, DType* cy_ptr, const float dropout) { DType* dropout_random = rs; DType* rs2 = dropout_random + (L - 1) * D * T * N * H; const int total_layers = D * L; Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H)); Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H)); const index_t b_size = 2 * H * 4; const index_t r_size = D * T * N * H * 6; const index_t y_offset = T * N * H * 5; const index_t cell_size = N * H; unsigned int seed_ = 17 + rand() % 4096; // NOLINT(runtime/threadsafe_fn) int idx = 0; // state & cell state's idx; const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); for (int i = 0; i < L; ++i) { const index_t input_size = i ? 
H * D : I; const index_t w_size = (input_size + H) * H * 4; Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size)); Tensor<cpu, 3, DType> y(rs2 + y_offset, Shape3(T, N, H * D)); LstmForwardTrainingSingleLayer<DType>(ws, rs2, state_outputs, false, T, N, input_size, H, x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr); if (D == 2) { w_ptr += w_size; b_ptr += b_size; ++idx; if (state_outputs) { hy_ptr += cell_size; cy_ptr += cell_size; } LstmForwardTrainingSingleLayer<DType>(ws, rs2, state_outputs, true, T, N, input_size, H, x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr); } if (i != L - 1) { w_ptr += w_size; b_ptr += b_size; if (dropout > 0.0f) { #pragma omp parallel for num_threads(omp_threads) for (index_t j = 0; j < T * N * H * D; j++) { int rand_data = rand_r(&seed_); if (static_cast<float>(rand_data % 1000) < static_cast<float>(1000 * dropout)) { dropout_random[i * T * N * H * D + j] = 0; y.dptr_[j] = 0; } else { dropout_random[i * T * N * H * D + j] = 1.0f - dropout; y.dptr_[j] = y.dptr_[j] / (1.0f - dropout); } } } x_ptr = y.dptr_; rs2 += r_size; ++idx; if (state_outputs) { hy_ptr += cell_size; cy_ptr += cell_size; } } } #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < T * N * H * D; ++i) { y_ptr[i] = (rs2 + y_offset)[i]; } } template<typename DType> void LstmForwardInferenceSingleLayer(DType* ws, bool state_outputs, bool bid, const index_t T, const index_t N, const index_t I, const int H, const int P, const Tensor<cpu, 2, DType> &x, const Tensor<cpu, 2, DType> &hx, const Tensor<cpu, 2, DType> &cx, const Tensor<cpu, 3, DType> &y, DType* w_ptr, DType* b_ptr, DType* hy_ptr, DType* cy_ptr) { using namespace mshadow; const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I)); const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, (P ? 
P : H))); Tensor<cpu, 2, DType> whr(w_ptr, Shape2(1, 1)); if (P > 0) whr = Tensor<cpu, 2, DType>(wh.dptr_ + P * 4 * H, Shape2(P, H)); const Tensor<cpu, 2, DType> bx(b_ptr, Shape2(4, H)); const Tensor<cpu, 2, DType> bh(b_ptr + H * 4, Shape2(4, H)); Tensor<cpu, 2, DType> yx_flat(ws, Shape2(T * N, H * 4)); Tensor<cpu, 2, DType> yh_flat(ws + T * N * H * 4, Shape2(N, H * 4)); const Tensor<cpu, 4, DType> yx(yx_flat.dptr_, Shape4(T, N, 4, H)); const Tensor<cpu, 3, DType> yh(yh_flat.dptr_, Shape3(N, 4, H)); Tensor<cpu, 2, DType> h(yh_flat.dptr_ + N * H * 4, Shape2(N, H)); Tensor<cpu, 2, DType> c(h.dptr_ + N * H, Shape2(N, H)); Tensor<cpu, 2, DType> r(hy_ptr, Shape2(1, 1)); if (P > 0) r = Tensor<cpu, 2, DType>(hy_ptr, Shape2(N, P)); const int offset = bid ? H : 0; const int proj_offset = bid ? P : 0; const DType alpha = 1.0; const DType beta = 0.0; const index_t cell_size = N * H; linalg_gemm(x, wx, yx_flat, alpha, beta, false, true); const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); for (index_t i = 0; i < T; ++i) { index_t t = bid ? T - 1 - i : i; if (P > 0) { linalg_gemm(i ? r : hx, wh, yh_flat, alpha, beta, false, true); } else { linalg_gemm(i ? h : hx, wh, yh_flat, alpha, beta, false, true); } #pragma omp parallel for num_threads(omp_threads) for (index_t jk = 0; jk < cell_size; ++jk) { int j = jk / H; int k = jk % H; DType it = sigmoid<DType>(yx[t][j][0][k] + yh[j][0][k] + bx[0][k] + bh[0][k]); DType ft = sigmoid<DType>(yx[t][j][1][k] + yh[j][1][k] + bx[1][k] + bh[1][k]); DType gt = tanh(yx[t][j][2][k] + yh[j][2][k] + bx[2][k] + bh[2][k]); DType ot = sigmoid<DType>(yx[t][j][3][k] + yh[j][3][k] + bx[3][k] + bh[3][k]); DType ct = (i ? 
c[j][k] : cx[j][k]) * ft + it * gt; DType ht = ot * tanh(ct); if (P == 0) y[t][j][k + offset] = ht; if (i == T - 1 && state_outputs) { if (P == 0) hy_ptr[jk] = ht; cy_ptr[jk] = ct; } else { c[j][k] = ct; } h[j][k] = ht; } if (P > 0) { linalg_gemm(h, whr, r, alpha, beta, false, true); #pragma GCC diagnostic push #if __GNUC__ >= 8 #pragma GCC diagnostic ignored "-Wclass-memaccess" #endif #pragma omp parallel for num_threads(omp_threads) for (int j = 0; j < N; ++j) { std::memcpy(y[t][j].dptr_ + proj_offset, r[j].dptr_, P * sizeof(DType)); } #pragma GCC diagnostic pop } } } template <typename DType> void LstmForwardInference(DType* ws, bool state_outputs, const int L, const int D, const index_t T, const index_t N, const index_t I, const int H, const int P, DType* x_ptr, DType* hx_ptr, DType* cx_ptr, DType* w_ptr, DType* b_ptr, DType* y_ptr, DType* hy_ptr, DType* cy_ptr) { const int total_layers = D * L; Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, P ? P : H)); Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H)); const index_t b_size = 2 * H * 4; const index_t cell_size = N * H; const index_t projection_size = (P ? P : H) * N; DType* y_tmp_ptr = ws + (T + 1) * cell_size * 4 + cell_size * 2; DType* y_cur_ptr = y_ptr; int idx = 0; // state & cell state's idx; bool flag = L % 2 ? false : true; for (int i = 0; i < L; ++i) { const index_t input_size = i ? (P ? P : H) * D : I; index_t w_size = (input_size + (P ? P : H)) * H * 4; if (P > 0) { w_size += P * H; } // If bidirectional, need space to save current layer output y. if (D == 2) { y_cur_ptr = flag ? y_tmp_ptr : y_ptr; flag = !flag; } Tensor<cpu, 2, DType> x(x_ptr, Shape2(T * N, input_size)); Tensor<cpu, 3, DType> y(y_cur_ptr, Shape3(T, N, (P ? P : H) * D)); LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, false, T, N, input_size, H, P, x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr); // If bidirectional, then calculate the reverse direction's forward result. 
if (D == 2) { w_ptr += w_size; b_ptr += b_size; ++idx; if (state_outputs) { hy_ptr += projection_size; cy_ptr += cell_size; } LstmForwardInferenceSingleLayer<DType>(ws, state_outputs, true, T, N, input_size, H, P, x, hx[idx], cx[idx], y, w_ptr, b_ptr, hy_ptr, cy_ptr); } // Don't need to move pointer in the last layer. if (i != L - 1) { w_ptr += w_size; b_ptr += b_size; x_ptr = y_cur_ptr; ++idx; if (state_outputs) { hy_ptr += projection_size; cy_ptr += cell_size; } } } } template <typename DType> void LstmBackwardSingleLayer(DType* ws, DType* rs, DType* tmp_buf, bool bid, const index_t T, const index_t N, const index_t I, const int H, const Tensor<cpu, 2, DType> &x, const Tensor<cpu, 2, DType> &hx, const Tensor<cpu, 2, DType> &cx, const Tensor<cpu, 3, DType> &y, const Tensor<cpu, 3, DType> &dy, const Tensor<cpu, 2, DType> &dx, const Tensor<cpu, 2, DType> &dhx, const Tensor<cpu, 2, DType> &dcx, DType* dhy_ptr, DType* dcy_ptr, DType* w_ptr, DType* dw_ptr, DType* db_ptr, int req_data, int req_params, int req_state, int req_statecell) { using namespace mshadow; const Tensor<cpu, 2, DType> wx(w_ptr, Shape2(H * 4, I)); const Tensor<cpu, 2, DType> wh(w_ptr + I * H * 4, Shape2(H * 4, H)); Tensor<cpu, 2, DType> dwx(dw_ptr, Shape2(H * 4, I)); Tensor<cpu, 2, DType> dwh(dw_ptr + I * H * 4, Shape2(H * 4, H)); Tensor<cpu, 1, DType> dbx(db_ptr, Shape1(H * 4)); Tensor<cpu, 1, DType> dbh(dbx.dptr_ + H * 4, Shape1(H * 4)); DType *c_ptr = bid ? 
rs + T * N * H * 7 : rs; const Tensor<cpu, 3, DType> c(c_ptr, Shape3(T, N, H)); const Tensor<cpu, 4, DType> ifgo(c_ptr + T * N * H, Shape4(T, N, H, 4)); const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (req_params != kNullOp && req_params != kAddTo) { #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < H * 4 * H; ++i) { dwh.dptr_[i] = 0; } #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < 4 * H; ++i) { dbx.dptr_[i] = 0; dbh.dptr_[i] = 0; } } Tensor<cpu, 4, DType> difgo(ws, Shape4(T, N, 4, H)); Tensor<cpu, 2, DType> dh(ws + T * N * H * 4, Shape2(N, H)); Tensor<cpu, 2, DType> dc(dh.dptr_ + N * H, Shape2(N, H)); Tensor<cpu, 2, DType> htmp(dc.dptr_ + N * H, Shape2(N, H)); const int offset = bid ? H : 0; const DType alpha = 1.0; const DType beta0 = 0.0; const DType beta1 = 1.0; const DType beta2 = 2.0; const index_t cell_size = N * H; if (dhy_ptr != nullptr) { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < cell_size; ++i) { dh.dptr_[i] = dhy_ptr[i]; } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < cell_size; ++i) { dh.dptr_[i] = 0; } } if (dcy_ptr != nullptr) { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < cell_size; ++i) { dc.dptr_[i] = dcy_ptr[i]; } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < cell_size; ++i) { dc.dptr_[i] = 0; } } for (index_t i = T - 1; i >= 0; --i) { index_t t = bid ? T - 1 - i : i; index_t tnext = bid ? t + 1 : t - 1; const Tensor<cpu, 2, DType>& dhnext = i ? dh : dhx; const Tensor<cpu, 2, DType>& dcnext = i ? dc : dcx; const Tensor<cpu, 2, DType>& hnext = i ? htmp : hx; const Tensor<cpu, 2, DType>& cnext = i ? 
c[i - 1] : cx; #pragma omp parallel for num_threads(omp_threads) for (index_t jk = 0; jk < cell_size; ++jk) { index_t j = jk / H; index_t k = jk % H; DType tc = tanh(c[i][j][k]); DType it = ifgo[i][j][k][0]; DType ft = ifgo[i][j][k][1]; DType gt = ifgo[i][j][k][2]; DType ot = ifgo[i][j][k][3]; dh[j][k] += dy[t][j][k + offset]; dc[j][k] += dh[j][k] * ot * (1 - tc * tc); difgo[t][j][0][k] = dc[j][k] * gt * it * (1 - it); difgo[t][j][1][k] = dc[j][k] * cnext[j][k] * ft * (1 - ft); difgo[t][j][2][k] = dc[j][k] * it * (1 - gt * gt); difgo[t][j][3][k] = dh[j][k] * tc * ot * (1 - ot); if (req_statecell != kNullOp || i > 0) { dcnext[j][k] = dc[j][k] * ft; } if (i) { htmp[j][k] = y[tnext][j][k + offset]; } } Tensor<cpu, 2, DType> dyh(difgo[t].dptr_, Shape2(N, H * 4)); if (req_state != kNullOp || i > 0) { linalg_gemm(dyh, wh, dhnext, alpha, beta0, false, false); } if (req_params != kNullOp) { if (req_params != kAddTo) { linalg_gemm(dyh, hnext, dwh, alpha, beta1, true, false); } else { linalg_gemm(dyh, hnext, dwh, alpha, beta2, true, false); // generate dwx every time step for AddTo Tensor<cpu, 2, DType> x_t(x.dptr_ + i * N * I, Shape2(N, I)); Tensor<cpu, 2, DType> dyx_t(difgo.dptr_ + i * N * H * 4, Shape2(N, H * 4)); linalg_gemm(dyx_t, x_t, dwx, alpha, beta2, true, false); } } } Tensor<cpu, 2, DType> dyx(difgo.dptr_, Shape2(T * N, H * 4)); if (req_data != kNullOp) { linalg_gemm(dyx, wx, dx, alpha, bid ? 
beta1 : beta0, false, false); } if (req_params != kNullOp && req_params != kAddTo) { linalg_gemm(dyx, x, dwx, alpha, beta0, true, false); } const index_t row = T * N; const index_t col = H * 4; if (req_params != kNullOp) { if (req_params != kAddTo) { for (index_t i = 0; i < row; ++i) { #pragma omp parallel for num_threads(omp_threads) for (index_t j = 0; j < col; ++j) { dbx[j] += dyx[i][j]; dbh[j] = dbx[j]; } } } else { const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf, Shape2(col, T)); const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + col * T, Shape2(col, T)); #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < col * T; ++i) { tmp_dbx.dptr_[i] = 0; tmp_dbh.dptr_[i] = 0; } for (index_t t = T - 1; t >= 0; --t) { #pragma omp parallel for num_threads(omp_threads) for (index_t j = 0; j < col; ++j) { for (index_t i = 0; i < N; ++i) { tmp_dbx[j][t] += dyx[t * N + i][j]; tmp_dbh[j][t] = tmp_dbx[j][t]; } } #pragma omp parallel for num_threads(omp_threads) for (index_t j = 0; j < col; ++j) { dbx[j] += tmp_dbx[j][t] + dbx[j]; dbh[j] += tmp_dbh[j][t] + dbh[j]; } } } } } template <typename DType> void LstmBackward(DType* ws, DType* rs, const int L, const int D, const index_t T, const index_t N, const index_t I, const int H, DType* x_ptr, DType* hx_ptr, DType* cx_ptr, DType* w_ptr, DType* y_ptr, DType* dy_ptr, DType* dhy_ptr, DType* dcy_ptr, DType* dx_ptr, DType* dhx_ptr, DType* dcx_ptr, DType* dw_ptr, DType* db_ptr, int req_data, int req_params, int req_state, int req_statecell, const float dropout) { DType* dropout_random = rs + (L - 1) * D * T * N * H; DType* rs2 = rs + (L - 1) * D * T * N * H; DType* tmp_buf = ws; DType* ws2 = tmp_buf + 8 * T * H; const int total_layers = D * L; Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(total_layers, N, H)); Tensor<cpu, 3, DType> cx(cx_ptr, Shape3(total_layers, N, H)); Tensor<cpu, 3, DType> dhx(dhx_ptr, Shape3(total_layers, N, H)); Tensor<cpu, 3, DType> dcx(dcx_ptr, Shape3(total_layers, N, H)); const index_t b_size = 2 * H * 4; 
const index_t r_size = D * T * N * H * 6; const index_t y_offset = T * N * H * 5; const index_t w_size1 = (I + H) * H * 4; // first layer const index_t w_size2 = (D * H + H) * H * 4; // other layers const index_t cell_size = N * H; DType* dy_tmp_ptr = ws2 + T * cell_size * 4 + cell_size * 3; for (int i = L - 1; i >= 0; --i) { const index_t input_size = i ? H * D : I; const index_t w_size = i ? w_size2 : w_size1; int idx = i * D; DType* w_cur_ptr = i ? w_ptr + (w_size1 + (i - 1) * w_size2) * D : w_ptr; DType* dw_cur_ptr = i ? dw_ptr + (w_size1 + (i - 1) * w_size2) * D : dw_ptr; DType* db_cur_ptr = db_ptr + i * b_size * D; DType* rs_cur_ptr = rs2 + i * r_size; DType* dhy_cur_ptr = dhy_ptr ? dhy_ptr + i * cell_size * D : nullptr; DType* dcy_cur_ptr = dcy_ptr ? dcy_ptr + i * cell_size * D : nullptr; Tensor<cpu, 3, DType> y(rs_cur_ptr + y_offset, Shape3(T, N, H * D)); Tensor<cpu, 3, DType> dy(dy_ptr, Shape3(T, N, H * D)); Tensor<cpu, 2, DType> x(i ? y.dptr_ - r_size : x_ptr, Shape2(T * N, input_size)); Tensor<cpu, 2, DType> dx(i ? dy_tmp_ptr : dx_ptr, Shape2(T * N, input_size)); LstmBackwardSingleLayer<DType>(ws2, rs_cur_ptr, tmp_buf, false, T, N, input_size, H, x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx], dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr, req_data, req_params, req_state, req_statecell); if (D == 2) { w_cur_ptr += w_size; dw_cur_ptr += w_size; db_cur_ptr += b_size; ++idx; dhy_cur_ptr = dhy_ptr ? dhy_cur_ptr + cell_size : nullptr; dcy_cur_ptr = dcy_ptr ? 
dcy_cur_ptr + cell_size : nullptr; LstmBackwardSingleLayer<DType>(ws2, rs_cur_ptr, tmp_buf, true, T, N, input_size, H, x, hx[idx], cx[idx], y, dy, dx, dhx[idx], dcx[idx], dhy_cur_ptr, dcy_cur_ptr, w_cur_ptr, dw_cur_ptr, db_cur_ptr, req_data, req_params, req_state, req_statecell); } if (dropout > 0.0f && i > 0 && req_data != kNullOp) { dropout_random = dropout_random - T * N * D * H; const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); #pragma omp parallel for num_threads(omp_threads) for (index_t j = 0; j < T * N * D * H; j++) { if (dropout_random[j] == 0) { dx.dptr_[j] = 0; } else { dx.dptr_[j] = dx.dptr_[j] / (1.0f - dropout); } } } dy_ptr = dx.dptr_; } } template<typename DType> void GruForwardInferenceSingleLayer(DType* ws, DType* tmp_buf, bool state_outputs, const int D, const index_t T, const index_t N, const index_t I, const int H, const Tensor<cpu, 2, DType> &x, const Tensor<cpu, 2, DType> &hx, DType* wx_ptr, DType* wh_ptr, DType* bx_ptr, DType* bh_ptr, DType* y_ptr, DType* hy_ptr) { DType* ht = y_ptr; DType* ht_1 = y_ptr; DType* back_ht_1 = y_ptr + (T-1) * N * H * D + H; DType* back_ht = back_ht_1; DType* gemmC1 = ws; // [D, T, N, 3 * H] DType* gemmC2 = gemmC1 + D * T * N * 3 * H; // N * 3 * H DType* rt = gemmC2 + N * 3 * H; DType* zt = rt + N * H; DType* nt = zt + N * H; DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H; DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H; DType* back_bx_ptr = (bx_ptr != nullptr)? bx_ptr + 3 * H * 2 : nullptr; DType* back_bh_ptr = (bh_ptr != nullptr)? 
bh_ptr + 3 * H * 2: nullptr; DType* back_gemmC1 = gemmC1 + T * N * 3 * H; DType* gemmC1_t = gemmC1; const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I)); const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H)); const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H)); const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H)); const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I)); const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H)); const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H)); const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H)); const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (D == 1) { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; i++) for (int j = 0; j < H; j++) { y_ptr[i * H + j] = hx[i][j]; } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; i++) for (int j = 0; j < H; j++) { y_ptr[i * D * H + j] = hx[i][j]; back_ht_1[i * D * H + j] = hx[N + i][j]; } } Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H)); Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H)); Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H)); // x * wx.T : [T * N, I] * [I, 3 * H] DType alpha = 1.0; DType beta = 0.0; linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true); if (D == 2) { linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true); } for (index_t t = 0; t < T; t++) { // perform the first direction, X * wx and H * wh for each step // ht-1 * wh, ht-1:[N, H] wh:[3 * H, H] Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H)); if (D == 1) { linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true); } else { Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N)); dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N)); linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true); } gemmC1_t = gemmC1 + t * N * 3 * H; #pragma omp parallel for 
num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { index_t rtb = i * 3 * H; index_t ztb = i * 3 * H + H; index_t ntb = i * 3 * H + 2 * H; rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j] + bx[0][j] + bh[0][j]); zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j] + bx[1][j] + bh[1][j]); nt[i * H + j] = tanh(gemmC1_t[ntb + j] + bx[2][j] + rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j])); ht[i * D * H + j] = (1-zt[i * H + j]) * nt[i * H + j] + zt[i * H + j] * ht_1[i * D * H + j]; } } ht_1 = ht; ht = ht + D * H * N; // perform the second direction if (D == 2) { gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H; Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H)); Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType> (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N)); dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N)); linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true); #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { index_t rtb = i * 3 * H; index_t ztb = i * 3 * H + H; index_t ntb = i * 3 * H + 2 * H; rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j] + back_bx[0][j] + back_bh[0][j]); zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j] + back_bx[1][j]+ back_bh[1][j]); nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j] + rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j])); back_ht[i * D * H + j] = (1 - zt[i * H + j]) * nt[i * H + j] + zt[i * H + j] * back_ht_1[i * D * H + j]; } } back_ht_1 = back_ht; back_ht = back_ht - D * H * N; } } // copy last state to hy, from(N, H * D) to (D, N, H) if (state_outputs) { if (D == 1) { DType* y_start = y_ptr + (T - 1) * N * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; i++) for (int j = 0; j < H; j++) { hy_ptr[i * H + j] = y_start[i * H + j]; } } else { DType* y_start = y_ptr + (T - 1) * N * H * D; DType* 
y_back_start = y_ptr + H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; i++) for (int j = 0; j < H; j++) { hy_ptr[i * H + j] = y_start[i * D * H + j]; hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j]; } } } } template <typename DType> void GruForwardInference(DType* ws, bool state_outputs, const int L, const int D, const index_t T, const index_t N, index_t I, const int H, DType* x_ptr, DType* hx_ptr, DType* w_ptr, DType* y_ptr, DType* hy_ptr) { DType* wx = w_ptr; DType* wh = wx + I * H * 3; DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3) + (L - 1) * ((D + 1) * H) * H * 3 * D; DType* bh = bx + H * 3; DType* y_tmp = ws; DType* y_l = x_ptr; DType* tmp_buf = y_tmp + D * T * N * H; DType* ws2 = y_tmp + D * T * N * H + D * H * N; DType* wx_l = wx; DType* wh_l = wh; DType* bx_l = bx; DType* bh_l = bh; Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H)); DType* hy_l = hy_ptr; for (int l = 0; l < L; l++) { Tensor<cpu, 2, DType> x_l(y_l, Shape2(T * N, I)); if ((L + l) % 2) { y_l = y_ptr; } else { y_l = y_tmp; } Tensor<cpu, 2, DType> hx_l = hx[D * l]; GruForwardInferenceSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, bx_l, bh_l, y_l, hy_l); hy_l = hy_l + D * N * H; bx_l = bx_l + 3 * H * D * 2; bh_l = bh_l + 3 * H * D * 2; wx_l = wx_l + I * H * 3 * D + H * H * 3 * D; if (l == 0) { I = D * H; } wh_l = wx_l + I * 3 * H; } } template<typename DType> void GruForwardTrainingSingleLayer(DType* ws, DType* tmp_buf, bool state_outputs, const int D, const index_t T, const index_t N, const index_t I, const int H, const Tensor<cpu, 2, DType> &x, const Tensor<cpu, 2, DType> &hx, DType* wx_ptr, DType* wh_ptr, DType* bx_ptr, DType* bh_ptr, DType* gateR, DType* gateZ, DType* gateN, DType* Mnh, DType* y_ptr, DType* hy_ptr) { DType* ht = y_ptr; DType* ht_1 = y_ptr; DType* back_ht_1 = y_ptr + (T - 1)* N * H * D + H; DType* back_ht = back_ht_1; DType* gemmC1 = ws; // [D, T, N, 3 * H] DType* gemmC2 = 
      gemmC1 + D * T * N * 3 * H;  // N * 3 * H
  DType* rt = gateR;
  DType* zt = gateZ;
  DType* nt = gateN;
  // Backward-direction weights/biases/activations follow the forward ones
  // in the packed buffers.
  DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
  DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
  DType* back_bx_ptr = (bx_ptr != nullptr)? bx_ptr + 3 * H * 2 : nullptr;
  DType* back_bh_ptr = (bh_ptr != nullptr)? bh_ptr + 3 * H * 2 : nullptr;
  DType* back_gateR = gateR + T * N * H;
  DType* back_gateZ = gateZ + T * N * H;
  DType* back_gateN = gateN + T * N * H;
  DType* back_Mnh = Mnh + T * N * H;
  DType* back_gemmC1 = gemmC1 + T * N * 3 * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(3, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(3, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed step 0 of the output buffer with the initial hidden state(s).
  if (D == 1) {
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, 3 * H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, 3 * H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, 3 * H));

  // x * wx.T : [T * N, I] * [I, 3 * H] -- done once for all time steps.
  DType alpha = 1.0;
  DType beta = 0.0;
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }

  for (index_t t = 0; t < T; t++) {
    //  perform the first direction, X * wx and H * wh for each step
    //  ht-1 * wh, ht-1:[N, H] wh:[3 * H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      // Bidirectional: strip out this direction's H columns via a transpose
      // staged in tmp_buf before the GEMM.
      Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
                                       Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    rt = gateR + t * N * H;
    zt = gateZ + t * N * H;
    nt = gateN + t * N * H;
    gemmC1_t = gemmC1 + t * N * 3 * H;
    DType* Mnht = Mnh + t * N * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        index_t rtb = i * 3 * H;
        index_t ztb = i * 3 * H + H;
        index_t ntb = i * 3 * H + 2 * H;
        // Cache (h_{t-1} * Wh_n + bh_n) for reuse in the backward pass.
        Mnht[i * H + j] =  gemmC2[ntb + j] + bh[2][j];
        rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j]
            + bx[0][j] + bh[0][j]);
        zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j]
            + bx[1][j] + bh[1][j]);
        nt[i * H + j] = tanh(gemmC1_t[ntb + j] + bx[2][j]
            + rt[i * H + j] * (gemmC2[ntb + j] + bh[2][j]));
        ht[i * D * H + j] = (1-zt[i * H + j]) * nt[i * H + j]
            + zt[i * H + j] * ht_1[i * D * H + j];
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    //  perform the second direction
    if (D == 2) {
      rt = back_gateR + (T - 1 - t) * N * H;
      zt = back_gateZ + (T - 1 - t) * N * H;
      nt = back_gateN + (T - 1 - t) * N * H;
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * 3 * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);

      DType* back_Mnht = back_Mnh + (T - 1 - t) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          index_t rtb = i * 3 * H;
          index_t ztb = i * 3 * H + H;
          index_t ntb = i * 3 * H + 2 * H;
          back_Mnht[i * H + j] = gemmC2[ntb + j] +
              back_bh[2][j];
          rt[i * H + j] = sigmoid(gemmC1_t[rtb + j] + gemmC2[rtb + j]
              + back_bx[0][j] + back_bh[0][j]);
          zt[i * H + j] = sigmoid(gemmC1_t[ztb + j] + gemmC2[ztb + j]
              + back_bx[1][j] + back_bh[1][j]);
          nt[i * H + j] = tanh(gemmC1_t[ntb + j] + back_bx[2][j]
              + rt[i * H + j] * (gemmC2[ntb + j] + back_bh[2][j]));
          back_ht[i * D * H + j] = (1 - zt[i * H + j]) * nt[i * H + j]
              + zt[i * H + j] * back_ht_1[i * D * H + j];
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }

  //  copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}

// Training-mode forward over L GRU layers; records gate activations and
// Mnh products into the reserved space (rs) for the backward pass, and
// applies inverted dropout between layers when dropout > 0.
template <typename DType>
void GruForwardTraining(DType* ws,
                        DType* rs,
                        bool state_outputs,
                        const int L,
                        const int D,
                        const index_t T,
                        const index_t N,
                        index_t I,
                        const int H,
                        DType* x_ptr,
                        DType* hx_ptr,
                        DType* w_ptr,
                        DType* y_ptr,
                        DType* hy_ptr,
                        const float dropout) {
  DType* wx = w_ptr;
  DType* wh = wx + I * H * 3;
  DType* bx = wh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
      + (L - 1) * ((D + 1) * H) * H * 3 * D;
  DType* bh = bx + H * 3;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
  DType* hy_l = hy_ptr;
  // Reserved-space layout: gateR, gateZ, gateN, per-layer outputs, Mnh,
  // then the saved dropout masks.
  DType* gateR_l = rs;
  DType* gateZ_l = gateR_l + L * T * D * N * H;
  DType* gateN_l = gateZ_l + L * T * D * N * H;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* Mnh_l = y_l + L * T * N * H * D;
  DType* dropout_random = Mnh_l + L * D * T * N * H;
  DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
  DType* ws2 = tmp_buf + D * N * H;
  DType* wx_l = wx;
  DType* wh_l = wh;
  DType* bx_l = bx;
  DType* bh_l = bh;
  DType* y_tmp = x_ptr;
  unsigned int seed_ = 17 + rand() % 4096;  // NOLINT(runtime/threadsafe_fn)
  for (int l = 0; l < L; l++) {
    if (l != 0) {
      y_tmp = y_l;
      y_l = y_l + T * N * H * D;
    }
    if (dropout > 0.0f && l > 0) {
      const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < T * N * I; i++) {
        int rand_data = rand_r(&seed_);
        // Inverted dropout: zero with probability `dropout`, otherwise
        // rescale by 1/(1-dropout); the mask is kept for the backward pass.
        if (static_cast<float>(rand_data % 1000) < static_cast<float>(1000 * dropout)) {
          dropout_random[(l - 1) * T * N * I + i] = 0;
          y_tmp[i] = 0;
        } else {
          dropout_random[(l - 1) * T * N * I + i] = 1.0f - dropout;
          y_tmp[i] = y_tmp[i] / (1.0f - dropout);
        }
      }
    }
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    Tensor<cpu, 2, DType> hx_l = hx[D * l];
    GruForwardTrainingSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
                                         x_l, hx_l, wx_l, wh_l, bx_l, bh_l,
                                         gateR_l, gateZ_l, gateN_l, Mnh_l,
                                         y_l, hy_l);
    gateR_l = gateR_l + T * D * N * H;
    gateZ_l = gateZ_l + T * D * N * H;
    gateN_l = gateN_l + T * D * N * H;
    Mnh_l = Mnh_l + T * D * N * H;
    hy_l = hy_l + D * N * H;
    bx_l = bx_l + 3 * H * D * 2;
    bh_l = bh_l + 3 * H * D * 2;
    wx_l = wx_l + I * H * 3 * D + H * H * 3 * D;
    if (l == 0) {
      I = D * H;
    }
    wh_l = wx_l + I * 3 * H;
  }
  // Copy the last layer's output into the user-visible output buffer.
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  #pragma omp parallel for num_threads(omp_threads)
  for (index_t i = 0; i < T * N * H * D; ++i) {
    y_ptr[i] = y_l[i];
  }
}

// Backward pass for one (bi)directional GRU layer, consuming the gate
// activations recorded by GruForwardTrainingSingleLayer.
// req_* are OpReqType values controlling write/add/skip per output.
template <typename DType>
void GruBackwardSingleLayer(DType* ws,
                            DType* tmp_buf,
                            const int D,
                            const index_t T,
                            const index_t N,
                            const index_t I,
                            const int H,
                            const Tensor<cpu, 2, DType> &x,
                            const Tensor<cpu, 2, DType> &hx,
                            DType* wx_ptr,
                            DType* wh_ptr,
                            DType* y_ptr,
                            DType* dy_ptr,
                            DType* dhy_ptr,
                            DType* gateR,
                            DType* gateZ,
                            DType* gateN,
                            DType* Mnh,
                            DType* dx,
                            DType* dhx,
                            DType* dwx,
                            DType* dwh,
                            DType* dbx,
                            DType* dbh,
                            int req_data,
                            int req_params,
                            int req_state) {
  DType* dyt;
  DType* ht1;  // [N, D, H]
  DType* rt;
  DType* zt;
  DType* nt;
  DType* dat;
  DType* dart;
  // Workspace layout for the gradient computation.
  DType* dar = ws;                  // [T, N, 3 * H]
  DType* da = dar + T * N * 3 * H;  // [T, N, 3 * H]
  DType* dht1 = da + T * N * 3 * H;  // [D, N, H]
  DType* hx_ = dht1 + D * N * H;    // [N, D, H]
  DType* Mnht = Mnh;
  DType* back_ht1;
  DType* back_dht1 = dht1 + N * H;  // [N, H]
  DType* back_Mnht = Mnh + T * N * H;
  DType* back_gateR = gateR + T * N * H;
  DType* back_gateZ = gateZ + T * N * H;
  DType* back_gateN = gateN + T * N * H;
  DType* back_wx_ptr = wx_ptr + I * 3 * H + H * 3 * H;
  DType* back_wh_ptr = wh_ptr + I * 3 * H + H * 3 * H;
  DType* back_dwx = dwx + I * 3 * H + H * 3 * H;
  DType* back_dwh = dwh + I * 3 * H + H * 3 * H;
  DType* back_dbx = dbx + 3 * H * 2;
  DType* back_dbh = dbh + 3 * H * 2;
  DType alpha = 1.0;
  DType beta = 0.0;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H * 3, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 3, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 3, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // For write requests, clear the parameter gradients first; kAddTo
  // accumulates into the caller's buffers instead.
  if (req_params != kNullOp && req_params != kAddTo) {
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < D * H * 3 * H; ++i) {
      dwh[i] = 0;
    }
    #pragma omp parallel for num_threads(omp_threads)
    for (int i = 0; i < D * 3 * H; ++i) {
      dbx[i] = 0;
      dbh[i] = 0;
    }
  }
  // Initialise dh_{T} from dhy (or zero when no gradient is supplied).
  #pragma omp parallel for num_threads(omp_threads)
  for (index_t i = 0; i < N * H; ++i) {
    if (dhy_ptr) {
      dht1[i] = dhy_ptr[i];
    } else {
      dht1[i] = 0;
    }
  }
  // Stage hx in [N, D, H] layout to match y_ptr's per-step layout.
  #pragma omp parallel for num_threads(omp_threads)
  for (index_t i = 0; i < N; ++i) {
    for (int j = 0; j < H; ++j) {
      hx_[i * D * H + j] = hx[i][j];
    }
  }
  if (D == 2) {
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N * H; ++i) {
      if (dhy_ptr) {
        back_dht1[i] = dhy_ptr[N * H + i];
      } else {
        back_dht1[i] = 0;
      }
    }
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        hx_[i * D * H + H + j] = hx[N + i][j];
      }
    }
  }
  // Walk the time steps in reverse for the forward direction.
  for (index_t t = T - 1; t >= 0; --t) {
    if (t) {
      ht1 = y_ptr + (t - 1) * N * D * H;
    } else {
      ht1 = hx_;
    }
    //  add dy[T, N, D, H] to dhy[D, N, H]
    dyt = dy_ptr + t * N * D * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        dht1[i * H + j] += dyt[i * D * H + j];
      }
    }
    rt = gateR + t * N * H;
    zt = gateZ + t * N * H;
    nt = gateN + t * N * H;
    Mnht = Mnh + t * N * H;
    dat = da + t * N * 3 * H;
    dart = dar + t * N * 3 * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        int nid = i * 3 * H + 2 * H + j;
        int zid = i * 3 * H + H + j;
        int rid = i * 3 * H + j;
        int id = i * H + j;
        // GRU gate-gradient algebra; `dar` holds the variant routed through
        // the h_{t-1} path (the n-gate term scaled by the reset gate).
        dat[nid] = dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]);
        dart[zid] = dat[zid] = dht1[id] * (ht1[i * D * H + j] -
            nt[id]) * zt[id] * (1 - zt[id]);
        dart[rid] = dat[rid] = dat[nid] * Mnht[id] * rt[id] * (1 - rt[id]);
        dart[nid] = dat[nid] * rt[id];
        dht1[id] = dht1[id] * zt[id];
      }
    }
    if (req_params != kNullOp) {
      alpha = 1.0;
      beta = 1.0;
      // dht1 = dart * wh    [N, H] = [N, 3 * H] * [3 * H, H]
      Tensor<cpu, 2, DType> d_dht1(dht1, Shape2(N, H));
      Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H));
      linalg_gemm(d_dart, wh, d_dht1, alpha, beta, false, false);
      if (req_params == kAddTo) {
        beta = 2.0;
        // dwx = da.T * x    [3 * H, I] = [3 * H, N] * [N, I] for AddTo
        Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
        Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H));
        Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I));
        linalg_gemm(d_dat, d_xt, d_dwx, alpha, beta, true, false);
      }
      // dwh = dart.T * ht1    [3 * H, H] = [3 * H, N] * [N, H]
      Tensor<cpu, 2, DType> d_ht1(ht1, Shape2(N, D * H));
      Tensor<cpu, 2, DType> d_dwh(dwh, Shape2(3 * H, H));
      Tensor<cpu, 3, DType> d_ht1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      d_ht1_tmp = reshape(d_ht1.T(), Shape3(D, H, N));
      linalg_gemm(d_dart, d_ht1_tmp[0], d_dwh, alpha, beta, true, true);
    }
  }
  if (req_params != kNullOp) {
    // dbx = e * da  [1, 3 * H] = [1, N] * [N, 3 * H]
    if (req_params != kAddTo) {
      #pragma omp parallel for num_threads(omp_threads)
      for (int i = 0; i < 3 * H; ++i) {
        for (index_t j = 0; j < N * T; ++j) {
          dbx[i] += da[j * 3 * H + i];
          dbh[i] += dar[j * 3 * H + i];
        }
      }
    } else {
      // kAddTo path: accumulate per-step partial sums in tmp_buf first.
      const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T));
      const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T,
                                          Shape2(H * 3, T));
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < H * T * 3; ++i) {
        tmp_dbx.dptr_[i] = 0;
        tmp_dbh.dptr_[i] = 0;
      }
      for (index_t t = T - 1; t >= 0; --t) {
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < 3 * H; ++i) {
          for (index_t j = 0; j < N; ++j) {
            tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i];
            tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i];
          }
        }
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < 3 * H; ++i) {
          // NOTE(review): `dbx[i] += X + dbx[i]` adds 2*dbx[i] + X rather
          // than X; this looks suspicious for the kAddTo accumulation --
          // confirm whether doubling is intended.
          dbx[i] += tmp_dbx[i][t] + dbx[i];
          dbh[i] += tmp_dbh[i][t] + dbh[i];
        }
      }
    }
  }
  alpha = 1.0;
  beta = 0.0;

  // dx = da * wx    [T * N, I] = [T * N, 3 * H] * [3 * H, I]
  Tensor<cpu, 2, DType> d_da(da, Shape2(T * N, 3 * H));
  if (req_data != kNullOp) {
    Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
    linalg_gemm(d_da, wx, d_dx, alpha, beta, false, false);
  }

  // dwx = da.T * x    [3 * H, I] = [3 * H, T * N] * [T * N, I]
  if (req_params != kNullOp && req_params != kAddTo) {
    Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(3 * H, I));
    linalg_gemm(d_da, x, d_dwx, alpha, beta, true, false);
  }

  // Reverse direction: iterate t forwards because that direction was
  // computed back-to-front during the forward pass.
  if (D == 2) {
    for (index_t t = 0; t < T; ++t) {
      if (t == T-1) {
        back_ht1 = hx_;
      } else {
        back_ht1 = y_ptr + (t + 1) * N * D * H;
      }
      //  add dy[T, N, D, H] to dhy[D, N, H]
      dyt = dy_ptr + t * N * D * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          back_dht1[i * H + j] += dyt[i * D * H + H + j];
        }
      }
      rt = back_gateR + t * N * H;
      zt = back_gateZ + t * N * H;
      nt = back_gateN + t * N * H;
      back_Mnht = Mnh + (T + t) * N * H;
      dat = da + t * N * 3 * H;
      dart = dar + t * N * 3 * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          index_t nid = i * 3 * H + 2 * H + j;
          index_t zid = i * 3 * H + H + j;
          index_t rid = i * 3 * H + j;
          index_t id = i * H + j;
          dat[nid] = back_dht1[id] * (1 - zt[id]) * (1 - nt[id] * nt[id]);
          dart[zid] = dat[zid] = back_dht1[id] * (back_ht1[i * D * H + H + j] -
              nt[id]) * zt[id] * (1 - zt[id]);
          dart[rid] = dat[rid] = dat[nid] * back_Mnht[id] * rt[id] * (1 - rt[id]);
          dart[nid] = dat[nid] * rt[id];
          back_dht1[id] = back_dht1[id] * zt[id];
        }
      }
      if (req_params != kNullOp) {
        alpha = 1.0;
        beta = 1.0;
        // dht1 = da * wh    [N, H] = [N, 3 * H] * [3 * H, H]
        Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, 3 * H));
        Tensor<cpu, 2, DType> d_back_dht1(back_dht1, Shape2(N, H));
        linalg_gemm(d_dart, back_wh, d_back_dht1, alpha, beta, false, false);
        // dwh = da.T * ht1    [3 * H, H] = [3 * H, N] * [N, H]
        Tensor<cpu, 2, DType> d_back_dwh(back_dwh, Shape2(3 * H, H));
        Tensor<cpu, 2, DType> d_back_ht1(back_ht1 + H, Shape2(N, D * H));
        Tensor<cpu, 3, DType> d_back_ht1_tmp = Tensor<cpu, 3, DType>
            (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
        d_back_ht1_tmp = reshape(d_back_ht1.T(), Shape3(D, H, N));
        if (req_params == kAddTo) {
          beta = 2.0;
          // dwx = da.T * x    [3 * H, I] = [3 * H, N] * [N, I] for AddTo
          Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I));
          Tensor<cpu, 2, DType> d_dat(dat, Shape2(N, 3 * H));
          Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I));
          linalg_gemm(d_dat, d_xt, d_back_dwx, alpha, beta, true, false);
        }
        linalg_gemm(d_dart, d_back_ht1_tmp[0], d_back_dwh, alpha, beta, true, true);
      }
    }
    if (req_params != kNullOp) {
      // dbx = e * da  [1, 3 * H] = [1, N] * [N, 3 * H]
      if (req_params != kAddTo) {
        #pragma omp parallel for num_threads(omp_threads)
        for (int i = 0; i < 3 * H; ++i) {
          for (index_t j = 0; j < N * T; ++j) {
            back_dbx[i] += da[j * 3 * H + i];
            back_dbh[i] += dar[j * 3 * H + i];
          }
        }
      } else {
        const Tensor<cpu, 2, DType>
            tmp_dbx(tmp_buf + T * N * D * H, Shape2(H * 3, T));
        const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + 3 * H * T,
                                            Shape2(H * 3, T));
        #pragma omp parallel for num_threads(omp_threads)
        for (index_t i = 0; i < H * T * 3; ++i) {
          tmp_dbx.dptr_[i] = 0;
          tmp_dbh.dptr_[i] = 0;
        }
        for (index_t t = T - 1; t >= 0; --t) {
          #pragma omp parallel for num_threads(omp_threads)
          for (int i = 0; i < 3 * H; ++i) {
            for (index_t j = 0; j < N; ++j) {
              tmp_dbx[i][t] += da[t * N * 3 * H + j * 3 * H + i];
              tmp_dbh[i][t] += dar[t * N * 3 * H + j * 3 * H + i];
            }
          }
          #pragma omp parallel for num_threads(omp_threads)
          for (int i = 0; i < 3 * H; ++i) {
            // NOTE(review): adds 2*back_dbx[i] + tmp, not just tmp -- same
            // pattern as the forward direction; verify the kAddTo
            // accumulation is intended.
            back_dbx[i] += tmp_dbx[i][t] + back_dbx[i];
            back_dbh[i] += tmp_dbh[i][t] + back_dbh[i];
          }
        }
      }
    }
    alpha = 1.0;
    beta = 1.0;
    // dxt = da * wx    [T * N, I] = [T * N, 3 * H] * [3 * H, I]
    Tensor<cpu, 2, DType> d_da2(da, Shape2(T * N, 3 * H));
    if (req_data != kNullOp) {
      Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I));
      linalg_gemm(d_da2, back_wx, d_dx, alpha, beta, false, false);
    }
    alpha = 1.0;
    beta = 0.0;
    // dwx = da.T * x    [3 * H, I] = [3 * H, T * N] * [T * N, I]
    if (req_params != kNullOp && req_params != kAddTo) {
      Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(3 * H, I));
      linalg_gemm(d_da2, x, d_back_dwx, alpha, beta, true, false);
    }
  }
  if (req_state != kNullOp) {
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N * H * D; ++i) {
      dhx[i] = dht1[i];
    }
  }
}

// Multi-layer GRU backward: iterates layers top-to-bottom, reusing the
// activations recorded by GruForwardTraining in the reserved space (rs).
template <typename DType>
void GruBackward(DType* ws,
                 DType* rs,
                 const int L,
                 const int D,
                 const index_t T,
                 const index_t N,
                 index_t I,
                 const int H,
                 DType* x_ptr,
                 DType* hx_ptr,
                 DType* w_ptr,
                 DType* dy_ptr,
                 DType* dhy_ptr,
                 DType* dx_ptr,
                 DType* dhx_ptr,
                 DType* dw_ptr,
                 int req_data,
                 int req_params,
                 int req_state,
                 const float dropout) {
  DType* wx = w_ptr;
  DType* dwx = dw_ptr;
  DType* dwh = dwx + I * H * 3;
  DType* dbx = dwh + H * H * 3 + (D - 1) * (H * H * 3 + I * H * 3)
      + (L - 1) * ((D + 1) * H) * H * 3 * D;
  // Start at the topmost layer's saved activations.
  DType* gateR_l = rs + (L - 1) * T * D * N * H;
  DType* gateZ_l = gateR_l + L * T * D * N * H;
  DType* gateN_l = gateZ_l + L * T * D * N * H;
  DType* y_l = gateN_l + L * T * D * N * H;
  DType* Mnh_l = y_l + L * T * N * H * D;
  DType* dropout_random = Mnh_l + L * D * T * N * H;
  DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H;
  DType* dx_l = tmp_buf + T * N * D * H + 3 * H * T * 2;
  DType* ws2 = dx_l + T * N * D * H;
  DType* wx_l = (L == 1)? wx : wx + (L - 2) * D * (D + 1) * H * 3 * H
      + D * I * 3 * H + D * H * 3 * H;
  DType* wh_l = wx_l;
  if (L == 1) {
    wh_l = wh_l + I * H * 3;
  } else {
    wh_l = wh_l + (D * H) * H * 3;
  }
  DType* dhy_l = nullptr;
  if (dhy_ptr)
    dhy_l = dhy_ptr + (L - 1) * D * N * H;
  DType* dwx_l = (L == 1)? dwx : dwx + (L - 2) * D * (D + 1) * H * 3 * H
      + D * I * 3 * H + D * H * 3 * H;
  DType* dwh_l = nullptr;
  if (L == 1) {
    dwh_l = dwx_l + I * H * 3;
  } else {
    dwh_l = dwx_l + (D * H) * H * 3;
  }
  DType* dbx_l = dbx + (L - 1) * D * 3 * H * 2;
  DType* dbh_l = dbx_l + 3 * H;
  DType* dhx_l = dhx_ptr + (L - 1) * D * N * H;
  DType* dy_l = dy_ptr;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H));
  index_t inputsize = I;
  DType* y_tmp = y_l - T * N * H * D;
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  for (int l = L - 1; l >= 0; --l) {
    if (l == 0) {
      I = inputsize;
      y_tmp = x_ptr;
      dx_l = dx_ptr;
    } else {
      I = D * H;
    }
    Tensor<cpu, 2, DType> hx_l = hx[l];
    Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I));
    GruBackwardSingleLayer<DType>(ws2, tmp_buf, D, T, N, I, H, x_l, hx_l,
                                  wx_l, wh_l, y_l, dy_l, dhy_l,
                                  gateR_l, gateZ_l, gateN_l, Mnh_l,
                                  dx_l, dhx_l, dwx_l, dwh_l, dbx_l, dbh_l,
                                  req_data, req_params, req_state);
    if (dropout > 0.0f && l > 0 && req_data != kNullOp) {
      // Replay the dropout mask recorded during the forward pass.
      dropout_random = dropout_random - T * N * D * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < T * N * I; i++) {
        if (dropout_random[i] == 0) {
          dx_l[i] = 0;
        } else {
          dx_l[i] = dx_l[i] / (1.0f - dropout);
        }
      }
    }
    if (l > 0) {
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < T * N * H * D; ++i) {
        dy_l[i] = dx_l[i];
      }
      // Rewind all per-layer pointers to the next (lower) layer.
      gateR_l = gateR_l - T * D * N * H;
      gateZ_l = gateZ_l - T * D * N * H;
      gateN_l = gateN_l - T * D * N * H;
      Mnh_l = Mnh_l - T * D * N * H;
      dhx_l = dhx_l - D * N * H;
      if (dhy_l)
        dhy_l = dhy_l - D * N * H;
      y_l = y_l - T * N * H * D;
      y_tmp = y_l;
      if (l == 1) {
        // Layer 0 has the original input width, not D * H.
        wx_l = wx_l - (inputsize + H) * H * 3 * D;
        wh_l = wx_l + inputsize * 3 * H;
        dwx_l = dwx_l - (inputsize + H) * H * 3 * D;
        dwh_l = dwx_l + inputsize * 3 * H;
      } else {
        wx_l = wx_l - (I + H) * H * 3 * D;
        wh_l = wx_l + I * 3 * H;
        dwx_l = dwx_l - (I + H) * H * 3 * D;
        dwh_l = dwx_l + I * 3 * H;
      }
      dbx_l = dbx_l - D * 3 * H * 2;
      dbh_l = dbx_l + 3 * H;
    }
  }
}

// Inference-mode forward for one (bi)directional vanilla-RNN layer.
// mode == 1 selects tanh activation, otherwise relu is used.
template<typename DType>
void VanillaRNNForwardInferenceSingleLayer(DType* ws,
                                           DType* tmp_buf,
                                           bool state_outputs,
                                           const int D,
                                           const index_t T,
                                           const index_t N,
                                           const index_t I,
                                           const int H,
                                           const Tensor<cpu, 2, DType> &x,
                                           const Tensor<cpu, 2, DType> &hx,
                                           DType* wx_ptr,
                                           DType* wh_ptr,
                                           DType* bx_ptr,
                                           DType* bh_ptr,
                                           DType* y_ptr,
                                           DType* hy_ptr,
                                           int mode) {
  DType* ht = y_ptr;
  DType* ht_1 = y_ptr;
  DType* back_ht_1 = y_ptr + (T-1) * N * H * D + H;
  DType* back_ht = back_ht_1;
  DType* gemmC1 = ws;                      // [D, T, N, H]
  DType* gemmC2 = gemmC1 + D * T * N * H;  // N * H
  DType* back_wx_ptr = wx_ptr + I * H + H * H;
  DType* back_wh_ptr = wh_ptr + I * H + H * H;
  DType* back_bx_ptr = (bx_ptr != nullptr)? bx_ptr + H * 2 : nullptr;
  DType* back_bh_ptr = (bh_ptr != nullptr)?
      bh_ptr + H * 2: nullptr;
  DType* back_gemmC1 = gemmC1 + T * N * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(1, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed step 0 of the output buffer with the initial hidden state(s).
  if (D == 1) {
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, H));

  // x * wx.T : [T * N, I] * [I, H] -- done once for all time steps.
  DType alpha = 1.0;
  DType beta = 0.0;
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }
  for (index_t t = 0; t < T; t++) {
    //  perform the first direction, X * wx and H * wh for each step
    //  ht-1 * wh, ht-1:[N, H] wh:[H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
                                       Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    gemmC1_t = gemmC1 + t * N * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; ++i) {
      for
          (int j = 0; j < H; ++j) {
        index_t tb = i * H;
        // Vanilla RNN cell: h_t = act(x_t * Wx + bx + h_{t-1} * Wh + bh).
        if (mode == 1) {
          ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + bx[0][j]
              + gemmC2[tb + j] + bh[0][j]);
        } else {
          ht[i * D * H + j] = relu(gemmC1_t[tb + j] + bx[0][j]
              + gemmC2[tb + j] + bh[0][j]);
        }
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    //  perform the second direction
    if (D == 2) {
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          index_t tb = i * H;
          if (mode == 1) {
            back_ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + back_bx[0][j]
                + gemmC2[tb + j] + back_bh[0][j]);
          } else {
            back_ht[i * D * H + j] = relu(gemmC1_t[tb + j] + back_bx[0][j]
                + gemmC2[tb + j] + back_bh[0][j]);
          }
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }
  //  copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}

// Multi-layer vanilla-RNN inference driver (mirrors GruForwardInference).
template <typename DType>
void VanillaRNNForwardInference(DType* ws,
                                bool state_outputs,
                                const int L,
                                const int D,
                                const index_t T,
                                const index_t N,
                                index_t I,
                                const int H,
                                DType* x_ptr,
                                DType* hx_ptr,
                                DType* w_ptr,
                                DType* y_ptr,
                                DType* hy_ptr,
                                int mode) {
  DType* wx = w_ptr;
  DType* wh = wx + I * H;
  DType* bx = wh + H * H + (D - 1) * (H * H + I * H)
      + (L - 1) * ((D + 1) * H) * H * D;
  DType* bh = bx + H;
  DType* y_tmp = ws;
  DType* y_l = x_ptr;
  DType* tmp_buf = y_tmp + D * T * N * H;
  DType* ws2 = y_tmp + D * T * N * H + D * H * N;
  DType* wx_l = wx;
  DType* wh_l = wh;
  DType* bx_l = bx;
  DType* bh_l = bh;
  Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H));
  DType* hy_l = hy_ptr;
  for (int l = 0; l < L; l++) {
    Tensor<cpu, 2, DType> x_l(y_l, Shape2(T * N, I));
    // Ping-pong outputs so the last layer lands in y_ptr.
    if ((L + l) % 2) {
      y_l = y_ptr;
    } else {
      y_l = y_tmp;
    }
    Tensor<cpu, 2, DType> hx_l = hx[D * l];
    VanillaRNNForwardInferenceSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H,
                                                 x_l, hx_l, wx_l, wh_l, bx_l, bh_l,
                                                 y_l, hy_l, mode);
    hy_l = hy_l + D * N * H;
    bx_l = bx_l + H * D * 2;
    bh_l = bh_l + H * D * 2;
    wx_l = wx_l + I * H * D + H * H * D;
    if (l == 0) {
      I = D * H;
    }
    wh_l = wx_l + I * H;
  }
}

// Training-mode forward for one vanilla-RNN layer; records per-step values
// in gateN for the backward pass (activated value for tanh mode,
// pre-activation for relu mode -- see the loop bodies below).
template<typename DType>
void VanillaRNNForwardTrainingSingleLayer(DType* ws,
                                          DType* tmp_buf,
                                          bool state_outputs,
                                          const int D,
                                          const index_t T,
                                          const index_t N,
                                          const index_t I,
                                          const int H,
                                          const Tensor<cpu, 2, DType> &x,
                                          const Tensor<cpu, 2, DType> &hx,
                                          DType* wx_ptr,
                                          DType* wh_ptr,
                                          DType* bx_ptr,
                                          DType* bh_ptr,
                                          DType* gateN,
                                          DType* y_ptr,
                                          DType* hy_ptr,
                                          int mode) {
  DType* ht = y_ptr;
  DType* ht_1 = y_ptr;
  DType* back_ht_1 = y_ptr + (T - 1)* N * H * D + H;
  DType* back_ht = back_ht_1;
  DType* gemmC1 = ws;                      // [D, T, N, H]
  DType* gemmC2 = gemmC1 + D * T * N * H;  // N * H
  DType* nt = gateN;
  DType* back_wx_ptr = wx_ptr + I * H + H * H;
  DType* back_wh_ptr = wh_ptr + I * H + H * H;
  DType* back_bx_ptr = (bx_ptr != nullptr)? bx_ptr + H * 2 : nullptr;
  DType* back_bh_ptr = (bh_ptr != nullptr)?
      bh_ptr + H * 2 : nullptr;
  DType* back_gateN = gateN + T * N * H;
  DType* back_gemmC1 = gemmC1 + T * N * H;
  DType* gemmC1_t = gemmC1;
  const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I));
  const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H));
  const Tensor<cpu, 2, DType> bx(bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> bh(bh_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H * 1, I));
  const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H * 1, H));
  const Tensor<cpu, 2, DType> back_bx(back_bx_ptr, Shape2(1, H));
  const Tensor<cpu, 2, DType> back_bh(back_bh_ptr, Shape2(1, H));
  const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
  // Seed step 0 of the output buffer with the initial hidden state(s).
  if (D == 1) {
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * H + j] = hx[i][j];
      }
  } else {
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; i++)
      for (int j = 0; j < H; j++) {
        y_ptr[i * D * H + j] = hx[i][j];
        back_ht_1[i * D * H + j] = hx[N + i][j];
      }
  }
  Tensor<cpu, 2, DType> dgemmC1(ws, Shape2(T * N, H));
  Tensor<cpu, 2, DType> dgemmC2(gemmC2, Shape2(N, H));
  Tensor<cpu, 2, DType> dback_gemmC1(back_gemmC1, Shape2(T * N, H));

  // x * wx.T : [T * N, I] * [I, H] -- done once for all time steps.
  DType alpha = 1.0;
  DType beta = 0.0;
  linalg_gemm(x, wx, dgemmC1, alpha, beta, false, true);
  if (D == 2) {
    linalg_gemm(x, back_wx, dback_gemmC1, alpha, beta, false, true);
  }
  for (index_t t = 0; t < T; t++) {
    //  perform the first direction, X * wx and H * wh for each step
    //  ht-1 * wh, ht-1:[N, H] wh:[H, H]
    Tensor<cpu, 2, DType> dht_1(ht_1, Shape2(N, D * H));
    if (D == 1) {
      linalg_gemm(dht_1, wh, dgemmC2, alpha, beta, false, true);
    } else {
      Tensor<cpu, 3, DType> dht_1_tmp = Tensor<cpu, 3, DType>(reinterpret_cast<DType*>(tmp_buf),
                                       Shape3(D, H, N));
      dht_1_tmp = reshape(dht_1.T(), Shape3(D, H, N));
      linalg_gemm(dht_1_tmp[0], wh, dgemmC2, alpha, beta, true, true);
    }
    nt = gateN + t * N * H;
    gemmC1_t = gemmC1 + t * N * H;
    #pragma omp parallel for num_threads(omp_threads)
    for (index_t i = 0; i < N; ++i) {
      for (int j = 0; j < H; ++j) {
        index_t tb = i * H;
        if (mode == 1) {
          // tanh mode: gateN stores the activated hidden state.
          nt[tb + j] = ht[i * D * H + j] = tanh(gemmC1_t[tb + j] + bx[0][j]
              + gemmC2[tb + j] + bh[0][j]);
        } else {
          // relu mode: gateN stores the pre-activation value.
          nt[tb + j] = gemmC1_t[tb + j] + bx[0][j] + gemmC2[tb + j] + bh[0][j];
          ht[i * D * H + j] = relu(nt[tb + j]);
        }
      }
    }
    ht_1 = ht;
    ht = ht + D * H * N;
    //  perform the second direction
    if (D == 2) {
      nt = back_gateN + (T - 1 - t) * N * H;
      gemmC1_t = back_gemmC1 + (T - 1 - t) * N * H;
      Tensor<cpu, 2, DType> dback_ht_1(back_ht_1 - H, Shape2(N, D * H));
      Tensor<cpu, 3, DType> dback_ht_1_tmp = Tensor<cpu, 3, DType>
          (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N));
      dback_ht_1_tmp = reshape(dback_ht_1.T(), Shape3(D, H, N));
      linalg_gemm(dback_ht_1_tmp[1], back_wh, dgemmC2, alpha, beta, true, true);
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; ++i) {
        for (int j = 0; j < H; ++j) {
          index_t tb = i * H;
          if (mode == 1) {
            nt[tb + j] = back_ht[i * D * H + j] = tanh(gemmC1_t[tb + j]
                + back_bx[0][j] + gemmC2[tb + j] + back_bh[0][j]);
          } else {
            nt[tb + j] = gemmC1_t[tb + j] + back_bx[0][j] + gemmC2[tb + j]
                + back_bh[0][j];
            back_ht[i * D * H + j] = relu(nt[tb + j]);
          }
        }
      }
      back_ht_1 = back_ht;
      back_ht = back_ht - D * H * N;
    }
  }
  //  copy last state to hy, from(N, H * D) to (D, N, H)
  if (state_outputs) {
    if (D == 1) {
      DType* y_start = y_ptr + (T - 1) * N * H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * H + j];
        }
    } else {
      DType* y_start = y_ptr + (T - 1) * N * H * D;
      DType* y_back_start = y_ptr + H;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < N; i++)
        for (int j = 0; j < H; j++) {
          hy_ptr[i * H + j] = y_start[i * D * H + j];
          hy_ptr[N * H + i * H + j] = y_back_start[i * D * H + j];
        }
    }
  }
}

// Multi-layer training-mode vanilla-RNN forward (mirrors GruForwardTraining).
template <typename DType>
void VanillaRNNForwardTraining(DType* ws,
                               DType* rs,
                               bool state_outputs,
                               const int L,
const int D, const index_t T, const index_t N, index_t I, const int H, DType* x_ptr, DType* hx_ptr, DType* w_ptr, DType* y_ptr, DType* hy_ptr, const float dropout, int mode) { DType* wx = w_ptr; DType* wh = wx + I * H; DType* bx = wh + H * H + (D - 1) * (H * H + I * H) + (L - 1) * ((D + 1) * H) * H * D; DType* bh = bx + H; Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(D * L, N, H)); DType* hy_l = hy_ptr; DType* gateN_l = rs; DType* y_l = gateN_l + L * T * D * N * H; DType* dropout_random = y_l + L * D * T * N * H; DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H; DType* ws2 = tmp_buf + D * N * H; DType* wx_l = wx; DType* wh_l = wh; DType* bx_l = bx; DType* bh_l = bh; DType* y_tmp = x_ptr; const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); unsigned int seed_ = 17 + rand() % 4096; // NOLINT(runtime/threadsafe_fn) for (int l = 0; l < L; l++) { if (l != 0) { y_tmp = y_l; y_l = y_l + T * N * H * D; } if (dropout > 0.0f && l > 0) { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < T * N * I; i++) { int rand_data = rand_r(&seed_); if (static_cast<float>(rand_data % 1000) < static_cast<float>(1000 * dropout)) { dropout_random[(l - 1) * T * N * I + i] = 0; y_tmp[i] = 0; } else { dropout_random[(l - 1) * T * N * I + i] = 1.0f - dropout; y_tmp[i] = y_tmp[i] / (1.0f - dropout); } } } Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I)); Tensor<cpu, 2, DType> hx_l = hx[D * l]; VanillaRNNForwardTrainingSingleLayer<DType>(ws2, tmp_buf, state_outputs, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, bx_l, bh_l, gateN_l, y_l, hy_l, mode); gateN_l = gateN_l + T * D * N * H; hy_l = hy_l + D * N * H; bx_l = bx_l + H * D * 2; bh_l = bh_l + H * D * 2; wx_l = wx_l + I * H * D + H * H * D; if (l == 0) { I = D * H; } wh_l = wx_l + I * H; } #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < T * N * H * D; ++i) { y_ptr[i] = y_l[i]; } } template <typename DType> void VanillaRNNBackwardSingleLayer(DType* ws, DType* 
tmp_buf, const int D, const index_t T, const index_t N, const index_t I, const int H, const Tensor<cpu, 2, DType> &x, const Tensor<cpu, 2, DType> &hx, DType* wx_ptr, DType* wh_ptr, DType* y_ptr, DType* dy_ptr, DType* dhy_ptr, DType* gateN, DType* dx, DType* dhx, DType* dwx, DType* dwh, DType* dbx, DType* dbh, int req_data, int req_params, int req_state, int mode) { DType* dyt; DType* ht1; // [N, D, H] DType* dart; DType* nt; DType* dar = ws; // [T, N, H] DType* dht1 = dar + T * N * H; // [D, N, H] DType* hx_ = dht1 + D * N * H; // [N, D, H] DType* back_ht1; DType* back_dht1 = dht1 + N * H; // [N, H] DType* back_gateN = gateN + T * N * H; DType* back_wx_ptr = wx_ptr + I * H + H * H; DType* back_wh_ptr = wh_ptr + I * H + H * H; DType* back_dwx = dwx + I * H + H * H; DType* back_dwh = dwh + I * H + H * H; DType* back_dbx = dbx + H * 2; DType* back_dbh = dbh + H * 2; DType alpha = 1.0; DType beta = 0.0; const Tensor<cpu, 2, DType> wx(wx_ptr, Shape2(H, I)); const Tensor<cpu, 2, DType> wh(wh_ptr, Shape2(H, H)); const Tensor<cpu, 2, DType> back_wx(back_wx_ptr, Shape2(H, I)); const Tensor<cpu, 2, DType> back_wh(back_wh_ptr, Shape2(H, H)); const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (req_params != kNullOp && req_params != kAddTo) { #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < D * H * H; ++i) { dwh[i] = 0; } #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < D * H; ++i) { dbx[i] = 0; dbh[i] = 0; } } #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N * H; ++i) { if (dhy_ptr) { dht1[i] = dhy_ptr[i]; } else { dht1[i] = 0; } } #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { hx_[i * D * H + j] = hx[i][j]; } } if (D == 2) { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N * H; ++i) { if (dhy_ptr) { back_dht1[i] = dhy_ptr[N * H + i]; } else { back_dht1[i] = 0; } } #pragma omp 
parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { hx_[i * D * H + H + j] = hx[N + i][j]; } } } for (index_t t = T - 1; t >= 0; --t) { if (t) { ht1 = y_ptr + (t - 1) * N * D * H; } else { ht1 = hx_; } // add dy[T, N, D, H] to dhy[D, N, H] dyt = dy_ptr + t * N * D * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { dht1[i * H + j] += dyt[i * D * H + j]; } } nt = gateN + t * N * H; dart = dar + t * N * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { index_t id = i * H + j; if (mode == 1) { dart[id] = dht1[id] * (1 - nt[id] * nt[id]); } else { dart[id] = nt[id] > 0.0f ? static_cast<float>(dht1[id]) : 0.0f; } dht1[id] = 0; } } if (req_params != kNullOp) { alpha = 1.0; beta = 1.0; // dht1 = dart * wh [N, H] = [N, H] * [H, H] Tensor<cpu, 2, DType> d_dht1(dht1, Shape2(N, H)); Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, H)); linalg_gemm(d_dart, wh, d_dht1, alpha, beta, false, false); if (req_params == kAddTo) { beta = 2.0; // dwx = da.T * x [H, I] = [H, N] * [N, I] for AddTo Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I)); Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(H, I)); linalg_gemm(d_dart, d_xt, d_dwx, alpha, beta, true, false); } // dwh = dart.T * ht1 [H, H] = [H, N] * [N, H] Tensor<cpu, 2, DType> d_ht1(ht1, Shape2(N, D * H)); Tensor<cpu, 2, DType> d_dwh(dwh, Shape2(H, H)); Tensor<cpu, 3, DType> d_ht1_tmp = Tensor<cpu, 3, DType> (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N)); d_ht1_tmp = reshape(d_ht1.T(), Shape3(D, H, N)); linalg_gemm(d_dart, d_ht1_tmp[0], d_dwh, alpha, beta, true, true); } } if (req_params != kNullOp) { // dbx = e * da [1, H] = [1, N] * [N, H] if (req_params != kAddTo) { #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < H; ++i) { for (index_t j = 0; j < N * T; ++j) { dbx[i] += dar[j * H + i]; dbh[i] = dbx[i]; } } } else { const 
Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H, T)); const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + H * T, Shape2(H, T)); #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < H * T; ++i) { tmp_dbx.dptr_[i] = 0; tmp_dbh.dptr_[i] = 0; } for (index_t t = T - 1; t >= 0; --t) { #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < H; ++i) { for (index_t j = 0; j < N; ++j) { tmp_dbx[i][t] += dar[t * N * H + j * H + i]; tmp_dbh[i][t] = tmp_dbx[i][t]; } } #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < H; ++i) { dbx[i] += tmp_dbx[i][t] + dbx[i]; dbh[i] = dbx[i]; } } } } alpha = 1.0; beta = 0.0; // dx = da * wx [T * N, I] = [T * N, H] * [H, I] Tensor<cpu, 2, DType> d_dar(dar, Shape2(T * N, H)); if (req_data != kNullOp) { Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I)); linalg_gemm(d_dar, wx, d_dx, alpha, beta, false, false); } // dwx = da.T * x [H, I] = [H, T * N] * [T * N, I] if (req_params != kNullOp && req_params != kAddTo) { Tensor<cpu, 2, DType> d_dwx(dwx, Shape2(H, I)); linalg_gemm(d_dar, x, d_dwx, alpha, beta, true, false); } if (D == 2) { for (index_t t = 0; t < T; ++t) { if (t == T-1) { back_ht1 = hx_; } else { back_ht1 = y_ptr + (t + 1) * N * D * H; } // add dy[T, N, D, H] to dhy[D, N, H] dyt = dy_ptr + t * N * D * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { back_dht1[i * H + j] += dyt[i * D * H + H + j]; } } nt = back_gateN + t * N * H; dart = dar + t * N * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N; ++i) { for (int j = 0; j < H; ++j) { index_t id = i * H + j; if (mode == 1) { dart[id] = back_dht1[id] * (1 - nt[id] * nt[id]); } else { dart[id] = nt[id] > 0.0f ? 
static_cast<float>(back_dht1[id]) : 0.0f; } back_dht1[id] = 0; } } if (req_params != kNullOp) { alpha = 1.0; beta = 1.0; // dht1 = da * wh [N, H] = [N, H] * [H, H] Tensor<cpu, 2, DType> d_dart(dart, Shape2(N, H)); Tensor<cpu, 2, DType> d_back_dht1(back_dht1, Shape2(N, H)); linalg_gemm(d_dart, back_wh, d_back_dht1, alpha, beta, false, false); // dwh = da.T * ht1 [H, H] = [H, N] * [N, H] Tensor<cpu, 2, DType> d_back_dwh(back_dwh, Shape2(H, H)); Tensor<cpu, 2, DType> d_back_ht1(back_ht1 + H, Shape2(N, D * H)); Tensor<cpu, 3, DType> d_back_ht1_tmp = Tensor<cpu, 3, DType> (reinterpret_cast<DType*>(tmp_buf), Shape3(D, H, N)); d_back_ht1_tmp = reshape(d_back_ht1.T(), Shape3(D, H, N)); if (req_params == kAddTo) { beta = 2.0; // dwx = da.T * x [ H, I] = [H, N] * [N, I] for AddTo Tensor<cpu, 2, DType> d_xt(x.dptr_ + t * N * I, Shape2(N, I)); Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(H, I)); linalg_gemm(d_dart, d_xt, d_back_dwx, alpha, beta, true, false); } linalg_gemm(d_dart, d_back_ht1_tmp[0], d_back_dwh, alpha, beta, true, true); } } if (req_params != kNullOp) { // dbx = e * da [1, H] = [1, N] * [N, H] if (req_params != kAddTo) { #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < H; ++i) { for (index_t j = 0; j < N * T; ++j) { back_dbx[i] += dar[j * H + i]; back_dbh[i] = back_dbx[i]; } } } else { const Tensor<cpu, 2, DType> tmp_dbx(tmp_buf + T * N * D * H, Shape2(H, T)); const Tensor<cpu, 2, DType> tmp_dbh(tmp_buf + T * N * D * H + H * T, Shape2(H, T)); #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < H * T; ++i) { tmp_dbx.dptr_[i] = 0; tmp_dbh.dptr_[i] = 0; } for (index_t t = T - 1; t >= 0; --t) { #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < H; ++i) { for (index_t j = 0; j < N; ++j) { tmp_dbx[i][t] += dar[t * N * H + j * H + i]; tmp_dbh[i][t] = tmp_dbx[i][t]; } } #pragma omp parallel for num_threads(omp_threads) for (int i = 0; i < H; ++i) { back_dbx[i] += tmp_dbx[i][t] + back_dbx[i]; back_dbh[i] 
= back_dbx[i]; } } } } alpha = 1.0; beta = 1.0; // dxt = da * wx [T * N, I] = [T * N, H] * [H, I] Tensor<cpu, 2, DType> d_dar2(dar, Shape2(T * N, H)); if (req_data != kNullOp) { Tensor<cpu, 2, DType> d_dx(dx, Shape2(T * N, I)); linalg_gemm(d_dar2, back_wx, d_dx, alpha, beta, false, false); } alpha = 1.0; beta = 0.0; // dwx = da.T * x [H, I] = [H, T * N] * [T * N, I] if (req_params != kNullOp && req_params != kAddTo) { Tensor<cpu, 2, DType> d_back_dwx(back_dwx, Shape2(H, I)); linalg_gemm(d_dar2, x, d_back_dwx, alpha, beta, true, false); } } if (req_state != kNullOp) { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < N * H * D; ++i) { dhx[i] = dht1[i]; } } } template <typename DType> void VanillaRNNBackward(DType* ws, DType* rs, const int L, const int D, const index_t T, const index_t N, index_t I, const int H, DType* x_ptr, DType* hx_ptr, DType* w_ptr, DType* dy_ptr, DType* dhy_ptr, DType* dx_ptr, DType* dhx_ptr, DType* dw_ptr, int req_data, int req_params, int req_state, const float dropout, int mode) { DType* wx = w_ptr; DType* dwx = dw_ptr; DType* dwh = dwx + I * H; DType* dbx = dwh + H * H + (D - 1) * (H * H + I * H) + (L - 1) * ((D + 1) * H) * H * D; DType* gateN_l = rs + (L - 1) * T * D * N * H; DType* y_l = gateN_l + L * T * D * N * H; DType* dropout_random = y_l + L * D * T * N * H; DType* tmp_buf = dropout_random + (L - 1) * D * T * N * H; DType* dx_l = tmp_buf + T * N * D * H + H * T * 2; DType* ws2 = dx_l + T * N * D * H; DType* wx_l = (L == 1)? wx : wx + (L - 2) * D * (D + 1) * H * H + D * I * H + D * H * H; DType* wh_l = wx_l; if (L == 1) { wh_l = wh_l + I * H; } else { wh_l = wh_l + (D * H) * H; } DType* dhy_l = nullptr; if (dhy_ptr) dhy_l = dhy_ptr + (L - 1) * D * N * H; DType* dwx_l = (L == 1)? 
dwx : dwx + (L - 2) * D * (D + 1) * H * H + D * I * H + D * H * H; DType* dwh_l = nullptr; if (L == 1) { dwh_l = dwx_l + I * H; } else { dwh_l = dwx_l + (D * H) * H; } DType* dbx_l = dbx + (L - 1) * D * H * 2; DType* dbh_l = dbx_l + H; DType* dhx_l = dhx_ptr + (L - 1) * D * N * H; DType* dy_l = dy_ptr; Tensor<cpu, 3, DType> hx(hx_ptr, Shape3(L, D * N, H)); index_t inputsize = I; DType* y_tmp = y_l - T * N * H * D; const int omp_threads = mxnet::engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); for (int l = L - 1; l >= 0; --l) { if (l == 0) { I = inputsize; y_tmp = x_ptr; dx_l = dx_ptr; } else { I = D * H; } Tensor<cpu, 2, DType> hx_l = hx[l]; Tensor<cpu, 2, DType> x_l(y_tmp, Shape2(T * N, I)); VanillaRNNBackwardSingleLayer<DType>(ws2, tmp_buf, D, T, N, I, H, x_l, hx_l, wx_l, wh_l, y_l, dy_l, dhy_l, gateN_l, dx_l, dhx_l, dwx_l, dwh_l, dbx_l, dbh_l, req_data, req_params, req_state, mode); if (dropout > 0.0f && l > 0 && req_data != kNullOp) { dropout_random = dropout_random - T * N * D * H; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < T * N * I; i++) { if (dropout_random[i] == 0) { dx_l[i] = 0; } else { dx_l[i] = dx_l[i] / (1.0f - dropout); } } } if (l > 0) { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < T * N * H * D; ++i) { dy_l[i] = dx_l[i]; } gateN_l = gateN_l - T * D * N * H; dhx_l = dhx_l - D * N * H; if (dhy_l) dhy_l = dhy_l - D * N * H; y_l = y_l - T * N * H * D; y_tmp = y_l; if (l == 1) { wx_l = wx_l - (inputsize + H) * H * D; wh_l = wx_l + inputsize * H; dwx_l = dwx_l - (inputsize + H) * H * D; dwh_l = dwx_l + inputsize * H; } else { wx_l = wx_l - (I + H) * H * D; wh_l = wx_l + I * H; dwx_l = dwx_l - (I + H) * H * D; dwh_l = dwx_l + I * H; } dbx_l = dbx_l - D * H * 2; dbh_l = dbx_l + H; } } } } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_RNN_IMPL_H_
chain_access_omp.c
// RUN: ${CATO_ROOT}/src/scripts/cexecute_pass.py %s -o %t
// RUN: diff <(mpirun -np 1 %t) %s.reference_output
#include <stdlib.h>
#include <stdio.h>
#include <omp.h>

/* Store x through the pointer p.  Exists so that the heap pointer
   escapes into a callee from inside the parallel region (the "chained
   access" this test exercises). */
void use_pointer(int * p, int x) { *p = x; }

/* Allocate one heap integer, let every thread of a parallel region
   write 42 into it through use_pointer, then print the final value
   and release the allocation. */
int main() {
    int *shared_cell = (int *)malloc(sizeof(int));
    *shared_cell = 0;

#pragma omp parallel
    {
        /* All threads store the same constant, so the printed result is
           deterministic regardless of the thread count. */
        use_pointer(shared_cell, 42);
    }

    printf("p: %d\n", *shared_cell);
    free(shared_cell);
    return 0;
}
3d7pt.c
/* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 24; tile_size[1] = 24; tile_size[2] = 32; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = alpha * (A[t%2][i][j][k]) + beta * (A[t%2][i - 1][j][k] + A[t%2][i][j - 1][k] + A[t%2][i][j][k - 1] + A[t%2][i + 1][j][k] + A[t%2][i][j + 1][k] + 
A[t%2][i][j][k + 1]); } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
CGOpenMPRuntime.h
//===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This provides a class for OpenMP runtime code generation. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H #define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H #include "CGValue.h" #include "clang/AST/DeclOpenMP.h" #include "clang/AST/GlobalDecl.h" #include "clang/AST/Type.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringSet.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/IR/Function.h" #include "llvm/IR/ValueHandle.h" #include "llvm/Support/AtomicOrdering.h" namespace llvm { class ArrayType; class Constant; class FunctionType; class GlobalVariable; class StructType; class Type; class Value; } // namespace llvm namespace clang { class Expr; class OMPDependClause; class OMPExecutableDirective; class OMPLoopDirective; class VarDecl; class OMPDeclareReductionDecl; class IdentifierInfo; namespace CodeGen { class Address; class CodeGenFunction; class CodeGenModule; /// A basic class for pre|post-action for advanced codegen sequence for OpenMP /// region. class PrePostActionTy { public: explicit PrePostActionTy() {} virtual void Enter(CodeGenFunction &CGF) {} virtual void Exit(CodeGenFunction &CGF) {} virtual ~PrePostActionTy() {} }; /// Class provides a way to call simple version of codegen for OpenMP region, or /// an advanced with possible pre|post-actions in codegen. 
class RegionCodeGenTy final { intptr_t CodeGen; typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &); CodeGenTy Callback; mutable PrePostActionTy *PrePostAction; RegionCodeGenTy() = delete; RegionCodeGenTy &operator=(const RegionCodeGenTy &) = delete; template <typename Callable> static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF, PrePostActionTy &Action) { return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action); } public: template <typename Callable> RegionCodeGenTy( Callable &&CodeGen, std::enable_if_t<!std::is_same<std::remove_reference_t<Callable>, RegionCodeGenTy>::value> * = nullptr) : CodeGen(reinterpret_cast<intptr_t>(&CodeGen)), Callback(CallbackFn<std::remove_reference_t<Callable>>), PrePostAction(nullptr) {} void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; } void operator()(CodeGenFunction &CGF) const; }; struct OMPTaskDataTy final { SmallVector<const Expr *, 4> PrivateVars; SmallVector<const Expr *, 4> PrivateCopies; SmallVector<const Expr *, 4> FirstprivateVars; SmallVector<const Expr *, 4> FirstprivateCopies; SmallVector<const Expr *, 4> FirstprivateInits; SmallVector<const Expr *, 4> LastprivateVars; SmallVector<const Expr *, 4> LastprivateCopies; SmallVector<const Expr *, 4> ReductionVars; SmallVector<const Expr *, 4> ReductionOrigs; SmallVector<const Expr *, 4> ReductionCopies; SmallVector<const Expr *, 4> ReductionOps; struct DependData { OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; const Expr *IteratorExpr = nullptr; SmallVector<const Expr *, 4> DepExprs; explicit DependData() = default; DependData(OpenMPDependClauseKind DepKind, const Expr *IteratorExpr) : DepKind(DepKind), IteratorExpr(IteratorExpr) {} }; SmallVector<DependData, 4> Dependences; llvm::PointerIntPair<llvm::Value *, 1, bool> Final; llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule; llvm::PointerIntPair<llvm::Value *, 1, bool> Priority; llvm::Value *Reductions = nullptr; unsigned NumberOfParts = 0; bool Tied 
= true; bool Nogroup = false; bool IsReductionWithTaskMod = false; bool IsWorksharingReduction = false; }; /// Class intended to support codegen of all kind of the reduction clauses. class ReductionCodeGen { private: /// Data required for codegen of reduction clauses. struct ReductionData { /// Reference to the item shared between tasks to reduce into. const Expr *Shared = nullptr; /// Reference to the original item. const Expr *Ref = nullptr; /// Helper expression for generation of private copy. const Expr *Private = nullptr; /// Helper expression for generation reduction operation. const Expr *ReductionOp = nullptr; ReductionData(const Expr *Shared, const Expr *Ref, const Expr *Private, const Expr *ReductionOp) : Shared(Shared), Ref(Ref), Private(Private), ReductionOp(ReductionOp) { } }; /// List of reduction-based clauses. SmallVector<ReductionData, 4> ClausesData; /// List of addresses of shared variables/expressions. SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses; /// List of addresses of original variables/expressions. SmallVector<std::pair<LValue, LValue>, 4> OrigAddresses; /// Sizes of the reduction items in chars. SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes; /// Base declarations for the reduction items. SmallVector<const VarDecl *, 4> BaseDecls; /// Emits lvalue for shared expression. LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E); /// Emits upper bound for shared expression (if array section). LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E); /// Performs aggregate initialization. /// \param N Number of reduction item in the common list. /// \param PrivateAddr Address of the corresponding private item. /// \param SharedLVal Address of the original shared variable. /// \param DRD Declare reduction construct used for reduction item. 
void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal, const OMPDeclareReductionDecl *DRD); public: ReductionCodeGen(ArrayRef<const Expr *> Shareds, ArrayRef<const Expr *> Origs, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> ReductionOps); /// Emits lvalue for the shared and original reduction item. /// \param N Number of the reduction item. void emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N); /// Emits the code for the variable-modified type, if required. /// \param N Number of the reduction item. void emitAggregateType(CodeGenFunction &CGF, unsigned N); /// Emits the code for the variable-modified type, if required. /// \param N Number of the reduction item. /// \param Size Size of the type in chars. void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size); /// Performs initialization of the private copy for the reduction item. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. /// \param DefaultInit Default initialization sequence that should be /// performed if no reduction specific initialization is found. /// \param SharedLVal Address of the original shared variable. void emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr, LValue SharedLVal, llvm::function_ref<bool(CodeGenFunction &)> DefaultInit); /// Returns true if the private copy requires cleanups. bool needCleanups(unsigned N); /// Emits cleanup code for the reduction item. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr); /// Adjusts \p PrivatedAddr for using instead of the original variable /// address in normal operations. /// \param N Number of the reduction item. /// \param PrivateAddr Address of the corresponding private item. 
Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N, Address PrivateAddr); /// Returns LValue for the reduction item. LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; } /// Returns LValue for the original reduction item. LValue getOrigLValue(unsigned N) const { return OrigAddresses[N].first; } /// Returns the size of the reduction item (in chars and total number of /// elements in the item), or nullptr, if the size is a constant. std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const { return Sizes[N]; } /// Returns the base declaration of the reduction item. const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; } /// Returns the base declaration of the reduction item. const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; } /// Returns true if the initialization of the reduction item uses initializer /// from declare reduction construct. bool usesReductionInitializer(unsigned N) const; }; class CGOpenMPRuntime { public: /// Allows to disable automatic handling of functions used in target regions /// as those marked as `omp declare target`. class DisableAutoDeclareTargetRAII { CodeGenModule &CGM; bool SavedShouldMarkAsGlobal; public: DisableAutoDeclareTargetRAII(CodeGenModule &CGM); ~DisableAutoDeclareTargetRAII(); }; /// Manages list of nontemporal decls for the specified directive. class NontemporalDeclsRAII { CodeGenModule &CGM; const bool NeedToPush; public: NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S); ~NontemporalDeclsRAII(); }; /// Maps the expression for the lastprivate variable to the global copy used /// to store new value because original variables are not mapped in inner /// parallel regions. Only private copies are captured but we need also to /// store private copy in shared address. /// Also, stores the expression for the private loop counter and it /// threaprivate name. 
struct LastprivateConditionalData { llvm::MapVector<CanonicalDeclPtr<const Decl>, SmallString<16>> DeclToUniqueName; LValue IVLVal; llvm::Function *Fn = nullptr; bool Disabled = false; }; /// Manages list of lastprivate conditional decls for the specified directive. class LastprivateConditionalRAII { enum class ActionToDo { DoNotPush, PushAsLastprivateConditional, DisableLastprivateConditional, }; CodeGenModule &CGM; ActionToDo Action = ActionToDo::DoNotPush; /// Check and try to disable analysis of inner regions for changes in /// lastprivate conditional. void tryToDisableInnerAnalysis(const OMPExecutableDirective &S, llvm::DenseSet<CanonicalDeclPtr<const Decl>> &NeedToAddForLPCsAsDisabled) const; LastprivateConditionalRAII(CodeGenFunction &CGF, const OMPExecutableDirective &S); public: explicit LastprivateConditionalRAII(CodeGenFunction &CGF, const OMPExecutableDirective &S, LValue IVLVal); static LastprivateConditionalRAII disable(CodeGenFunction &CGF, const OMPExecutableDirective &S); ~LastprivateConditionalRAII(); }; protected: CodeGenModule &CGM; StringRef FirstSeparator, Separator; /// Constructor allowing to redefine the name separator for the variables. explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator, StringRef Separator); /// Creates offloading entry for the provided entry ID \a ID, /// address \a Addr, size \a Size, and flags \a Flags. virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags, llvm::GlobalValue::LinkageTypes Linkage); /// Helper to emit outlined function for 'target' directive. /// \param D Directive to emit. /// \param ParentName Name of the function that encloses the target region. /// \param OutlinedFn Outlined function value to be defined by this call. /// \param OutlinedFnID Outlined function ID value to be defined by this call. /// \param IsOffloadEntry True if the outlined function is an offload entry. 
/// \param CodeGen Lambda codegen specific to an accelerator device. /// An outlined function may not be an entry if, e.g. the if clause always /// evaluates to false. virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen); /// Emits object of ident_t type with info for source location. /// \param Flags Flags for OpenMP location. /// llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc, unsigned Flags = 0); /// Returns pointer to ident_t type. llvm::Type *getIdentTyPointerTy(); /// Gets thread id value for the current thread. /// llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc); /// Get the function name of an outlined region. // The name can be customized depending on the target. // virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; } /// Emits \p Callee function call with arguments \p Args with location \p Loc. void emitCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee Callee, ArrayRef<llvm::Value *> Args = llvm::None) const; /// Emits address of the word in a memory where current thread id is /// stored. virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc); void setLocThreadIdInsertPt(CodeGenFunction &CGF, bool AtCurrentPoint = false); void clearLocThreadIdInsertPt(CodeGenFunction &CGF); /// Check if the default location must be constant. /// Default is false to support OMPT/OMPD. virtual bool isDefaultLocationConstant() const { return false; } /// Returns additional flags that can be stored in reserved_2 field of the /// default location. virtual unsigned getDefaultLocationReserved2Flags() const { return 0; } /// Returns default flags for the barriers depending on the directive, for /// which this barier is going to be emitted. 
static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind); /// Get the LLVM type for the critical name. llvm::ArrayType *getKmpCriticalNameTy() const {return KmpCriticalNameTy;} /// Returns corresponding lock object for the specified critical region /// name. If the lock object does not exist it is created, otherwise the /// reference to the existing copy is returned. /// \param CriticalName Name of the critical region. /// llvm::Value *getCriticalRegionLock(StringRef CriticalName); private: /// Default const ident_t object used for initialization of all other /// ident_t objects. llvm::Constant *DefaultOpenMPPSource = nullptr; using FlagsTy = std::pair<unsigned, unsigned>; /// Map of flags and corresponding default locations. using OpenMPDefaultLocMapTy = llvm::DenseMap<FlagsTy, llvm::Value *>; OpenMPDefaultLocMapTy OpenMPDefaultLocMap; Address getOrCreateDefaultLocation(unsigned Flags); QualType IdentQTy; llvm::StructType *IdentTy = nullptr; /// Map for SourceLocation and OpenMP runtime library debug locations. typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy; OpenMPDebugLocMapTy OpenMPDebugLocMap; /// The type for a microtask which gets passed to __kmpc_fork_call(). /// Original representation is: /// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...); llvm::FunctionType *Kmpc_MicroTy = nullptr; /// Stores debug location and ThreadID for the function. struct DebugLocThreadIdTy { llvm::Value *DebugLoc; llvm::Value *ThreadID; /// Insert point for the service instructions. llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr; }; /// Map of local debug location, ThreadId and functions. typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy> OpenMPLocThreadIDMapTy; OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap; /// Map of UDRs and corresponding combiner/initializer. 
typedef llvm::DenseMap<const OMPDeclareReductionDecl *, std::pair<llvm::Function *, llvm::Function *>> UDRMapTy; UDRMapTy UDRMap; /// Map of functions and locally defined UDRs. typedef llvm::DenseMap<llvm::Function *, SmallVector<const OMPDeclareReductionDecl *, 4>> FunctionUDRMapTy; FunctionUDRMapTy FunctionUDRMap; /// Map from the user-defined mapper declaration to its corresponding /// functions. llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap; /// Map of functions and their local user-defined mappers. using FunctionUDMMapTy = llvm::DenseMap<llvm::Function *, SmallVector<const OMPDeclareMapperDecl *, 4>>; FunctionUDMMapTy FunctionUDMMap; /// Maps local variables marked as lastprivate conditional to their internal /// types. llvm::DenseMap<llvm::Function *, llvm::DenseMap<CanonicalDeclPtr<const Decl>, std::tuple<QualType, const FieldDecl *, const FieldDecl *, LValue>>> LastprivateConditionalToTypes; /// Type kmp_critical_name, originally defined as typedef kmp_int32 /// kmp_critical_name[8]; llvm::ArrayType *KmpCriticalNameTy; /// An ordered map of auto-generated variables to their unique names. /// It stores variables with the following names: 1) ".gomp_critical_user_" + /// <critical_section_name> + ".var" for "omp critical" directives; 2) /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate /// variables. 
llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator>
    InternalVars;

/// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
llvm::Type *KmpRoutineEntryPtrTy = nullptr;
QualType KmpRoutineEntryPtrQTy;

/// Type typedef struct kmp_task {
///    void *              shareds; /**< pointer to block of pointers to
///                                      shared vars */
///    kmp_routine_entry_t routine; /**< pointer to routine to call for
///                                      executing task */
///    kmp_int32           part_id; /**< part id for the task */
///    kmp_routine_entry_t destructors; /* pointer to function to invoke
///                                 deconstructors of firstprivate C++ objects */
/// } kmp_task_t;
QualType KmpTaskTQTy;
/// Saved kmp_task_t for task directive.
QualType SavedKmpTaskTQTy;
/// Saved kmp_task_t for taskloop-based directive.
QualType SavedKmpTaskloopTQTy;

/// Type typedef struct kmp_depend_info {
///    kmp_intptr_t base_addr;
///    size_t len;
///    struct {
///      bool in:1;
///      bool out:1;
///    } flags;
/// } kmp_depend_info_t;
QualType KmpDependInfoTy;

/// Type typedef struct kmp_task_affinity_info {
///    kmp_intptr_t base_addr;
///    size_t len;
///    struct {
///      bool flag1 : 1;
///      bool flag2 : 1;
///      kmp_int32 reserved : 30;
///    } flags;
/// } kmp_task_affinity_info_t;
QualType KmpTaskAffinityInfoTy;

/// struct kmp_dim {  // loop bounds info casted to kmp_int64
///   kmp_int64 lo; // lower
///   kmp_int64 up; // upper
///   kmp_int64 st; // stride
/// };
QualType KmpDimTy;

/// Type struct __tgt_offload_entry{
///   void    *addr;      // Pointer to the offload entry info.
///                       // (function or global)
///   char    *name;      // Name of the function or global.
///   size_t  size;       // Size of the entry info (0 if it is a function).
///   int32_t flags;
///   int32_t reserved;
/// };
QualType TgtOffloadEntryQTy;

/// Entity that registers the offloading constants that were emitted so
/// far.
class OffloadEntriesInfoManagerTy {
  CodeGenModule &CGM;

  /// Number of entries registered so far.
  unsigned OffloadingEntriesNum = 0;

public:
  /// Base class of the entries info.
class OffloadEntryInfo { public: /// Kind of a given entry. enum OffloadingEntryInfoKinds : unsigned { /// Entry is a target region. OffloadingEntryInfoTargetRegion = 0, /// Entry is a declare target variable. OffloadingEntryInfoDeviceGlobalVar = 1, /// Invalid entry info. OffloadingEntryInfoInvalid = ~0u }; protected: OffloadEntryInfo() = delete; explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {} explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order, uint32_t Flags) : Flags(Flags), Order(Order), Kind(Kind) {} ~OffloadEntryInfo() = default; public: bool isValid() const { return Order != ~0u; } unsigned getOrder() const { return Order; } OffloadingEntryInfoKinds getKind() const { return Kind; } uint32_t getFlags() const { return Flags; } void setFlags(uint32_t NewFlags) { Flags = NewFlags; } llvm::Constant *getAddress() const { return cast_or_null<llvm::Constant>(Addr); } void setAddress(llvm::Constant *V) { assert(!Addr.pointsToAliveValue() && "Address has been set before!"); Addr = V; } static bool classof(const OffloadEntryInfo *Info) { return true; } private: /// Address of the entity that has to be mapped for offloading. llvm::WeakTrackingVH Addr; /// Flags associated with the device global. uint32_t Flags = 0u; /// Order this entry was emitted. unsigned Order = ~0u; OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid; }; /// Return true if a there are no entries defined. bool empty() const; /// Return number of entries defined so far. unsigned size() const { return OffloadingEntriesNum; } OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {} // // Target region entries related. // /// Kind of the target registry entry. enum OMPTargetRegionEntryKind : uint32_t { /// Mark the entry as target region. OMPTargetRegionEntryTargetRegion = 0x0, /// Mark the entry as a global constructor. OMPTargetRegionEntryCtor = 0x02, /// Mark the entry as a global destructor. 
OMPTargetRegionEntryDtor = 0x04,
};

/// Target region entries info.
class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo {
  /// Address that can be used as the ID of the entry.
  llvm::Constant *ID = nullptr;

public:
  OffloadEntryInfoTargetRegion()
      : OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {}
  explicit OffloadEntryInfoTargetRegion(unsigned Order, llvm::Constant *Addr,
                                        llvm::Constant *ID,
                                        OMPTargetRegionEntryKind Flags)
      : OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags),
        ID(ID) {
    setAddress(Addr);
  }

  llvm::Constant *getID() const { return ID; }
  void setID(llvm::Constant *V) {
    assert(!ID && "ID has been set before!");
    ID = V;
  }
  static bool classof(const OffloadEntryInfo *Info) {
    return Info->getKind() == OffloadingEntryInfoTargetRegion;
  }
};

/// Initialize target region entry.
void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                     StringRef ParentName, unsigned LineNum,
                                     unsigned Order);
/// Register target region entry.
void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                                   StringRef ParentName, unsigned LineNum,
                                   llvm::Constant *Addr, llvm::Constant *ID,
                                   OMPTargetRegionEntryKind Flags);
/// Return true if a target region entry with the provided information
/// exists.
bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
                              StringRef ParentName, unsigned LineNum) const;
/// Applies action \a Action on all registered entries.
typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
                                const OffloadEntryInfoTargetRegion &)>
    OffloadTargetRegionEntryInfoActTy;
void actOnTargetRegionEntriesInfo(
    const OffloadTargetRegionEntryInfoActTy &Action);

//
// Device global variable entries related.
//

/// Kind of the global variable entry.
enum OMPTargetGlobalVarEntryKind : uint32_t {
  /// Mark the entry as a to declare target.
  OMPTargetGlobalVarEntryTo = 0x0,
  /// Mark the entry as a to declare target link.
  OMPTargetGlobalVarEntryLink = 0x1,
};

/// Device global variable entries info.
class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo { /// Type of the global variable. CharUnits VarSize; llvm::GlobalValue::LinkageTypes Linkage; public: OffloadEntryInfoDeviceGlobalVar() : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {} explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order, OMPTargetGlobalVarEntryKind Flags) : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {} explicit OffloadEntryInfoDeviceGlobalVar( unsigned Order, llvm::Constant *Addr, CharUnits VarSize, OMPTargetGlobalVarEntryKind Flags, llvm::GlobalValue::LinkageTypes Linkage) : OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags), VarSize(VarSize), Linkage(Linkage) { setAddress(Addr); } CharUnits getVarSize() const { return VarSize; } void setVarSize(CharUnits Size) { VarSize = Size; } llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; } void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; } static bool classof(const OffloadEntryInfo *Info) { return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar; } }; /// Initialize device global variable entry. void initializeDeviceGlobalVarEntryInfo(StringRef Name, OMPTargetGlobalVarEntryKind Flags, unsigned Order); /// Register device global variable entry. void registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr, CharUnits VarSize, OMPTargetGlobalVarEntryKind Flags, llvm::GlobalValue::LinkageTypes Linkage); /// Checks if the variable with the given name has been registered already. bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const { return OffloadEntriesDeviceGlobalVar.count(VarName) > 0; } /// Applies action \a Action on all registered entries. typedef llvm::function_ref<void(StringRef, const OffloadEntryInfoDeviceGlobalVar &)> OffloadDeviceGlobalVarEntryInfoActTy; void actOnDeviceGlobalVarEntriesInfo( const OffloadDeviceGlobalVarEntryInfoActTy &Action); private: // Storage for target region entries kind. 
The storage is to be indexed by // file ID, device ID, parent function name and line number. typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion> OffloadEntriesTargetRegionPerLine; typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine> OffloadEntriesTargetRegionPerParentName; typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName> OffloadEntriesTargetRegionPerFile; typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile> OffloadEntriesTargetRegionPerDevice; typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy; OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion; /// Storage for device global variable entries kind. The storage is to be /// indexed by mangled name. typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar> OffloadEntriesDeviceGlobalVarTy; OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar; }; OffloadEntriesInfoManagerTy OffloadEntriesInfoManager; bool ShouldMarkAsGlobal = true; /// List of the emitted declarations. llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls; /// List of the global variables with their addresses that should not be /// emitted for the target. llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables; /// List of variables that can become declare target implicitly and, thus, /// must be emitted. llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables; using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>; /// Stack for list of declarations in current context marked as nontemporal. /// The set is the union of all current stack elements. llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack; /// Stack for list of addresses of declarations in current context marked as /// lastprivate conditional. The set is the union of all current stack /// elements. 
llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack;

/// Flag for keeping track of whether a requires unified_shared_memory
/// directive is present.
bool HasRequiresUnifiedSharedMemory = false;

/// Atomic ordering from the omp requires directive.
llvm::AtomicOrdering RequiresAtomicOrdering = llvm::AtomicOrdering::Monotonic;

/// Flag for keeping track of whether a target region has been emitted.
bool HasEmittedTargetRegion = false;

/// Flag for keeping track of whether a device routine has been emitted.
/// Device routines are specific to the
bool HasEmittedDeclareTargetRegion = false;

/// Loads all the offload entries information from the host IR
/// metadata.
void loadOffloadInfoMetadata();

/// Returns __tgt_offload_entry type.
QualType getTgtOffloadEntryQTy();

/// Start scanning from statement \a S and emit all target regions
/// found along the way.
/// \param S Starting statement.
/// \param ParentName Name of the function declaration that is being scanned.
void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);

/// Build type kmp_routine_entry_t (if not built yet).
void emitKmpRoutineEntryT(QualType KmpInt32Ty);

/// Returns pointer to kmpc_micro type.
llvm::Type *getKmpc_MicroPointerTy();

/// Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
                                                 bool IVSigned);

/// Returns __kmpc_dispatch_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize,
                                                bool IVSigned);

/// Returns __kmpc_dispatch_next_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize,
                                                bool IVSigned);

/// Returns __kmpc_dispatch_fini_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize,
                                                bool IVSigned);

/// If the specified mangled name is not in the module, create and
/// return threadprivate cache object. This object is a pointer's worth of
/// storage that's reserved for use by the OpenMP runtime.
/// \param VD Threadprivate variable.
/// \return Cache variable for the specified threadprivate.
llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);

/// Gets (if variable with the given name already exists) or creates
/// internal global variable with the specified Name. The created variable has
/// linkage CommonLinkage by default and is initialized by null value.
/// \param Ty Type of the global variable. If it exists already the type
/// must be the same.
/// \param Name Name of the variable.
llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
                                            const llvm::Twine &Name,
                                            unsigned AddressSpace = 0);

/// Set of threadprivate variables with the generated initializer.
llvm::StringSet<> ThreadPrivateWithDefinition;

/// Set of declare target variables with the generated initializer.
llvm::StringSet<> DeclareTargetWithDefinition;

/// Emits initialization code for the threadprivate variables.
/// \param VDAddr Address of the global variable \a VD.
/// \param Ctor Pointer to a global init function for \a VD.
/// \param CopyCtor Pointer to a global copy function for \a VD.
/// \param Dtor Pointer to a global destructor function for \a VD.
/// \param Loc Location of threadprivate declaration.
void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr,
                              llvm::Value *Ctor, llvm::Value *CopyCtor,
                              llvm::Value *Dtor, SourceLocation Loc);

/// Emit the array initialization or deletion portion for user-defined mapper
/// code generation.
void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF,
                                llvm::Value *Handle, llvm::Value *BasePtr,
                                llvm::Value *Ptr, llvm::Value *Size,
                                llvm::Value *MapType, CharUnits ElementSize,
                                llvm::BasicBlock *ExitBB, bool IsInit);

struct TaskResultTy {
  llvm::Value *NewTask = nullptr;
  llvm::Function *TaskEntry = nullptr;
  llvm::Value *NewTaskNewTaskTTy = nullptr;
  LValue TDBase;
  const RecordDecl *KmpTaskTQTyRD = nullptr;
  llvm::Value *TaskDupFn = nullptr;
};

/// Emit task region for the task directive. The task region is emitted in
/// several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
///   TaskFunction(gtid, tt->part_id, tt->shareds);
///   return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
                          const OMPExecutableDirective &D,
                          llvm::Function *TaskFunction, QualType SharedsTy,
                          Address Shareds, const OMPTaskDataTy &Data);

/// Returns default address space for the constant firstprivates, 0 by
/// default.
virtual unsigned getDefaultFirstprivateAddressSpace() const { return 0; } /// Emit code that pushes the trip count of loops associated with constructs /// 'target teams distribute' and 'teams distribute parallel for'. /// \param SizeEmitter Emits the int64 value for the number of iterations of /// the associated loop. void emitTargetNumIterationsCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Value *DeviceID, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter); /// Emit update for lastprivate conditional data. void emitLastprivateConditionalUpdate(CodeGenFunction &CGF, LValue IVLVal, StringRef UniqueDeclName, LValue LVal, SourceLocation Loc); /// Returns the number of the elements and the address of the depobj /// dependency array. /// \return Number of elements in depobj array and the pointer to the array of /// dependencies. std::pair<llvm::Value *, LValue> getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal, SourceLocation Loc); public: explicit CGOpenMPRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM, ".", ".") {} virtual ~CGOpenMPRuntime() {} virtual void clear(); /// Emits code for OpenMP 'if' clause using specified \a CodeGen /// function. Here is the logic: /// if (Cond) { /// ThenGen(); /// } else { /// ElseGen(); /// } void emitIfClause(CodeGenFunction &CGF, const Expr *Cond, const RegionCodeGenTy &ThenGen, const RegionCodeGenTy &ElseGen); /// Checks if the \p Body is the \a CompoundStmt and returns its child /// statement iff there is only one that is not evaluatable at the compile /// time. static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body); /// Get the platform-specific name separator. std::string getName(ArrayRef<StringRef> Parts) const; /// Emit code for the specified user defined reduction construct. 
virtual void emitUserDefinedReduction(CodeGenFunction *CGF, const OMPDeclareReductionDecl *D); /// Get combiner/initializer for the specified user-defined reduction, if any. virtual std::pair<llvm::Function *, llvm::Function *> getUserDefinedReduction(const OMPDeclareReductionDecl *D); /// Emit the function for the user defined mapper construct. void emitUserDefinedMapper(const OMPDeclareMapperDecl *D, CodeGenFunction *CGF = nullptr); /// Emits outlined function for the specified OpenMP parallel directive /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID, /// kmp_int32 BoundID, struct context_vars*). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. virtual llvm::Function *emitParallelOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen); /// Emits outlined function for the specified OpenMP teams directive /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID, /// kmp_int32 BoundID, struct context_vars*). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. virtual llvm::Function *emitTeamsOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen); /// Emits outlined function for the OpenMP task directive \a D. This /// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t* /// TaskT). 
/// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param PartIDVar Variable for partition id in the current OpenMP untied /// task region. /// \param TaskTVar Variable for task_t argument. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. /// \param Tied true if task is generated for tied task, false otherwise. /// \param NumberOfParts Number of parts in untied task. Ignored for tied /// tasks. /// virtual llvm::Function *emitTaskOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, const VarDecl *PartIDVar, const VarDecl *TaskTVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool Tied, unsigned &NumberOfParts); /// Cleans up references to the objects in finished function. /// virtual void functionFinished(CodeGenFunction &CGF); /// Emits code for parallel or serial call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run in parallel threads. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// \param IfCond Condition in the associated 'if' clause, if it was /// specified, nullptr otherwise. /// virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond); /// Emits a critical region. /// \param CriticalName Name of the critical region. /// \param CriticalOpGen Generator for the statement associated with the given /// critical region. /// \param Hint Value of the 'hint' clause (optional). 
virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint = nullptr); /// Emits a master region. /// \param MasterOpGen Generator for the statement associated with the given /// master region. virtual void emitMasterRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc); /// Emits code for a taskyield directive. virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc); /// Emit a taskgroup region. /// \param TaskgroupOpGen Generator for the statement associated with the /// given taskgroup region. virtual void emitTaskgroupRegion(CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen, SourceLocation Loc); /// Emits a single region. /// \param SingleOpGen Generator for the statement associated with the given /// single region. virtual void emitSingleRegion(CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen, SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps); /// Emit an ordered region. /// \param OrderedOpGen Generator for the statement associated with the given /// ordered region. virtual void emitOrderedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &OrderedOpGen, SourceLocation Loc, bool IsThreads); /// Emit an implicit/explicit barrier for OpenMP threads. /// \param Kind Directive for which this implicit barrier call must be /// generated. Must be OMPD_barrier for explicit barrier generation. /// \param EmitChecks true if need to emit checks for cancellation barriers. /// \param ForceSimpleCall true simple barrier call must be emitted, false if /// runtime class decides which one to emit (simple or with cancellation /// checks). 
/// virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks = true, bool ForceSimpleCall = false); /// Check if the specified \a ScheduleKind is static non-chunked. /// This kind of worksharing directive is emitted without outer loop. /// \param ScheduleKind Schedule kind specified in the 'schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static non-chunked. /// This kind of distribute directive is emitted without outer loop. /// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static chunked. /// \param ScheduleKind Schedule kind specified in the 'schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is static non-chunked. /// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause. /// \param Chunked True if chunk is specified in the clause. /// virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind, bool Chunked) const; /// Check if the specified \a ScheduleKind is dynamic. /// This kind of worksharing directive is emitted without outer loop. /// \param ScheduleKind Schedule Kind specified in the 'schedule' clause. 
/// virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const; /// struct with the values to be passed to the dispatch runtime function struct DispatchRTInput { /// Loop lower bound llvm::Value *LB = nullptr; /// Loop upper bound llvm::Value *UB = nullptr; /// Chunk size specified using 'schedule' clause (nullptr if chunk /// was not specified) llvm::Value *Chunk = nullptr; DispatchRTInput() = default; DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk) : LB(LB), UB(UB), Chunk(Chunk) {} }; /// Call the appropriate runtime routine to initialize it before start /// of loop. /// This is used for non static scheduled types and when the ordered /// clause is present on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds \a LB and \a UB and stride \a ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param Ordered true if loop is ordered, false otherwise. /// \param DispatchValues struct containing llvm values for lower bound, upper /// bound, and chunk expression. /// For the default (nullptr) value, the chunk 1 will be used. /// virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues); /// Struct with the values to be passed to the static runtime function struct StaticRTInput { /// Size of the iteration variable in bits. unsigned IVSize = 0; /// Sign of the iteration variable. bool IVSigned = false; /// true if loop is ordered, false otherwise. 
bool Ordered = false; /// Address of the output variable in which the flag of the last iteration /// is returned. Address IL = Address::invalid(); /// Address of the output variable in which the lower iteration number is /// returned. Address LB = Address::invalid(); /// Address of the output variable in which the upper iteration number is /// returned. Address UB = Address::invalid(); /// Address of the output variable in which the stride value is returned /// necessary to generated the static_chunked scheduled loop. Address ST = Address::invalid(); /// Value of the chunk for the static_chunked scheduled loop. For the /// default (nullptr) value, the chunk 1 will be used. llvm::Value *Chunk = nullptr; StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL, Address LB, Address UB, Address ST, llvm::Value *Chunk = nullptr) : IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB), UB(UB), ST(ST), Chunk(Chunk) {} }; /// Call the appropriate runtime routine to initialize it before start /// of loop. /// /// This is used only in case of static schedule, when the user did not /// specify a ordered clause on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds LB and UB and stride ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param Values Input arguments for the construct. /// virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind, const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values); /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause. 
/// \param Values Input arguments for the construct. /// virtual void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values); /// Call the appropriate runtime routine to notify that we finished /// iteration of the ordered loop with the dynamic scheduling. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned); /// Call the appropriate runtime routine to notify that we finished /// all the work with current loop. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive for which the static finish is emitted. /// virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind); /// Call __kmpc_dispatch_next( /// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter, /// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper, /// kmp_int[32|64] *p_stride); /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param IL Address of the output variable in which the flag of the /// last iteration is returned. /// \param LB Address of the output variable in which the lower iteration /// number is returned. /// \param UB Address of the output variable in which the upper iteration /// number is returned. /// \param ST Address of the output variable in which the stride value is /// returned. 
virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST); /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads' /// clause. /// \param NumThreads An integer value of threads. virtual void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc); /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 /// global_tid, int proc_bind) to generate code for 'proc_bind' clause. virtual void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc); /// Returns address of the threadprivate variable for the current /// thread. /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of the reference to threadprivate var. /// \return Address of the threadprivate variable for the current thread. virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc); /// Returns the address of the variable marked as declare target with link /// clause OR as declare target with to clause and unified memory. virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD); /// Emit a code for initialization of threadprivate variable. It emits /// a call to runtime library which adds initial value to the newly created /// threadprivate variable (if it is not constant) and registers destructor /// for the variable (if any). /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of threadprivate declaration. /// \param PerformInit true if initialization expression is not constant. 
virtual llvm::Function * emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF = nullptr); /// Emit a code for initialization of declare target variable. /// \param VD Declare target variable. /// \param Addr Address of the global variable \a VD. /// \param PerformInit true if initialization expression is not constant. virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD, llvm::GlobalVariable *Addr, bool PerformInit); /// Creates artificial threadprivate variable with name \p Name and type \p /// VarType. /// \param VarType Type of the artificial threadprivate variable. /// \param Name Name of the artificial threadprivate variable. virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF, QualType VarType, StringRef Name); /// Emit flush of the variables specified in 'omp flush' directive. /// \param Vars List of variables to flush. virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars, SourceLocation Loc, llvm::AtomicOrdering AO); /// Emit task region for the task directive. The task region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, /// kmp_task_t *new_task), where new_task is a resulting structure from /// previous items. /// \param D Current task directive. 
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data); /// Emit task region for the taskloop directive. The taskloop region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t /// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int /// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task /// is a resulting structure from /// previous items. /// \param D Current task directive. /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. 
/// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data); /// Emit code for the directive that does not require outlining. /// /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. /// \param HasCancel true if region has inner cancel directive, false /// otherwise. virtual void emitInlinedDirective(CodeGenFunction &CGF, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool HasCancel = false); /// Emits reduction function. /// \param ArgsType Array type containing pointers to reduction variables. /// \param Privates List of private copies for original reduction arguments. /// \param LHSExprs List of LHS in \a ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a ReductionOps reduction operations. /// \param ReductionOps List of reduction operations in form 'LHS binop RHS' /// or 'operator binop(LHS, RHS)'. 
llvm::Function *emitReductionFunction(SourceLocation Loc, llvm::Type *ArgsType, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps); /// Emits single reduction combiner void emitSingleReductionCombiner(CodeGenFunction &CGF, const Expr *ReductionOp, const Expr *PrivateRef, const DeclRefExpr *LHS, const DeclRefExpr *RHS); struct ReductionOptionsTy { bool WithNowait; bool SimpleReduction; OpenMPDirectiveKind ReductionKind; }; /// Emit a code for reduction clause. Next code should be emitted for /// reduction: /// \code /// /// static kmp_critical_name lock = { 0 }; /// /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) { /// ... /// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]); /// ... /// } /// /// ... /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]}; /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList), /// RedList, reduce_func, &<lock>)) { /// case 1: /// ... /// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]); /// ... /// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>); /// break; /// case 2: /// ... /// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i])); /// ... /// break; /// default:; /// } /// \endcode /// /// \param Privates List of private copies for original reduction arguments. /// \param LHSExprs List of LHS in \a ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a ReductionOps reduction operations. /// \param ReductionOps List of reduction operations in form 'LHS binop RHS' /// or 'operator binop(LHS, RHS)'. /// \param Options List of options for reduction codegen: /// WithNowait true if parent directive has also nowait clause, false /// otherwise. /// SimpleReduction Emit reduction operation only. Used for omp simd /// directive on the host. /// ReductionKind The kind of reduction to perform. 
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options); /// Emit a code for initialization of task reduction clause. Next code /// should be emitted for reduction: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_init(gtid, n, red_data); /// \endcode /// For reduction clause with task modifier it emits the next call: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n, /// red_data); /// \endcode /// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations. /// \param Data Additional data for task generation like tiedness, final /// state, list of privates, reductions etc. 
  virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF,
                                             SourceLocation Loc,
                                             ArrayRef<const Expr *> LHSExprs,
                                             ArrayRef<const Expr *> RHSExprs,
                                             const OMPTaskDataTy &Data);

  /// Emits the following code for reduction clause with task modifier:
  /// \code
  /// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing);
  /// \endcode
  virtual void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc,
                                     bool IsWorksharingReduction);

  /// Required to resolve existing problems in the runtime. Emits threadprivate
  /// variables to store the size of the VLAs/array sections for
  /// initializer/combiner/finalizer functions.
  /// \param RCG Allows to reuse an existing data for the reductions.
  /// \param N Reduction item for which fixups must be emitted.
  virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
                                       ReductionCodeGen &RCG, unsigned N);

  /// Get the address of `void *` type of the private copy of the reduction
  /// item specified by the \p SharedLVal.
  /// \param ReductionsPtr Pointer to the reduction data returned by the
  /// emitTaskReductionInit function.
  /// \param SharedLVal Address of the original reduction item.
  virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
                                       llvm::Value *ReductionsPtr,
                                       LValue SharedLVal);

  /// Emit code for 'taskwait' directive.
  virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);

  /// Emit code for 'cancellation point' construct.
  /// \param CancelRegion Region kind for which the cancellation point must be
  /// emitted.
  ///
  virtual void emitCancellationPointCall(CodeGenFunction &CGF,
                                         SourceLocation Loc,
                                         OpenMPDirectiveKind CancelRegion);

  /// Emit code for 'cancel' construct.
  /// \param IfCond Condition in the associated 'if' clause, if it was
  /// specified, nullptr otherwise.
  /// \param CancelRegion Region kind for which the cancel must be emitted.
/// virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc, const Expr *IfCond, OpenMPDirectiveKind CancelRegion); /// Emit outilined function for 'target' directive. /// \param D Directive to emit. /// \param ParentName Name of the function that encloses the target region. /// \param OutlinedFn Outlined function value to be defined by this call. /// \param OutlinedFnID Outlined function ID value to be defined by this call. /// \param IsOffloadEntry True if the outlined function is an offload entry. /// \param CodeGen Code generation sequence for the \a D directive. /// An outlined function may not be an entry if, e.g. the if clause always /// evaluates to false. virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen); /// Emit the target offloading code associated with \a D. The emitted /// code attempts offloading the execution to the device, an the event of /// a failure it executes the host version outlined in \a OutlinedFn. /// \param D Directive to emit. /// \param OutlinedFn Host version of the code to be offloaded. /// \param OutlinedFnID ID of host version of the code to be offloaded. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used and device modifier. /// \param SizeEmitter Callback to emit number of iterations for loop-based /// directives. 
  virtual void emitTargetCall(
      CodeGenFunction &CGF, const OMPExecutableDirective &D,
      llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond,
      llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device,
      llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
                                       const OMPLoopDirective &D)>
          SizeEmitter);

  /// Emit the target regions enclosed in \a GD function definition or
  /// the function itself in case it is a valid device function. Returns true if
  /// \a GD was dealt with successfully.
  /// \param GD Function to scan.
  virtual bool emitTargetFunctions(GlobalDecl GD);

  /// Emit the global variable if it is a valid device global variable.
  /// Returns true if \a GD was dealt with successfully.
  /// \param GD Variable declaration to emit.
  virtual bool emitTargetGlobalVariable(GlobalDecl GD);

  /// Checks if the provided global decl \a GD is a declare target variable and
  /// registers it when emitting code for the host.
  virtual void registerTargetGlobalVariable(const VarDecl *VD,
                                            llvm::Constant *Addr);

  /// Registers provided target firstprivate variable as global on the
  /// target.
  llvm::Constant *registerTargetFirstprivateCopy(CodeGenFunction &CGF,
                                                 const VarDecl *VD);

  /// Emit the global \a GD if it is meaningful for the target. Returns
  /// if it was emitted successfully.
  /// \param GD Global to scan.
  virtual bool emitTargetGlobal(GlobalDecl GD);

  /// Creates and returns a registration function for when at least one
  /// requires directives was used in the current module.
  llvm::Function *emitRequiresDirectiveRegFun();

  /// Creates all the offload entries in the current compilation unit
  /// along with the associated metadata.
  void createOffloadEntriesAndInfoMetadata();

  /// Emits code for teams call of the \a OutlinedFn with
  /// variables captured in a record which address is stored in \a
  /// CapturedStruct.
  /// \param OutlinedFn Outlined function to be run by team masters. Type of
  /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
  /// \param CapturedVars A pointer to the record with the references to
  /// variables used in \a OutlinedFn function.
  ///
  virtual void emitTeamsCall(CodeGenFunction &CGF,
                             const OMPExecutableDirective &D,
                             SourceLocation Loc, llvm::Function *OutlinedFn,
                             ArrayRef<llvm::Value *> CapturedVars);

  /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
  /// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
  /// for num_teams clause.
  /// \param NumTeams An integer expression of teams.
  /// \param ThreadLimit An integer expression of threads.
  virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
                                  const Expr *ThreadLimit, SourceLocation Loc);

  /// Struct that keeps all the relevant information that should be kept
  /// throughout a 'target data' region.
  class TargetDataInfo {
    /// Set to true if device pointer information has to be obtained.
    bool RequiresDevicePointerInfo = false;

  public:
    /// The array of base pointer passed to the runtime library.
    llvm::Value *BasePointersArray = nullptr;
    /// The array of section pointers passed to the runtime library.
    llvm::Value *PointersArray = nullptr;
    /// The array of sizes passed to the runtime library.
    llvm::Value *SizesArray = nullptr;
    /// The array of map types passed to the runtime library.
    llvm::Value *MapTypesArray = nullptr;
    /// The total number of pointers passed to the runtime library.
    unsigned NumberOfPtrs = 0u;
    /// Map between a declaration of a capture and the corresponding base
    /// pointer address where the runtime returns the device pointers.
    llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;

    explicit TargetDataInfo() {}
    explicit TargetDataInfo(bool RequiresDevicePointerInfo)
        : RequiresDevicePointerInfo(RequiresDevicePointerInfo) {}
    /// Clear information about the data arrays.
void clearArrayInfo() { BasePointersArray = nullptr; PointersArray = nullptr; SizesArray = nullptr; MapTypesArray = nullptr; NumberOfPtrs = 0u; } /// Return true if the current target data information has valid arrays. bool isValid() { return BasePointersArray && PointersArray && SizesArray && MapTypesArray && NumberOfPtrs; } bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; } }; /// Emit the target data mapping code associated with \a D. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the /// target directive, or null if no device clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. /// \param Info A record used to store information that needs to be preserved /// until the region is closed. virtual void emitTargetDataCalls(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info); /// Emit the data mapping/movement code associated with the directive /// \a D that should be of the form 'target [{enter|exit} data | update]'. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device); /// Marks function \a Fn with properly mangled versions of vector functions. /// \param FD Function marked as 'declare simd'. /// \param Fn LLVM function that must be marked with 'declare simd' /// attributes. virtual void emitDeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn); /// Emit initialization for doacross loop nesting support. 
/// \param D Loop-based construct used in doacross nesting construct. virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D, ArrayRef<Expr *> NumIterations); /// Emit code for doacross ordered directive with 'depend' clause. /// \param C 'depend' clause with 'sink|source' dependency kind. virtual void emitDoacrossOrdered(CodeGenFunction &CGF, const OMPDependClause *C); /// Translates the native parameter of outlined function if this is required /// for target. /// \param FD Field decl from captured record for the parameter. /// \param NativeParam Parameter itself. virtual const VarDecl *translateParameter(const FieldDecl *FD, const VarDecl *NativeParam) const { return NativeParam; } /// Gets the address of the native argument basing on the address of the /// target-specific parameter. /// \param NativeParam Parameter itself. /// \param TargetParam Corresponding target-specific parameter. virtual Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const; /// Choose default schedule type and chunk value for the /// dist_schedule clause. virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind, llvm::Value *&Chunk) const {} /// Choose default schedule type and chunk value for the /// schedule clause. virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF, const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind, const Expr *&ChunkExpr) const; /// Emits call of the outlined function with the provided arguments, /// translating these arguments to correct target-specific arguments. virtual void emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::FunctionCallee OutlinedFn, ArrayRef<llvm::Value *> Args = llvm::None) const; /// Emits OpenMP-specific function prolog. /// Required for device constructs. 
virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D); /// Gets the OpenMP-specific address of the local variable. virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF, const VarDecl *VD); /// Marks the declaration as already emitted for the device code and returns /// true, if it was marked already, and false, otherwise. bool markAsGlobalTarget(GlobalDecl GD); /// Emit deferred declare target variables marked for deferred emission. void emitDeferredTargetDecls() const; /// Adjust some parameters for the target-based directives, like addresses of /// the variables captured by reference in lambdas. virtual void adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF, const OMPExecutableDirective &D) const; /// Perform check on requires decl to ensure that target architecture /// supports unified addressing virtual void processRequiresDirective(const OMPRequiresDecl *D); /// Gets default memory ordering as specified in requires directive. llvm::AtomicOrdering getDefaultMemoryOrdering() const; /// Checks if the variable has associated OMPAllocateDeclAttr attribute with /// the predefined allocator and translates it into the corresponding address /// space. virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS); /// Return whether the unified_shared_memory has been specified. bool hasRequiresUnifiedSharedMemory() const; /// Checks if the \p VD variable is marked as nontemporal declaration in /// current context. bool isNontemporalDecl(const ValueDecl *VD) const; /// Create specialized alloca to handle lastprivate conditionals. Address emitLastprivateConditionalInit(CodeGenFunction &CGF, const VarDecl *VD); /// Checks if the provided \p LVal is lastprivate conditional and emits the /// code to update the value of the original variable. /// \code /// lastprivate(conditional: a) /// ... 
/// <type> a; /// lp_a = ...; /// #pragma omp critical(a) /// if (last_iv_a <= iv) { /// last_iv_a = iv; /// global_a = lp_a; /// } /// \endcode virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF, const Expr *LHS); /// Checks if the lastprivate conditional was updated in inner region and /// writes the value. /// \code /// lastprivate(conditional: a) /// ... /// <type> a;bool Fired = false; /// #pragma omp ... shared(a) /// { /// lp_a = ...; /// Fired = true; /// } /// if (Fired) { /// #pragma omp critical(a) /// if (last_iv_a <= iv) { /// last_iv_a = iv; /// global_a = lp_a; /// } /// Fired = false; /// } /// \endcode virtual void checkAndEmitSharedLastprivateConditional( CodeGenFunction &CGF, const OMPExecutableDirective &D, const llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> &IgnoredDecls); /// Gets the address of the global copy used for lastprivate conditional /// update, if any. /// \param PrivLVal LValue for the private copy. /// \param VD Original lastprivate declaration. virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF, LValue PrivLVal, const VarDecl *VD, SourceLocation Loc); /// Emits list of dependecies based on the provided data (array of /// dependence/expression pairs). /// \returns Pointer to the first element of the array casted to VoidPtr type. std::pair<llvm::Value *, Address> emitDependClause(CodeGenFunction &CGF, ArrayRef<OMPTaskDataTy::DependData> Dependencies, SourceLocation Loc); /// Emits list of dependecies based on the provided data (array of /// dependence/expression pairs) for depobj construct. In this case, the /// variable is allocated in dynamically. \returns Pointer to the first /// element of the array casted to VoidPtr type. Address emitDepobjDependClause(CodeGenFunction &CGF, const OMPTaskDataTy::DependData &Dependencies, SourceLocation Loc); /// Emits the code to destroy the dependency object provided in depobj /// directive. 
  void emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
                         SourceLocation Loc);

  /// Updates the dependency kind in the specified depobj object.
  /// \param DepobjLVal LValue for the main depobj object.
  /// \param NewDepKind New dependency kind.
  void emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
                        OpenMPDependClauseKind NewDepKind, SourceLocation Loc);

  /// Initializes user defined allocators specified in the uses_allocators
  /// clauses.
  void emitUsesAllocatorsInit(CodeGenFunction &CGF, const Expr *Allocator,
                              const Expr *AllocatorTraits);

  /// Destroys user defined allocators specified in the uses_allocators clause.
  void emitUsesAllocatorsFini(CodeGenFunction &CGF, const Expr *Allocator);
};

/// Class supports emission of SIMD-only code.
class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
public:
  explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}
  ~CGOpenMPSIMDRuntime() override {}

  /// Emits outlined function for the specified OpenMP parallel directive
  /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
  /// kmp_int32 BoundID, struct context_vars*).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
  /// \param InnermostKind Kind of innermost directive (for simple directives it
  /// is a directive itself, for combined - its innermost directive).
  /// \param CodeGen Code generation sequence for the \a D directive.
  llvm::Function *
  emitParallelOutlinedFunction(const OMPExecutableDirective &D,
                               const VarDecl *ThreadIDVar,
                               OpenMPDirectiveKind InnermostKind,
                               const RegionCodeGenTy &CodeGen) override;

  /// Emits outlined function for the specified OpenMP teams directive
  /// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
  /// kmp_int32 BoundID, struct context_vars*).
  /// \param D OpenMP directive.
  /// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. llvm::Function * emitTeamsOutlinedFunction(const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) override; /// Emits outlined function for the OpenMP task directive \a D. This /// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t* /// TaskT). /// \param D OpenMP directive. /// \param ThreadIDVar Variable for thread id in the current OpenMP region. /// \param PartIDVar Variable for partition id in the current OpenMP untied /// task region. /// \param TaskTVar Variable for task_t argument. /// \param InnermostKind Kind of innermost directive (for simple directives it /// is a directive itself, for combined - its innermost directive). /// \param CodeGen Code generation sequence for the \a D directive. /// \param Tied true if task is generated for tied task, false otherwise. /// \param NumberOfParts Number of parts in untied task. Ignored for tied /// tasks. /// llvm::Function *emitTaskOutlinedFunction( const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, const VarDecl *PartIDVar, const VarDecl *TaskTVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen, bool Tied, unsigned &NumberOfParts) override; /// Emits code for parallel or serial call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run in parallel threads. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// \param IfCond Condition in the associated 'if' clause, if it was /// specified, nullptr otherwise. 
/// void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) override; /// Emits a critical region. /// \param CriticalName Name of the critical region. /// \param CriticalOpGen Generator for the statement associated with the given /// critical region. /// \param Hint Value of the 'hint' clause (optional). void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName, const RegionCodeGenTy &CriticalOpGen, SourceLocation Loc, const Expr *Hint = nullptr) override; /// Emits a master region. /// \param MasterOpGen Generator for the statement associated with the given /// master region. void emitMasterRegion(CodeGenFunction &CGF, const RegionCodeGenTy &MasterOpGen, SourceLocation Loc) override; /// Emits code for a taskyield directive. void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override; /// Emit a taskgroup region. /// \param TaskgroupOpGen Generator for the statement associated with the /// given taskgroup region. void emitTaskgroupRegion(CodeGenFunction &CGF, const RegionCodeGenTy &TaskgroupOpGen, SourceLocation Loc) override; /// Emits a single region. /// \param SingleOpGen Generator for the statement associated with the given /// single region. void emitSingleRegion(CodeGenFunction &CGF, const RegionCodeGenTy &SingleOpGen, SourceLocation Loc, ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs, ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps) override; /// Emit an ordered region. /// \param OrderedOpGen Generator for the statement associated with the given /// ordered region. void emitOrderedRegion(CodeGenFunction &CGF, const RegionCodeGenTy &OrderedOpGen, SourceLocation Loc, bool IsThreads) override; /// Emit an implicit/explicit barrier for OpenMP threads. /// \param Kind Directive for which this implicit barrier call must be /// generated. Must be OMPD_barrier for explicit barrier generation. 
/// \param EmitChecks true if need to emit checks for cancellation barriers. /// \param ForceSimpleCall true simple barrier call must be emitted, false if /// runtime class decides which one to emit (simple or with cancellation /// checks). /// void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind Kind, bool EmitChecks = true, bool ForceSimpleCall = false) override; /// This is used for non static scheduled types and when the ordered /// clause is present on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds \a LB and \a UB and stride \a ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param Ordered true if loop is ordered, false otherwise. /// \param DispatchValues struct containing llvm values for lower bound, upper /// bound, and chunk expression. /// For the default (nullptr) value, the chunk 1 will be used. /// void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc, const OpenMPScheduleTy &ScheduleKind, unsigned IVSize, bool IVSigned, bool Ordered, const DispatchRTInput &DispatchValues) override; /// Call the appropriate runtime routine to initialize it before start /// of loop. /// /// This is used only in case of static schedule, when the user did not /// specify a ordered clause on the loop construct. /// Depending on the loop schedule, it is necessary to call some runtime /// routine before start of the OpenMP loop to get the loop upper / lower /// bounds LB and UB and stride ST. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive. 
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause. /// \param Values Input arguments for the construct. /// void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind, const OpenMPScheduleTy &ScheduleKind, const StaticRTInput &Values) override; /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause. /// \param Values Input arguments for the construct. /// void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDistScheduleClauseKind SchedKind, const StaticRTInput &Values) override; /// Call the appropriate runtime routine to notify that we finished /// iteration of the ordered loop with the dynamic scheduling. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned) override; /// Call the appropriate runtime routine to notify that we finished /// all the work with current loop. /// /// \param CGF Reference to current CodeGenFunction. /// \param Loc Clang source location. /// \param DKind Kind of the directive for which the static finish is emitted. /// void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind DKind) override; /// Call __kmpc_dispatch_next( /// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter, /// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper, /// kmp_int[32|64] *p_stride); /// \param IVSize Size of the iteration variable in bits. /// \param IVSigned Sign of the iteration variable. /// \param IL Address of the output variable in which the flag of the /// last iteration is returned. 
/// \param LB Address of the output variable in which the lower iteration /// number is returned. /// \param UB Address of the output variable in which the upper iteration /// number is returned. /// \param ST Address of the output variable in which the stride value is /// returned. llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc, unsigned IVSize, bool IVSigned, Address IL, Address LB, Address UB, Address ST) override; /// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_threads) to generate code for 'num_threads' /// clause. /// \param NumThreads An integer value of threads. void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc) override; /// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 /// global_tid, int proc_bind) to generate code for 'proc_bind' clause. void emitProcBindClause(CodeGenFunction &CGF, llvm::omp::ProcBindKind ProcBind, SourceLocation Loc) override; /// Returns address of the threadprivate variable for the current /// thread. /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of the reference to threadprivate var. /// \return Address of the threadprivate variable for the current thread. Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr, SourceLocation Loc) override; /// Emit a code for initialization of threadprivate variable. It emits /// a call to runtime library which adds initial value to the newly created /// threadprivate variable (if it is not constant) and registers destructor /// for the variable (if any). /// \param VD Threadprivate variable. /// \param VDAddr Address of the global variable \a VD. /// \param Loc Location of threadprivate declaration. /// \param PerformInit true if initialization expression is not constant. 
llvm::Function * emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr, SourceLocation Loc, bool PerformInit, CodeGenFunction *CGF = nullptr) override; /// Creates artificial threadprivate variable with name \p Name and type \p /// VarType. /// \param VarType Type of the artificial threadprivate variable. /// \param Name Name of the artificial threadprivate variable. Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF, QualType VarType, StringRef Name) override; /// Emit flush of the variables specified in 'omp flush' directive. /// \param Vars List of variables to flush. void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars, SourceLocation Loc, llvm::AtomicOrdering AO) override; /// Emit task region for the task directive. The task region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid, /// kmp_task_t *new_task), where new_task is a resulting structure from /// previous items. /// \param D Current task directive. /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. 
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPExecutableDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) override; /// Emit task region for the taskloop directive. The taskloop region is /// emitted in several steps: /// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32 /// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds, /// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the /// function: /// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) { /// TaskFunction(gtid, tt->part_id, tt->shareds); /// return 0; /// } /// 2. Copy a list of shared variables to field shareds of the resulting /// structure kmp_task_t returned by the previous call (if any). /// 3. Copy a pointer to destructions function to field destructions of the /// resulting structure kmp_task_t. /// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t /// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int /// nogroup, int sched, kmp_uint64 grainsize, void *task_dup ), where new_task /// is a resulting structure from /// previous items. /// \param D Current task directive. /// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32 /// /*part_id*/, captured_struct */*__context*/); /// \param SharedsTy A type which contains references the shared variables. /// \param Shareds Context with the list of shared variables from the \p /// TaskFunction. /// \param IfCond Not a nullptr if 'if' clause was specified, nullptr /// otherwise. /// \param Data Additional data for task generation like tiednsee, final /// state, list of privates etc. 
void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc, const OMPLoopDirective &D, llvm::Function *TaskFunction, QualType SharedsTy, Address Shareds, const Expr *IfCond, const OMPTaskDataTy &Data) override; /// Emit a code for reduction clause. Next code should be emitted for /// reduction: /// \code /// /// static kmp_critical_name lock = { 0 }; /// /// void reduce_func(void *lhs[<n>], void *rhs[<n>]) { /// ... /// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]); /// ... /// } /// /// ... /// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]}; /// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList), /// RedList, reduce_func, &<lock>)) { /// case 1: /// ... /// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]); /// ... /// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>); /// break; /// case 2: /// ... /// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i])); /// ... /// break; /// default:; /// } /// \endcode /// /// \param Privates List of private copies for original reduction arguments. /// \param LHSExprs List of LHS in \a ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a ReductionOps reduction operations. /// \param ReductionOps List of reduction operations in form 'LHS binop RHS' /// or 'operator binop(LHS, RHS)'. /// \param Options List of options for reduction codegen: /// WithNowait true if parent directive has also nowait clause, false /// otherwise. /// SimpleReduction Emit reduction operation only. Used for omp simd /// directive on the host. /// ReductionKind The kind of reduction to perform. void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) override; /// Emit a code for initialization of task reduction clause. 
Next code /// should be emitted for reduction: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_init(gtid, n, red_data); /// \endcode /// For reduction clause with task modifier it emits the next call: /// \code /// /// _taskred_item_t red_data[n]; /// ... /// red_data[i].shar = &shareds[i]; /// red_data[i].orig = &origs[i]; /// red_data[i].size = sizeof(origs[i]); /// red_data[i].f_init = (void*)RedInit<i>; /// red_data[i].f_fini = (void*)RedDest<i>; /// red_data[i].f_comb = (void*)RedOp<i>; /// red_data[i].flags = <Flag_i>; /// ... /// void* tg1 = __kmpc_taskred_modifier_init(loc, gtid, is_worksharing, n, /// red_data); /// \endcode /// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations. /// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations. /// \param Data Additional data for task generation like tiedness, final /// state, list of privates, reductions etc. llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs, const OMPTaskDataTy &Data) override; /// Emits the following code for reduction clause with task modifier: /// \code /// __kmpc_task_reduction_modifier_fini(loc, gtid, is_worksharing); /// \endcode void emitTaskReductionFini(CodeGenFunction &CGF, SourceLocation Loc, bool IsWorksharingReduction) override; /// Required to resolve existing problems in the runtime. 
Emits threadprivate /// variables to store the size of the VLAs/array sections for /// initializer/combiner/finalizer functions + emits threadprivate variable to /// store the pointer to the original reduction item for the custom /// initializer defined by declare reduction construct. /// \param RCG Allows to reuse an existing data for the reductions. /// \param N Reduction item for which fixups must be emitted. void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc, ReductionCodeGen &RCG, unsigned N) override; /// Get the address of `void *` type of the privatue copy of the reduction /// item specified by the \p SharedLVal. /// \param ReductionsPtr Pointer to the reduction data returned by the /// emitTaskReductionInit function. /// \param SharedLVal Address of the original reduction item. Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *ReductionsPtr, LValue SharedLVal) override; /// Emit code for 'taskwait' directive. void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) override; /// Emit code for 'cancellation point' construct. /// \param CancelRegion Region kind for which the cancellation point must be /// emitted. /// void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc, OpenMPDirectiveKind CancelRegion) override; /// Emit code for 'cancel' construct. /// \param IfCond Condition in the associated 'if' clause, if it was /// specified, nullptr otherwise. /// \param CancelRegion Region kind for which the cancel must be emitted. /// void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc, const Expr *IfCond, OpenMPDirectiveKind CancelRegion) override; /// Emit outilined function for 'target' directive. /// \param D Directive to emit. /// \param ParentName Name of the function that encloses the target region. /// \param OutlinedFn Outlined function value to be defined by this call. /// \param OutlinedFnID Outlined function ID value to be defined by this call. 
/// \param IsOffloadEntry True if the outlined function is an offload entry. /// \param CodeGen Code generation sequence for the \a D directive. /// An outlined function may not be an entry if, e.g. the if clause always /// evaluates to false. void emitTargetOutlinedFunction(const OMPExecutableDirective &D, StringRef ParentName, llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID, bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) override; /// Emit the target offloading code associated with \a D. The emitted /// code attempts offloading the execution to the device, an the event of /// a failure it executes the host version outlined in \a OutlinedFn. /// \param D Directive to emit. /// \param OutlinedFn Host version of the code to be offloaded. /// \param OutlinedFnID ID of host version of the code to be offloaded. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used and device modifier. void emitTargetCall( CodeGenFunction &CGF, const OMPExecutableDirective &D, llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID, const Expr *IfCond, llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device, llvm::function_ref<llvm::Value *(CodeGenFunction &CGF, const OMPLoopDirective &D)> SizeEmitter) override; /// Emit the target regions enclosed in \a GD function definition or /// the function itself in case it is a valid device function. Returns true if /// \a GD was dealt with successfully. /// \param GD Function to scan. bool emitTargetFunctions(GlobalDecl GD) override; /// Emit the global variable if it is a valid device global variable. /// Returns true if \a GD was dealt with successfully. /// \param GD Variable declaration to emit. 
bool emitTargetGlobalVariable(GlobalDecl GD) override; /// Emit the global \a GD if it is meaningful for the target. Returns /// if it was emitted successfully. /// \param GD Global to scan. bool emitTargetGlobal(GlobalDecl GD) override; /// Emits code for teams call of the \a OutlinedFn with /// variables captured in a record which address is stored in \a /// CapturedStruct. /// \param OutlinedFn Outlined function to be run by team masters. Type of /// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*). /// \param CapturedVars A pointer to the record with the references to /// variables used in \a OutlinedFn function. /// void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, SourceLocation Loc, llvm::Function *OutlinedFn, ArrayRef<llvm::Value *> CapturedVars) override; /// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32 /// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code /// for num_teams clause. /// \param NumTeams An integer expression of teams. /// \param ThreadLimit An integer expression of threads. void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams, const Expr *ThreadLimit, SourceLocation Loc) override; /// Emit the target data mapping code associated with \a D. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the /// target directive, or null if no device clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. /// \param Info A record used to store information that needs to be preserved /// until the region is closed. 
void emitTargetDataCalls(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device, const RegionCodeGenTy &CodeGen, TargetDataInfo &Info) override; /// Emit the data mapping/movement code associated with the directive /// \a D that should be of the form 'target [{enter|exit} data | update]'. /// \param D Directive to emit. /// \param IfCond Expression evaluated in if clause associated with the target /// directive, or null if no if clause is used. /// \param Device Expression evaluated in device clause associated with the /// target directive, or null if no device clause is used. void emitTargetDataStandAloneCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, const Expr *IfCond, const Expr *Device) override; /// Emit initialization for doacross loop nesting support. /// \param D Loop-based construct used in doacross nesting construct. void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D, ArrayRef<Expr *> NumIterations) override; /// Emit code for doacross ordered directive with 'depend' clause. /// \param C 'depend' clause with 'sink|source' dependency kind. void emitDoacrossOrdered(CodeGenFunction &CGF, const OMPDependClause *C) override; /// Translates the native parameter of outlined function if this is required /// for target. /// \param FD Field decl from captured record for the parameter. /// \param NativeParam Parameter itself. const VarDecl *translateParameter(const FieldDecl *FD, const VarDecl *NativeParam) const override; /// Gets the address of the native argument basing on the address of the /// target-specific parameter. /// \param NativeParam Parameter itself. /// \param TargetParam Corresponding target-specific parameter. Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const override; /// Gets the OpenMP-specific address of the local variable. 
Address getAddressOfLocalVariable(CodeGenFunction &CGF,
                                  const VarDecl *VD) override {
  // This runtime provides no special storage for locals: returning the
  // invalid() sentinel signals "no runtime-specific address".
  // NOTE(review): the caller presumably falls back to its default emission
  // path on invalid() -- confirm against the base-class contract.
  return Address::invalid();
}
};

} // namespace CodeGen
} // namespace clang

#endif
renderer.c
#include <stdlib.h> #include <stdio.h> #include <math.h> #include <float.h> #include "vec.h" #include "renderer.h" #include "intersection.h" void initRenderer(Renderer* renderer, int width, int height, float hview, float vview) { renderer->width = width; renderer->height = height; renderer->horizontal_view = hview; renderer->vertical_view = vview; renderer->position = createVec3(0, 0, 0); renderer->direction = createVec3(0, 0, -1); renderer->up = createVec3(0, 1, 0); renderer->void_color = createVec3(0, 0, 0); renderer->pixel_samples = 128; renderer->depth = 100; renderer->specular_depth_cost = 20; renderer->diffuse_depth_cost = 30; renderer->transmition_depth_cost = 5; renderer->buffer = (Color*)malloc(sizeof(Color) * width * height); } void freeRenderer(Renderer* renderer) { free(renderer->buffer); } #include <assert.h> static Color computeRadiance(Ray* ray, Scene* scene, Renderer* renderer, int depth) { if (depth <= 0) { return renderer->void_color; } else { Intersection intersection = { .dist = INFINITY, // Maximum distance }; if (testRayBvhIntersection(ray, scene->bvh, &intersection)) { Vec3 vert0 = scene->vertecies[scene->vertex_indices[intersection.triangle_id][0]]; Vec3 vert1 = scene->vertecies[scene->vertex_indices[intersection.triangle_id][1]]; Vec3 vert2 = scene->vertecies[scene->vertex_indices[intersection.triangle_id][2]]; Vec3 vert = addVec3( scaleVec3(vert0, 1 - intersection.u - intersection.v), addVec3( scaleVec3(vert1, intersection.u), scaleVec3(vert2, intersection.v) ) ); Vec3 norm0 = scene->normals[scene->normal_indices[intersection.triangle_id][0]]; Vec3 norm1 = scene->normals[scene->normal_indices[intersection.triangle_id][1]]; Vec3 norm2 = scene->normals[scene->normal_indices[intersection.triangle_id][2]]; Vec3 normal = addVec3( scaleVec3(norm0, 1 - intersection.u - intersection.v), addVec3( scaleVec3(norm1, intersection.u), scaleVec3(norm2, intersection.v) ) ); bool outside = true; if (dotVec3(normal, ray->direction) > 0) { outside = false; 
normal = scaleVec3(normal, -1); } int object_id = scene->object_ids[intersection.triangle_id]; MaterialProperties* material = &scene->objects[object_id].material; Color c = material->emission_color; if (depth - renderer->diffuse_depth_cost > 0) { if (!isVec3Null(material->diffuse_color)) { Ray new_ray = createRay(vert, randomVec3InDirection(normal, 1, 1)); Color color = computeRadiance(&new_ray, scene, renderer, depth - renderer->diffuse_depth_cost); Color diffuse_color = mulVec3(color, material->diffuse_color); c = addVec3(c, diffuse_color); } } if (material->specular_sharpness != 0) { if (material->transmitability > 0.0 && !isVec3Null(material->transmition_color)) { float n1 = outside ? 1.0 : material->index_of_refraction; float n2 = outside ? material->index_of_refraction : 1.0; float cosO = -dotVec3(ray->direction, normal); float r0 = (n1 - n2) / (n1 + n2); r0 *= r0; float refl = r0 + (1 - r0) * powf(1 - cosO, 5); if (refl * (float)RAND_MAX > rand()) { if (depth - renderer->specular_depth_cost > 0) { Vec3 reflection = subVec3(ray->direction, scaleVec3(normal, 2 * dotVec3(ray->direction, normal))); Vec3 direction = randomVec3InDirection(reflection, 1, material->specular_sharpness); Ray new_ray = createRay(vert, direction); Color color = computeRadiance(&new_ray, scene, renderer, depth - renderer->specular_depth_cost); Color reflection_color = mulVec3(color, material->specular_color); c = addVec3(c, reflection_color); } } else { if (depth - renderer->transmition_depth_cost > 0) { float angle = acosf(cosO); float sinO = sinf(angle); Vec3 transmition = addVec3(scaleVec3(ray->direction, n1 / n2), scaleVec3(normal, (cosO * n1 / n2 - sqrtf(1 - sinO * sinO)))); Vec3 direction = randomVec3InDirection(transmition, 1, material->specular_sharpness); Ray new_ray = createRay(vert, direction); Color color = computeRadiance(&new_ray, scene, renderer, depth - renderer->transmition_depth_cost); Color reflection_color = scaleVec3(color, material->transmitability); 
reflection_color = mulVec3(reflection_color, material->transmition_color); c = addVec3(c, reflection_color); } } } else if (depth - renderer->specular_depth_cost > 0) { if (!isVec3Null(material->specular_color)) { Vec3 reflection = subVec3(ray->direction, scaleVec3(normal, 2 * dotVec3(ray->direction, normal))); Vec3 direction = randomVec3InDirection(reflection, 1, material->specular_sharpness); Ray new_ray = createRay(vert, direction); Color color = computeRadiance(&new_ray, scene, renderer, depth - renderer->specular_depth_cost); Color specular_color = mulVec3(color, material->specular_color); c = addVec3(c, specular_color); } } } return c; } else { return renderer->void_color; } } } void renderScene(Renderer* renderer, Scene* scene) { Vec3 right = normalizeVec3(crossVec3(renderer->direction, renderer->up)); Vec3 down = normalizeVec3(crossVec3(renderer->direction, right)); Vec3 forward = normalizeVec3(renderer->direction); float horizontal_scale = tanf(renderer->horizontal_view); float vertical_scale = tanf(renderer->vertical_view); #pragma omp parallel for schedule(dynamic, 1) for (int y = 0; y < renderer->height; y++) { for (int x = 0; x < renderer->width; x++) { float scale_x = (x / (float)renderer->width - 0.5) * horizontal_scale; float scale_y = (y / (float)renderer->height - 0.5) * vertical_scale; Vec3 direction = normalizeVec3(addVec3(forward, addVec3(scaleVec3(right, scale_x), scaleVec3(down, scale_y)))); Color pixel_color = createVec3(0, 0, 0); for (int s = 0; s < renderer->pixel_samples; s++) { Vec3 actual_direction = randomVec3InDirection(direction, 1e-5, 100); Ray ray = createRay(renderer->position, actual_direction); Color color = computeRadiance(&ray, scene, renderer, renderer->depth); pixel_color = addVec3(pixel_color, color); } pixel_color = scaleVec3(pixel_color, 1.0 / renderer->pixel_samples); Color* pixel = renderer->buffer + (y * renderer->width + x); *pixel = addVec3(*pixel, pixel_color); } } } void scaleBuffer(Renderer* renderer, float scale) 
{ for (int i = 0; i < renderer->height; i++) { for (int j = 0; j < renderer->width; j++) { Color* pixel = renderer->buffer + (i * renderer->width + j); *pixel = scaleVec3(*pixel, scale); } } } void clearBuffer(Renderer* renderer) { for (int i = 0; i < renderer->height; i++) { for (int j = 0; j < renderer->width; j++) { renderer->buffer[i * renderer->width + j] = createVec3(0, 0, 0); } } }
task_target_device_codegen.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[ .].+[.|,]" --prefix-filecheck-ir-name _ // RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -fopenmp-version=50 -x c -emit-llvm %s -o - | FileCheck %s // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s // RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp-simd -fopenmp-version=50 -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp-simd -fopenmp-version=50 -x c -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s // SIMD-ONLY0-NOT: {{__kmpc|__tgt}} // expected-no-diagnostics #ifndef HEADER #define HEADER void test_task_affinity() { int t; #pragma omp task { #pragma omp target device(t) ; } } #endif // CHECK-LABEL: define {{[^@]+}}@test_task_affinity // CHECK-SAME: () #[[ATTR0:[0-9]+]] { // CHECK-NEXT: entry: // CHECK-NEXT: [[T:%.*]] = alloca i32, align 4 // CHECK-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1 // CHECK-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) // CHECK-NEXT: [[TMP1:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i32 1, i64 48, i64 0, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. 
to i32 (i32, i8*)*)) // CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to %struct.kmp_task_t_with_privates* // CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP2]], i32 0, i32 0 // CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP2]], i32 0, i32 1 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP4]], i32 0, i32 0 // CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[T]], align 4 // CHECK-NEXT: store i32 [[TMP6]], i32* [[TMP5]], align 8 // CHECK-NEXT: [[TMP7:%.*]] = call i32 @__kmpc_omp_task(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]], i8* [[TMP1]]) // CHECK-NEXT: ret void // // // CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_test_task_affinity_l18 // CHECK-SAME: () #[[ATTR1:[0-9]+]] { // CHECK-NEXT: entry: // CHECK-NEXT: ret void // // // CHECK-LABEL: define {{[^@]+}}@.omp_task_privates_map. // CHECK-SAME: (%struct..kmp_privates.t* noalias noundef [[TMP0:%.*]], i32** noalias noundef [[TMP1:%.*]]) #[[ATTR2:[0-9]+]] { // CHECK-NEXT: entry: // CHECK-NEXT: [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8 // CHECK-NEXT: [[DOTADDR1:%.*]] = alloca i32**, align 8 // CHECK-NEXT: store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8 // CHECK-NEXT: store i32** [[TMP1]], i32*** [[DOTADDR1]], align 8 // CHECK-NEXT: [[TMP2:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP2]], i32 0, i32 0 // CHECK-NEXT: [[TMP4:%.*]] = load i32**, i32*** [[DOTADDR1]], align 8 // CHECK-NEXT: store i32* [[TMP3]], i32** [[TMP4]], align 8 // CHECK-NEXT: ret void // // // CHECK-LABEL: define {{[^@]+}}@.omp_task_entry. 
// CHECK-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] { // CHECK-NEXT: entry: // CHECK-NEXT: [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4 // CHECK-NEXT: [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8 // CHECK-NEXT: [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8 // CHECK-NEXT: [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8 // CHECK-NEXT: [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8 // CHECK-NEXT: [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8 // CHECK-NEXT: [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i32*, align 8 // CHECK-NEXT: [[DOTCAPTURE_EXPR__I:%.*]] = alloca i32, align 4 // CHECK-NEXT: [[DOTADDR:%.*]] = alloca i32, align 4 // CHECK-NEXT: [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8 // CHECK-NEXT: store i32 [[TMP0]], i32* [[DOTADDR]], align 4 // CHECK-NEXT: store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8 // CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4 // CHECK-NEXT: [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8 // CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2 // CHECK-NEXT: [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0 // CHECK-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 // CHECK-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon* // CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1 // CHECK-NEXT: [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8* // CHECK-NEXT: [[TMP11:%.*]] = bitcast 
%struct.kmp_task_t_with_privates* [[TMP3]] to i8* // CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]]) // CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]]) // CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]]) // CHECK-NEXT: call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]]) // CHECK-NEXT: store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !12 // CHECK-NEXT: store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, i32**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: [[TMP12:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: [[TMP13:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: [[TMP14:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: [[TMP15:%.*]] = bitcast void (i8*, ...)* [[TMP13]] to void (i8*, i32**)* // CHECK-NEXT: call void [[TMP15]](i8* [[TMP14]], i32** [[DOTFIRSTPRIV_PTR_ADDR_I]]) #[[ATTR4:[0-9]+]] // CHECK-NEXT: [[TMP16:%.*]] = load i32*, i32** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !12 // CHECK-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4 // CHECK-NEXT: store i32 [[TMP17]], i32* [[DOTCAPTURE_EXPR__I]], align 4, !noalias !12 // CHECK-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_test_task_affinity_l18() #[[ATTR4]] // CHECK-NEXT: ret i32 0 //
GB_binop__gt_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__gt_uint32) // A.*B function (eWiseMult): GB (_AemultB_08__gt_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__gt_uint32) // A.*B function (eWiseMult): GB (_AemultB_04__gt_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_uint32) // A*D function (colscale): GB (_AxD__gt_uint32) // D*A function (rowscale): GB (_DxB__gt_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__gt_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__gt_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_uint32) // C=scalar+B GB (_bind1st__gt_uint32) // C=scalar+B' GB (_bind1st_tran__gt_uint32) // C=A+scalar GB (_bind2nd__gt_uint32) // C=A'+scalar GB (_bind2nd_tran__gt_uint32) // C type: bool // A type: uint32_t // A pattern? 0 // B type: uint32_t // B pattern? 
0 // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x > y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GT || GxB_NO_UINT32 || GxB_NO_GT_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__gt_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__gt_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__gt_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__gt_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__gt_uint32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__gt_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint32_t alpha_scalar ; uint32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ; beta_scalar = (*((uint32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__gt_uint32) ( GrB_Matrix C, const int 
C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__gt_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__gt_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__gt_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__gt_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for 
(p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__gt_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB (_bind1st_tran__gt_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB (_bind2nd_tran__gt_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
clang-host-targ2.c
#include <stdio.h>
#include "assert.h"
#include <unistd.h>

#define TRIALS 1
#define N 960

/*
 * Offload smoke test: A[i] += B[i] + C[i] on the first 256 elements,
 * executed TRIALS times inside a "target teams / distribute / parallel for"
 * region, then verified on the host.
 *
 * Returns 0 on success, 1 on any data mismatch.
 *
 * Fix: removed the unused local `nte` (declared but never read).
 */
int main() {
  int fail = 0;
  double A[N], B[N], C[N];

  /* A accumulates; B contributes 0 and C contributes 1 per trial, so every
     updated element is expected to equal exactly TRIALS afterwards. */
  for (int i = 0; i < N; i++) {
    A[i] = 0.0;
    B[i] = 0.0;
    C[i] = 1.0;
  }

  /* Chunk size of the distribute loop.  256 must be a multiple of
     blockSize, otherwise the inner loop would run past the 256-element
     region that is verified below. */
  int tl = 64;
  int blockSize = tl;

  for (int t = 0; t < TRIALS; t++) {
    #pragma omp target teams
    {
      #pragma omp distribute
      for (int j = 0; j < 256; j += blockSize) {
        #pragma omp parallel for
        for (int i = j; i < j + blockSize; i++) {
          A[i] += B[i] + C[i];
        }
      }
    }
  }

  /* Only the first 256 of the N elements were updated; verify exactly
     those.  The exact floating-point comparison is safe here: all values
     involved are small integers, represented exactly in double. */
  for (int i = 0; i < 256; i++) {
    if (A[i] != TRIALS) {
      printf("Error at A[%d], h = %lf, d = %lf\n", i, (double)TRIALS, A[i]);
      fail = 1;
    }
  }

  if (fail) {
    printf("Failed\n");
    return 1;
  } else {
    printf("Succeeded\n");
    return 0;
  }
}
sort.c
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /**********************************************************************************************/ /* * Original code from the Cilk project * * Copyright (c) 2000 Massachusetts Institute of Technology * Copyright (c) 2000 Matteo Frigo */ /* * this program uses an algorithm that we call `cilksort'. * The algorithm is essentially mergesort: * * cilksort(in[1..n]) = * spawn cilksort(in[1..n/2], tmp[1..n/2]) * spawn cilksort(in[n/2..n], tmp[n/2..n]) * sync * spawn cilkmerge(tmp[1..n/2], tmp[n/2..n], in[1..n]) * * * The procedure cilkmerge does the following: * * cilkmerge(A[1..n], B[1..m], C[1..(n+m)]) = * find the median of A \union B using binary * search. The binary search gives a pair * (ma, mb) such that ma + mb = (n + m)/2 * and all elements in A[1..ma] are smaller than * B[mb..m], and all the B[1..mb] are smaller * than all elements in A[ma..n]. 
* * spawn cilkmerge(A[1..ma], B[1..mb], C[1..(n+m)/2]) * spawn cilkmerge(A[ma..m], B[mb..n], C[(n+m)/2 .. (n+m)]) * sync * * The algorithm appears for the first time (AFAIK) in S. G. Akl and * N. Santoro, "Optimal Parallel Merging and Sorting Without Memory * Conflicts", IEEE Trans. Comp., Vol. C-36 No. 11, Nov. 1987 . The * paper does not express the algorithm using recursion, but the * idea of finding the median is there. * * For cilksort of n elements, T_1 = O(n log n) and * T_\infty = O(log^3 n). There is a way to shave a * log factor in the critical path (left as homework). */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "bots.h" #include "app-desc.h" typedef long ELM; ELM *array, *tmp; static unsigned long rand_nxt = 0; static inline unsigned long my_rand(void) { rand_nxt = rand_nxt * 1103515245 + 12345; return rand_nxt; } static inline void my_srand(unsigned long seed) { rand_nxt = seed; } static inline ELM med3(ELM a, ELM b, ELM c) { if (a < b) { if (b < c) { return b; } else { if (a < c) return c; else return a; } } else { if (b > c) { return b; } else { if (a > c) return c; else return a; } } } /* * simple approach for now; a better median-finding * may be preferable */ static inline ELM choose_pivot(ELM *low, ELM *high) { return med3(*low, *high, low[(high - low) / 2]); } static ELM *seqpart(ELM *low, ELM *high) { ELM pivot; ELM h, l; ELM *curr_low = low; ELM *curr_high = high; pivot = choose_pivot(low, high); while (1) { while ((h = *curr_high) > pivot) curr_high--; while ((l = *curr_low) < pivot) curr_low++; if (curr_low >= curr_high) break; *curr_high-- = l; *curr_low++ = h; } /* * I don't know if this is really necessary. * The problem is that the pivot is not always the * first element, and the partition may be trivial. * However, if the partition is trivial, then * *high is the largest element, whence the following * code. 
*/ if (curr_high < high) return curr_high; else return curr_high - 1; } #define swap(a, b) \ { \ ELM tmp;\ tmp = a;\ a = b;\ b = tmp;\ } static void insertion_sort(ELM *low, ELM *high) { ELM *p, *q; ELM a, b; for (q = low + 1; q <= high; ++q) { a = q[0]; for (p = q - 1; p >= low && (b = p[0]) > a; p--) p[1] = b; p[1] = a; } } /* * tail-recursive quicksort, almost unrecognizable :-) */ void seqquick(ELM *low, ELM *high) { ELM *p; while (high - low >= bots_app_cutoff_value_2) { p = seqpart(low, high); seqquick(low, p); low = p + 1; } insertion_sort(low, high); } void seqmerge(ELM *low1, ELM *high1, ELM *low2, ELM *high2, ELM *lowdest) { ELM a1, a2; /* * The following 'if' statement is not necessary * for the correctness of the algorithm, and is * in fact subsumed by the rest of the function. * However, it is a few percent faster. Here is why. * * The merging loop below has something like * if (a1 < a2) { * *dest++ = a1; * ++low1; * if (end of array) break; * a1 = *low1; * } * * Now, a1 is needed immediately in the next iteration * and there is no way to mask the latency of the load. * A better approach is to load a1 *before* the end-of-array * check; the problem is that we may be speculatively * loading an element out of range. While this is * probably not a problem in practice, yet I don't feel * comfortable with an incorrect algorithm. Therefore, * I use the 'fast' loop on the array (except for the last * element) and the 'slow' loop for the rest, saving both * performance and correctness. 
*/ if (low1 < high1 && low2 < high2) { a1 = *low1; a2 = *low2; for (;;) { if (a1 < a2) { *lowdest++ = a1; a1 = *++low1; if (low1 >= high1) break; } else { *lowdest++ = a2; a2 = *++low2; if (low2 >= high2) break; } } } if (low1 <= high1 && low2 <= high2) { a1 = *low1; a2 = *low2; for (;;) { if (a1 < a2) { *lowdest++ = a1; ++low1; if (low1 > high1) break; a1 = *low1; } else { *lowdest++ = a2; ++low2; if (low2 > high2) break; a2 = *low2; } } } if (low1 > high1) { memcpy(lowdest, low2, sizeof(ELM) * (high2 - low2 + 1)); } else { memcpy(lowdest, low1, sizeof(ELM) * (high1 - low1 + 1)); } } #define swap_indices(a, b) \ { \ ELM *tmp;\ tmp = a;\ a = b;\ b = tmp;\ } ELM *binsplit(ELM val, ELM *low, ELM *high) { /* * returns index which contains greatest element <= val. If val is * less than all elements, returns low-1 */ ELM *mid; while (low != high) { mid = low + ((high - low + 1) >> 1); if (val <= *mid) high = mid - 1; else low = mid; } if (*low > val) return low - 1; else return low; } void cilkmerge_par(ELM *low1, ELM *high1, ELM *low2, ELM *high2, ELM *lowdest) { /* * Cilkmerge: Merges range [low1, high1] with range [low2, high2] * into the range [lowdest, ...] */ ELM *split1, *split2; /* * where each of the ranges are broken for * recursive merge */ long int lowsize; /* * total size of lower halves of two * ranges - 2 */ /* * We want to take the middle element (indexed by split1) from the * larger of the two arrays. The following code assumes that split1 * is taken from range [low1, high1]. So if [low1, high1] is * actually the smaller range, we should swap it with [low2, high2] */ if (high2 - low2 > high1 - low1) { swap_indices(low1, low2); swap_indices(high1, high2); } if (high2 < low2) { /* smaller range is empty */ memcpy(lowdest, low1, sizeof(ELM) * (high1 - low1)); return; } if (high2 - low2 < bots_app_cutoff_value ) { seqmerge(low1, high1, low2, high2, lowdest); return; } /* * Basic approach: Find the middle element of one range (indexed by * split1). 
Find where this element would fit in the other range * (indexed by split 2). Then merge the two lower halves and the two * upper halves. */ split1 = ((high1 - low1 + 1) / 2) + low1; split2 = binsplit(*split1, low2, high2); lowsize = split1 - low1 + split2 - low2; /* * directly put the splitting element into * the appropriate location */ *(lowdest + lowsize + 1) = *split1; #pragma omp task untied cilkmerge_par(low1, split1 - 1, low2, split2, lowdest); #pragma omp task untied cilkmerge_par(split1 + 1, high1, split2 + 1, high2, lowdest + lowsize + 2); #pragma omp taskwait return; } void cilksort_par(ELM *low, ELM *tmp, long size) { /* * divide the input in four parts of the same size (A, B, C, D) * Then: * 1) recursively sort A, B, C, and D (in parallel) * 2) merge A and B into tmp1, and C and D into tmp2 (in parallel) * 3) merge tmp1 and tmp2 into the original array */ long quarter = size / 4; ELM *A, *B, *C, *D, *tmpA, *tmpB, *tmpC, *tmpD; if (size < bots_app_cutoff_value_1 ) { /* quicksort when less than 1024 elements */ seqquick(low, low + size - 1); return; } A = low; tmpA = tmp; B = A + quarter; tmpB = tmpA + quarter; C = B + quarter; tmpC = tmpB + quarter; D = C + quarter; tmpD = tmpC + quarter; #pragma omp task untied cilksort_par(A, tmpA, quarter); #pragma omp task untied cilksort_par(B, tmpB, quarter); #pragma omp task untied cilksort_par(C, tmpC, quarter); #pragma omp task untied cilksort_par(D, tmpD, size - 3 * quarter); #pragma omp taskwait #pragma omp task untied cilkmerge_par(A, A + quarter - 1, B, B + quarter - 1, tmpA); #pragma omp task untied cilkmerge_par(C, C + quarter - 1, D, low + size - 1, tmpC); #pragma omp taskwait cilkmerge_par(tmpA, tmpC - 1, tmpC, tmpA + size - 1, A); } void scramble_array( ELM *array ) { unsigned long i; unsigned long j; for (i = 0; i < bots_arg_size; ++i) { j = my_rand(); j = j % bots_arg_size; swap(array[i], array[j]); } } void fill_array( ELM *array ) { unsigned long i; my_srand(1); /* first, fill with integers 1..size 
*/ for (i = 0; i < bots_arg_size; ++i) { array[i] = i; } } void sort_init ( void ) { /* Checking arguments */ if (bots_arg_size < 4) { bots_message("%s can not be less than 4, using 4 as a parameter.\n", BOTS_APP_DESC_ARG_SIZE ); bots_arg_size = 4; } if (bots_app_cutoff_value < 2) { bots_message("%s can not be less than 2, using 2 as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF); bots_app_cutoff_value = 2; } else if (bots_app_cutoff_value > bots_arg_size ) { bots_message("%s can not be greather than vector size, using %d as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF, bots_arg_size); bots_app_cutoff_value = bots_arg_size; } if (bots_app_cutoff_value_1 > bots_arg_size ) { bots_message("%s can not be greather than vector size, using %d as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF_1, bots_arg_size); bots_app_cutoff_value_1 = bots_arg_size; } if (bots_app_cutoff_value_2 > bots_arg_size ) { bots_message("%s can not be greather than vector size, using %d as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF_2, bots_arg_size); bots_app_cutoff_value_2 = bots_arg_size; } if (bots_app_cutoff_value_2 > bots_app_cutoff_value_1) { bots_message("%s can not be greather than %s, using %d as a parameter.\n", BOTS_APP_DESC_ARG_CUTOFF_2, BOTS_APP_DESC_ARG_CUTOFF_1, bots_app_cutoff_value_1 ); bots_app_cutoff_value_2 = bots_app_cutoff_value_1; } array = (ELM *) malloc(bots_arg_size * sizeof(ELM)); tmp = (ELM *) malloc(bots_arg_size * sizeof(ELM)); fill_array(array); scramble_array(array); } void sort_par ( void ) { bots_message("Computing multisort algorithm (n=%d) ", bots_arg_size); #pragma omp parallel #pragma omp single nowait #pragma omp task untied cilksort_par(array, tmp, bots_arg_size); bots_message(" completed!\n"); } int sort_verify ( void ) { int i, success = 1; for (i = 0; i < bots_arg_size; ++i) if (array[i] != i) success = 0; return success ? BOTS_RESULT_SUCCESSFUL : BOTS_RESULT_UNSUCCESSFUL; }
GB_unaryop__ainv_int16_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com  See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_int16_fp32
// op(A') function:  GB_tran__ainv_int16_fp32

// C type:   int16_t
// A type:   float
// cast:     int16_t cij ; GB_CAST_SIGNED(cij,aij,16)
// unaryop:  cij = -aij

// type of the A matrix entries
#define GB_ATYPE \
    float

// type of the C matrix entries
#define GB_CTYPE \
    int16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (additive inverse)
#define GB_OP(z, x) \
    z = -x ;

// casting (float -> int16_t with saturation/truncation per GB_CAST_SIGNED)
#define GB_CASTING(z, x) \
    int16_t z ; GB_CAST_SIGNED(z,x,16) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT16 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__ainv_int16_fp32
(
    int16_t *restrict Cx,       // output array, preallocated with anz entries
    const float *restrict Ax,   // input array with anz entries
    int64_t anz,                // number of entries to process
    int nthreads                // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // embarrassingly parallel: each entry is cast and negated independently
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__ainv_int16_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template does the work, specialized by the
    // GB_* macros defined above; phase 2 fills in the output values
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ExplicitTopology.h
/* * MIT License * * Copyright (c) 2018-2019 Benjamin Köhler * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #pragma once #ifndef BK_EXPLICITTOPOLOGY_H #define BK_EXPLICITTOPOLOGY_H #include <algorithm> #include <cassert> #include <cstdint> #include <fstream> #include <map> #include <string_view> #include <utility> #include <vector> #include <bkDataset/topology/Cell.h> #include <bk/StringUtils> namespace bk { template<int TCellSize = -1> class ExplicitTopology { //==================================================================================================== //===== DEFINITIONS //==================================================================================================== using self_type = ExplicitTopology<TCellSize>; public: using cell_type = Cell<TCellSize>; /// @{ -------------------------------------------------- IS EXPLICIT TOPOLOGY [[nodiscard]] static constexpr bool IsExplicit() noexcept { return true; } /// @} //==================================================================================================== //===== MEMBERS //==================================================================================================== private: std::vector<cell_type> _cells; std::map<unsigned int, std::vector<unsigned int>> _neighbors_of_point; std::map<unsigned int, std::vector<unsigned int>> _cells_of_point; bool _up2date; //==================================================================================================== //===== CONSTRUCTORS & DESTRUCTOR //==================================================================================================== public: /// @{ -------------------------------------------------- CTOR ExplicitTopology() : _up2date(false) { /* do nothing */ } ExplicitTopology(const self_type&) = default; ExplicitTopology(self_type&&) noexcept = default; /// @} /// @{ -------------------------------------------------- DTOR ~ExplicitTopology() = default; /// @} //==================================================================================================== //===== GETTER 
//==================================================================================================== /// @{ -------------------------------------------------- GET NUM CELLS [[nodiscard]] unsigned int num_cells() const { return _cells.size(); } /// @} /// @{ -------------------------------------------------- GET CELL [[nodiscard]] cell_type& cell(unsigned int id) { return _cells[id]; } [[nodiscard]] const cell_type& cell(unsigned int id) const { return _cells[id]; } /// @} /// @{ -------------------------------------------------- GET ITERATORS [[nodiscard]] typename std::vector<cell_type>::iterator begin() { return _cells.begin(); } [[nodiscard]] typename std::vector<cell_type>::const_iterator begin() const { return _cells.begin(); } [[nodiscard]] typename std::vector<cell_type>::iterator end() { return _cells.end(); } [[nodiscard]] typename std::vector<cell_type>::const_iterator end() const { return _cells.end(); } [[nodiscard]] typename std::vector<cell_type>::reverse_iterator rbegin() { return _cells.rbegin(); } [[nodiscard]] typename std::vector<cell_type>::const_reverse_iterator rbegin() const { return _cells.rbegin(); } [[nodiscard]] typename std::vector<cell_type>::reverse_iterator rend() { return _cells.rend(); } [[nodiscard]] typename std::vector<cell_type>::const_reverse_iterator rend() const { return _cells.rend(); } /// @} //==================================================================================================== //===== ADD / REMOVE //==================================================================================================== /// @{ -------------------------------------------------- RESERVE NUM CELLS void reserve_num_cells(unsigned int N) { _cells.reserve(N); } /// @} /// @{ -------------------------------------------------- PUSH BACK [[maybe_unused]] cell_type& push_back(const cell_type& c) { _cells.push_back(c); _up2date = false; return _cells.back(); } [[maybe_unused]] cell_type& push_back(cell_type&& c) { 
_cells.push_back(std::move(c)); _up2date = false; return _cells.back(); } /// @} /// @{ -------------------------------------------------- EMPLACE BACK template<typename... TArgs> [[maybe_unused]] cell_type& emplace_back(TArgs&& ... args) { return _cells.emplace_back(std::forward<TArgs>(args)...); } /// @} /// @{ -------------------------------------------------- REMOVE [[maybe_unused]] bool remove(unsigned int i) { if (i < num_cells()) { _cells.erase(begin() + i); _up2date = false; return true; } return false; } [[maybe_unused]] bool remove(const cell_type& c) { if (auto it = std::find(begin(), end(), c); it != end()) { _cells.erase(it); _up2date = false; return true; } return false; } /// @} //==================================================================================================== //===== SETTER //==================================================================================================== /// @{ -------------------------------------------------- OPERATOR = [[maybe_unused]] self_type& operator=(const self_type&) = default; [[maybe_unused]] self_type& operator=(self_type&&) noexcept = default; /// @} /// @{ -------------------------------------------------- SET NUM CELLS void set_num_cells(unsigned int N) { _cells.resize(N); } /// @} //==================================================================================================== //===== FUNCTIONS //==================================================================================================== /// @{ -------------------------------------------------- INITIALIZATION void init() { if (_up2date) { return; } init_neighbors_of_points(); init_cells_of_points(); _up2date = true; } void clear() { _cells.clear(); _up2date = false; } /// @} /// @{ -------------------------------------------------- INVERT CELL IDS void invert_cell_ordering() { #pragma omp parallel for for (unsigned int cellId = 0; cellId < num_cells(); ++cellId) { _cells[cellId].invert_order(); } } /// @} 
//==================================================================================================== //===== CONNECTIVITY //==================================================================================================== /// @{ -------------------------------------------------- HELPERS: UPDATE NEIGHBORS OF POINTS private: void add_point_neighbor_ids_of_all_cells() { for (unsigned int cellId = 0; cellId < num_cells(); ++cellId) { const cell_type& c = cell(cellId); unsigned int numPointIdsInCell = c.size(); for (unsigned int pointId = 0; pointId < numPointIdsInCell; ++pointId) { auto it = _neighbors_of_point.insert(std::make_pair(c[pointId], std::vector<unsigned int>())).first; for (unsigned int otherPointId = 0; otherPointId < numPointIdsInCell; ++otherPointId) { if (pointId == otherPointId) { continue; } it->second.push_back(c[otherPointId]); } } } } void remove_duplicate_neighbor_ids() { for (auto& pair: _neighbors_of_point) { pair.second.erase(std::unique(pair.second.begin(), pair.second.end()), pair.second.end()); } } void sort_neighbor_ids_by_index() { for (auto& pair: _neighbors_of_point) { std::sort(pair.second.begin(), pair.second.end()); } } void init_neighbors_of_points() { _neighbors_of_point.clear(); add_point_neighbor_ids_of_all_cells(); remove_duplicate_neighbor_ids(); } public: /// @} /// @{ -------------------------------------------------- NEIGHBORS OF POINT [[nodiscard]] const std::vector<unsigned int>& neighbors_of_point(unsigned int pointId) const { assert(_up2date && "call init() first"); auto it = _neighbors_of_point.find(pointId); assert(it != _neighbors_of_point.end() && "invalid pointId"); return it->second; } /// @} /// @{ -------------------------------------------------- HELPERS: UPDATE CELLS OF POINT private: void add_cell_ids_of_all_points() { for (unsigned int cellId = 0; cellId < num_cells(); ++cellId) { const cell_type& c = cell(cellId); unsigned int numPointIdsInCell = c.size(); for (unsigned int pointId = 0; pointId < 
numPointIdsInCell; ++pointId) { auto it = _cells_of_point.insert(std::make_pair(c[pointId], std::vector<unsigned int>())).first; it->second.push_back(cellId); } } } void init_cells_of_points() { _cells_of_point.clear(); add_cell_ids_of_all_points(); for (auto& pair: _cells_of_point) { std::sort(pair.second.begin(), pair.second.end()); } } public: /// @} /// @{ -------------------------------------------------- CELLS OF POINT [[nodiscard]] const std::vector<unsigned int>& cells_of_point(unsigned int pointId) const { assert(_up2date && "call init() first"); auto it = _cells_of_point.find(pointId); assert(it != _cells_of_point.end() && "invalid pointId"); return it->second; } /// @} //==================================================================================================== //===== I/O //==================================================================================================== /// @{ -------------------------------------------------- SAVE [[maybe_unused]] bool save(std::string_view filename) const { /* * check filename */ std::string fname(filename); const std::string suffix = ".etopo"; if (fname.empty()) { fname = "explicit-topology" + suffix; } else if (!string_utils::ends_with(fname, suffix)) { fname.append(suffix); } /* * create file */ std::ofstream file(fname, std::ios_base::out | std::ios_base::binary); /* * save */ const bool success = save(file); if (success) { file.close(); } return success; } [[maybe_unused]] bool save(std::ofstream& file) const { if (!file.is_open() || !file.good()) { return false; } std::uint32_t numCells = num_cells(); file.write(reinterpret_cast<char*>(&numCells), sizeof(std::uint32_t)); if (numCells == 0) { return true; } bool allCellsHaveSameNumberOfIds = true; std::uint32_t referenceNum = static_cast<std::uint32_t>(_cells[0].size()); std::uint32_t cellIdsTotal = 0; for (const cell_type& c: _cells) { if (static_cast<std::uint32_t>(c.size()) != referenceNum) { allCellsHaveSameNumberOfIds = false; } cellIdsTotal += 
c.size(); } std::uint8_t allCellsHaveSameNumberOfIdsui8 = allCellsHaveSameNumberOfIds ? true : false; file.write(reinterpret_cast<char*>(&allCellsHaveSameNumberOfIdsui8), sizeof(std::uint8_t)); if (allCellsHaveSameNumberOfIds) { file.write(reinterpret_cast<char*>(&referenceNum), sizeof(std::uint32_t)); } else { file.write(reinterpret_cast<char*>(&cellIdsTotal), sizeof(std::uint32_t)); } for (unsigned int cellId = 0; cellId < numCells; ++cellId) { const cell_type& c = cell(cellId); std::uint32_t numIds = c.size(); if (!allCellsHaveSameNumberOfIds) { file.write(reinterpret_cast<char*>(&numIds), sizeof(std::uint32_t)); } for (std::uint32_t k = 0; k < numIds; ++k) { std::uint32_t uitemp = static_cast<std::uint32_t>(c[k]); file.write(reinterpret_cast<char*>(&uitemp), sizeof(std::uint32_t)); } } return true; } /// @} /// @{ -------------------------------------------------- LOAD [[maybe_unused]] bool load(std::string_view filename) { /* * check file ending */ if (!string_utils::ends_with(filename.data(), ".etopo")) { return false; } /* * open file */ std::ifstream file(filename.data(), std::ios_base::in | std::ios_base::binary); /* * load */ const bool success = load(file); if (success) { file.close(); } return success; } [[maybe_unused]] bool load(std::ifstream& file) { clear(); if (!file.is_open() || !file.good()) { return false; } std::uint32_t numCells; file.read(reinterpret_cast<char*>(&numCells), sizeof(std::uint32_t)); set_num_cells(numCells); std::uint8_t allCellsHaveSameNumberOfIdsui8; file.read(reinterpret_cast<char*>(&allCellsHaveSameNumberOfIdsui8), sizeof(std::uint8_t)); const bool allCellsHaveSameNumberOfIds = allCellsHaveSameNumberOfIdsui8 == 1; std::uint32_t numIdsPerCellOrNumIdsTotal; file.read(reinterpret_cast<char*>(&numIdsPerCellOrNumIdsTotal), sizeof(std::uint32_t)); assert((TCellSize <= 0 || (TCellSize > 0 && TCellSize == static_cast<int>(numIdsPerCellOrNumIdsTotal))) && "ExplicitTopology is loaded from an object (this) with wrong TCellSize template 
argument"); std::vector<std::uint32_t> cellBuffer; if (allCellsHaveSameNumberOfIds) { cellBuffer.resize(numIdsPerCellOrNumIdsTotal * numCells); file.read(reinterpret_cast<char*>(cellBuffer.data()), cellBuffer.size() * sizeof(std::uint32_t)); #pragma omp parallel for for (unsigned int cellId = 0; cellId < numCells; ++cellId) { cell_type& c = cell(cellId); for (std::uint32_t k = 0; k < numIdsPerCellOrNumIdsTotal; ++k) { c[k] = cellBuffer[numIdsPerCellOrNumIdsTotal * cellId + k]; } } } else { cellBuffer.resize(numIdsPerCellOrNumIdsTotal + numCells); file.read(reinterpret_cast<char*>(cellBuffer.data()), cellBuffer.size() * sizeof(std::uint32_t)); unsigned int bufCnt = 0; for (unsigned int cellId = 0; cellId < numCells; ++cellId) { const std::uint32_t numIds = cellBuffer[bufCnt++]; cell_type& c = cell(cellId); if constexpr (TCellSize <= 0) { c.set_size(numIds); } for (std::uint32_t k = 0; k < numIds; ++k) { c[k] = cellBuffer[bufCnt++]; } } } return num_cells() != 0; } /// @} }; // class ExplicitTopology } // namespace bk #endif //BK_EXPLICITTOPOLOGY_H
9644.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "3mm.h" /* Array initialization. */ static void init_array(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nk; j++) A[i][j] = ((DATA_TYPE) i*j) / ni; for (i = 0; i < nk; i++) for (j = 0; j < nj; j++) B[i][j] = ((DATA_TYPE) i*(j+1)) / nj; for (i = 0; i < nj; i++) for (j = 0; j < nm; j++) C[i][j] = ((DATA_TYPE) i*(j+3)) / nl; for (i = 0; i < nm; i++) for (j = 0; j < nl; j++) D[i][j] = ((DATA_TYPE) i*(j+2)) / nk; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int ni, int nl, DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j; for (i = 0; i < ni; i++) for (j = 0; j < nl; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]); if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. 
*/ static void kernel_3mm(int ni, int nj, int nk, int nl, int nm, DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj), DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk), DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj), DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl), DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm), DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl), DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl)) { int i, j, k; #pragma scop #pragma omp parallel private (j, k) num_threads(2) { /* E := A*B */ #pragma omp for schedule(static, 8) for (i = 0; i < _PB_NI; i++) for (j = 0; j < _PB_NJ; j++) { E[i][j] = 0; for (k = 0; k < _PB_NK; ++k) E[i][j] += A[i][k] * B[k][j]; } /* F := C*D */ #pragma omp for schedule(static, 8) for (i = 0; i < _PB_NJ; i++) for (j = 0; j < _PB_NL; j++) { F[i][j] = 0; for (k = 0; k < _PB_NM; ++k) F[i][j] += C[i][k] * D[k][j]; } /* G := E*F */ #pragma omp for schedule(static, 8) for (i = 0; i < _PB_NI; i++) for (j = 0; j < _PB_NL; j++) { G[i][j] = 0; for (k = 0; k < _PB_NJ; ++k) G[i][j] += E[i][k] * F[k][j]; } } #pragma endscop } int main(int argc, char** argv) { /* Retrieve problem size. */ int ni = NI; int nj = NJ; int nk = NK; int nl = NL; int nm = NM; /* Variable declaration/allocation. */ POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj); POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk); POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj); POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl); POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm); POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl); POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl); /* Initialize array(s). */ init_array (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_3mm (ni, nj, nk, nl, nm, POLYBENCH_ARRAY(E), POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B), POLYBENCH_ARRAY(F), POLYBENCH_ARRAY(C), POLYBENCH_ARRAY(D), POLYBENCH_ARRAY(G)); /* Stop and print timer. 
*/ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G))); /* Be clean. */ POLYBENCH_FREE_ARRAY(E); POLYBENCH_FREE_ARRAY(A); POLYBENCH_FREE_ARRAY(B); POLYBENCH_FREE_ARRAY(F); POLYBENCH_FREE_ARRAY(C); POLYBENCH_FREE_ARRAY(D); POLYBENCH_FREE_ARRAY(G); return 0; }
composite.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO M M PPPP OOO SSSSS IIIII TTTTT EEEEE % % C O O MM MM P P O O SS I T E % % C O O M M M PPPP O O SSS I T EEE % % C O O M M P O O SS I T E % % CCCC OOO M M P OOO SSSSS IIIII T EEEEE % % % % % % MagickCore Image Composite Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/accelerate-private.h" #include "magick/artifact.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/draw.h" #include "magick/fx.h" #include "magick/gem.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/memory_.h" #include "magick/option.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantum.h" #include "magick/resample.h" #include "magick/resource_.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/token.h" #include "magick/utility.h" #include "magick/version.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p o s i t e I m a g e C h a n n e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompositeImageChannel() returns the second image composited onto the first % at the specified offset, using the specified composite method. % % The format of the CompositeImageChannel method is: % % MagickBooleanType CompositeImage(Image *image, % const CompositeOperator compose,Image *source_image, % const ssize_t x_offset,const ssize_t y_offset) % MagickBooleanType CompositeImageChannel(Image *image, % const ChannelType channel,const CompositeOperator compose, % Image *source_image,const ssize_t x_offset,const ssize_t y_offset) % % A description of each parameter follows: % % o image: the canvas image, modified by he composition % % o channel: the channel. 
% % o compose: This operator affects how the composite is applied to % the image. The operators and how they are utilized are listed here % http://www.w3.org/TR/SVG12/#compositing. % % o source_image: the composite (source) image. % % o x_offset: the column offset of the composited image. % % o y_offset: the row offset of the composited image. % % Extra Controls from Image meta-data in 'source_image' (artifacts) % % o "compose:args" % A string containing extra numerical arguments for specific compose % methods, generally expressed as a 'geometry' or a comma separated list % of numbers. % % Compose methods needing such arguments include "BlendCompositeOp" and % "DisplaceCompositeOp". % % o "compose:outside-overlay" % Modify how the composition is to effect areas not directly covered % by the 'source_image' at the offset given. Normally this is % dependant on the 'compose' method, especially Duff-Porter methods. % % If set to "false" then disable all normal handling of pixels not % covered by the source_image. Typically used for repeated tiling % of the source_image by the calling API. % % Previous to IM v6.5.3-3 this was called "modify-outside-overlay" % */ /* ** Programmers notes on SVG specification. ** ** A Composition is defined by... ** Color Function : f(Sc,Dc) where Sc and Dc are the normizalized colors ** Blending areas : X = 1 for area of overlap ie: f(Sc,Dc) ** Y = 1 for source preserved ** Z = 1 for canvas preserved ** ** Conversion to transparency (then optimized) ** Dca' = f(Sc, Dc)*Sa*Da + Y*Sca*(1-Da) + Z*Dca*(1-Sa) ** Da' = X*Sa*Da + Y*Sa*(1-Da) + Z*Da*(1-Sa) ** ** Where... ** Sca = Sc*Sa normalized Source color divided by Source alpha ** Dca = Dc*Da normalized Dest color divided by Dest alpha ** Dc' = Dca'/Da' the desired color value for this channel. ** ** Da' in in the follow formula as 'gamma' The resulting alpla value. ** ** ** Most functions use a blending mode of over (X=1,Y=1,Z=1) ** this results in the following optimizations... 
** gamma = Sa+Da-Sa*Da;
** gamma = 1 - QuantumScale*alpha * QuantumScale*beta;
** opacity = QuantumScale*alpha*beta; // over blend, optimized 1-Gamma
**
** The above SVG definitions also define that Mathematical Composition
** methods should use a 'Over' blending mode for Alpha Channel.
** It however was not applied for composition modes of 'Plus', 'Minus',
** the modulus versions of 'Add' and 'Subtract'.
**
**
** Mathematical operator changes to be applied from IM v6.7...
**
** 1/ Modulus modes 'Add' and 'Subtract' are obsoleted and renamed
** 'ModulusAdd' and 'ModulusSubtract' for clarity.
**
** 2/ All mathematical compositions work as per the SVG specification
** with regard to blending. This now includes 'ModulusAdd' and
** 'ModulusSubtract'.
**
** 3/ When the special channel flag 'sync' (synchronize channel updates)
** is turned off (enabled by default) then mathematical compositions are
** only performed on the channels specified, and are applied
** independently of each other. In other words the mathematics is
** performed as 'pure' mathematical operations, rather than as image
** operations.
*/

/*
  Per-channel 'Atop' color function: source over destination, restricted
  to where the destination exists.  Da drops out of the optimized form.
*/
static inline MagickRealType Atop(const MagickRealType p,
  const MagickRealType Sa,const MagickRealType q,
  const MagickRealType magick_unused(Da))
{
  magick_unreferenced(Da);

  return(p*Sa+q*(1.0-Sa));  /* Da optimized out, Da/gamma => 1.0 */
}

/*
  Apply Atop to every channel of a pixel; the result keeps the
  destination's alpha (the defining property of Atop).
*/
static inline void CompositeAtop(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  composite->opacity=q->opacity;   /* optimized  Da = 1.0-Gamma */
  composite->red=Atop(p->red,Sa,q->red,1.0);
  composite->green=Atop(p->green,Sa,q->green,1.0);
  composite->blue=Atop(p->blue,Sa,q->blue,1.0);
  if (q->colorspace == CMYKColorspace)
    composite->index=Atop(p->index,Sa,q->index,1.0);
}

/* What is this Composition method for? Can't find any specification!
WARNING this is not doing correct 'over' blend handling (Anthony Thyssen). */ static inline void CompositeBumpmap(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType intensity; intensity=MagickPixelIntensity(p); composite->red=QuantumScale*intensity*q->red; composite->green=QuantumScale*intensity*q->green; composite->blue=QuantumScale*intensity*q->blue; composite->opacity=(MagickRealType) QuantumScale*intensity*p->opacity; if (q->colorspace == CMYKColorspace) composite->index=QuantumScale*intensity*q->index; } static inline void CompositeClear(const MagickPixelPacket *q, MagickPixelPacket *composite) { composite->opacity=(MagickRealType) TransparentOpacity; composite->red=0.0; composite->green=0.0; composite->blue=0.0; if (q->colorspace == CMYKColorspace) composite->index=0.0; } static MagickRealType ColorBurn(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { double SaSca; if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca-Da) < MagickEpsilon)) return(Sa*Da+Dca*(1.0-Sa)); if (Sca < MagickEpsilon) return(Dca*(1.0-Sa)); SaSca=Sa*PerceptibleReciprocal(Sca); return(Sa*Da-Sa*MagickMin(Da,(Da-Dca)*SaSca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeColorBurn(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*ColorBurn(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*ColorBurn(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*ColorBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*ColorBurn(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static MagickRealType ColorDodge(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { /* Oct 2004 SVG specification. */ if ((Sca*Da+Dca*Sa) >= Sa*Da) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Dca*Sa*Sa*PerceptibleReciprocal(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); #if 0 /* New specification, March 2009 SVG specification. This specification was also wrong of non-overlap cases. */ if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)); if (fabs(Sca-Sa) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sa*MagickMin(Da,Dca*Sa/(Sa-Sca))); #endif #if 0 /* Working from first principles using the original formula: f(Sc,Dc) = Dc/(1-Sc) This works correctly! Looks like the 2004 model was right but just required a extra condition for correct handling. */ if ((fabs(Sca-Sa) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)+Dca*(1.0-Sa)); if (fabs(Sca-Sa) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Dca*Sa*Sa/(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); #endif } static inline void CompositeColorDodge(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*ColorDodge(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*ColorDodge(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*ColorDodge(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*ColorDodge(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static inline MagickRealType Darken(const MagickRealType p, const MagickRealType alpha,const MagickRealType q,const MagickRealType beta) { if (p < q) return(MagickOver_(p,alpha,q,beta)); /* src-over */ return(MagickOver_(q,beta,p,alpha)); /* dst-over */ } static inline void CompositeDarken(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Darken is equivalent to a 'Minimum' method OR a greyscale version of a binary 'Or' OR the 'Intersection' of pixel sets. */ double gamma; if ( (channel & SyncChannels) != 0 ) { composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */ gamma=1.0-QuantumScale*composite->opacity; gamma=PerceptibleReciprocal(gamma); composite->red=gamma*Darken(p->red,p->opacity,q->red,q->opacity); composite->green=gamma*Darken(p->green,p->opacity,q->green,q->opacity); composite->blue=gamma*Darken(p->blue,p->opacity,q->blue,q->opacity); if (q->colorspace == CMYKColorspace) composite->index=gamma*Darken(p->index,p->opacity,q->index,q->opacity); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=MagickMax(p->opacity,q->opacity); if ( (channel & RedChannel) != 0 ) composite->red=MagickMin(p->red,q->red); if ( (channel & GreenChannel) != 0 ) composite->green=MagickMin(p->green,q->green); if ( (channel & BlueChannel) != 0 ) composite->blue=MagickMin(p->blue,q->blue); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=MagickMin(p->index,q->index); } } static 
inline void CompositeDarkenIntensity(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Select the pixel based on the intensity level. If 'Sync' flag select whole pixel based on alpha weighted intensity. Otherwise use intensity only, but restrict copy according to channel. */ if ( (channel & SyncChannels) != 0 ) { MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; Da=1.0-QuantumScale*q->opacity; *composite = (Sa*MagickPixelIntensity(p) < Da*MagickPixelIntensity(q)) ? *p : *q; } else { int from_p = (MagickPixelIntensity(p) < MagickPixelIntensity(q)); if ( (channel & AlphaChannel) != 0 ) composite->opacity = from_p ? p->opacity : q->opacity; if ( (channel & RedChannel) != 0 ) composite->red = from_p ? p->red : q->red; if ( (channel & GreenChannel) != 0 ) composite->green = from_p ? p->green : q->green; if ( (channel & BlueChannel) != 0 ) composite->blue = from_p ? p->blue : q->blue; if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = from_p ? p->index : q->index; } } static inline MagickRealType Difference(const MagickRealType p, const MagickRealType Sa,const MagickRealType q,const MagickRealType Da) { /* Optimized by Multipling by QuantumRange (taken from gamma). */ return(Sa*p+Da*q-Sa*Da*2.0*MagickMin(p,q)); } static inline void CompositeDifference(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); /* Values are not normalized as an optimization. 
*/ composite->red=gamma*Difference(p->red,Sa,q->red,Da); composite->green=gamma*Difference(p->green,Sa,q->green,Da); composite->blue=gamma*Difference(p->blue,Sa,q->blue,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Difference(p->index,Sa,q->index,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange-fabs((double) (p->opacity-q->opacity)); if ( (channel & RedChannel) != 0 ) composite->red=fabs((double) (p->red-q->red)); if ( (channel & GreenChannel) != 0 ) composite->green=fabs((double) (p->green-q->green)); if ( (channel & BlueChannel) != 0 ) composite->blue=fabs((double) (p->blue-q->blue)); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=fabs((double) (p->index-q->index)); } } static MagickRealType Divide(const MagickRealType Sca,const MagickRealType Sa, const MagickRealType Dca,const MagickRealType Da) { /* Divide Source by Destination f(Sc,Dc) = Sc / Dc But with appropriate handling for special case of Dc == 0 specifically so that f(Black,Black)=Black and f(non-Black,Black)=White. It is however also important to correctly do 'over' alpha blending which is why the formula becomes so complex. */ if ((fabs(Sca) < MagickEpsilon) && (fabs(Dca) < MagickEpsilon)) return(Sca*(1.0-Da)+Dca*(1.0-Sa)); if (fabs(Dca) < MagickEpsilon) return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sca*Da*Da*PerceptibleReciprocal(Dca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeDivide(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*Divide(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*Divide(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*Divide(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Divide(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange*(1.0-Divide(Sa,1.0,Da,1.0)); if ( (channel & RedChannel) != 0 ) composite->red=QuantumRange* Divide(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0); if ( (channel & GreenChannel) != 0 ) composite->green=QuantumRange* Divide(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0); if ( (channel & BlueChannel) != 0 ) composite->blue=QuantumRange* Divide(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=QuantumRange* Divide(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0); } } static MagickRealType Exclusion(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { return(Sca*Da+Dca*Sa-2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeExclusion(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { MagickRealType gamma, Sa, Da; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*Exclusion(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*Exclusion(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*Exclusion(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Exclusion(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } else { /* handle channels as separate grayscale channels */ if ((channel & AlphaChannel) != 0) composite->opacity=QuantumRange*(1.0-Exclusion(Sa,1.0,Da,1.0)); if ((channel & RedChannel) != 0) composite->red=QuantumRange*Exclusion(QuantumScale*p->red,1.0, QuantumScale*q->red,1.0); if ((channel & GreenChannel) != 0) composite->green=QuantumRange*Exclusion(QuantumScale*p->green,1.0, QuantumScale*q->green,1.0); if ((channel & BlueChannel) != 0) composite->blue=QuantumRange*Exclusion(QuantumScale*p->blue,1.0, QuantumScale*q->blue,1.0); if (((channel & IndexChannel) != 0) && (q->colorspace == CMYKColorspace)) composite->index=QuantumRange*Exclusion(QuantumScale*p->index,1.0, QuantumScale*q->index,1.0); } } static MagickRealType HardLight(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { if ((2.0*Sca) < Sa) return(2.0*Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); return(Sa*Da-2.0*(Da-Dca)*(Sa-Sca)+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeHardLight(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*HardLight(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*HardLight(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*HardLight(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*HardLight(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static MagickRealType HardMix(const MagickRealType Sca, const MagickRealType Dca) { if ((Sca+Dca) < QuantumRange) return(0.0); else return(1.0); } static inline void CompositeHardMix(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red=gamma*HardMix(p->red*Sa,q->red*Da); composite->green=gamma*HardMix(p->green*Sa,q->green*Da); composite->blue=gamma*HardMix(p->blue*Sa,q->blue*Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*HardMix(p->index*Sa,q->index*Da); } static void HCLComposite(const double hue,const double chroma,const double luma, MagickRealType *red,MagickRealType *green,MagickRealType *blue) { double b, c, g, h, m, r, x; /* Convert HCL to RGB colorspace. 
*/ assert(red != (MagickRealType *) NULL); assert(green != (MagickRealType *) NULL); assert(blue != (MagickRealType *) NULL); h=6.0*hue; c=chroma; x=c*(1.0-fabs(fmod(h,2.0)-1.0)); r=0.0; g=0.0; b=0.0; if ((0.0 <= h) && (h < 1.0)) { r=c; g=x; } else if ((1.0 <= h) && (h < 2.0)) { r=x; g=c; } else if ((2.0 <= h) && (h < 3.0)) { g=c; b=x; } else if ((3.0 <= h) && (h < 4.0)) { g=x; b=c; } else if ((4.0 <= h) && (h < 5.0)) { r=x; b=c; } else if ((5.0 <= h) && (h < 6.0)) { r=c; b=x; } m=luma-(0.298839*r+0.586811*g+0.114350*b); *red=QuantumRange*(r+m); *green=QuantumRange*(g+m); *blue=QuantumRange*(b+m); } static void CompositeHCL(const MagickRealType red,const MagickRealType green, const MagickRealType blue,double *hue,double *chroma,double *luma) { double b, c, g, h, max, r; /* Convert RGB to HCL colorspace. */ assert(hue != (double *) NULL); assert(chroma != (double *) NULL); assert(luma != (double *) NULL); r=(double) red; g=(double) green; b=(double) blue; max=MagickMax(r,MagickMax(g,b)); c=max-(double) MagickMin(r,MagickMin(g,b)); h=0.0; if (c == 0) h=0.0; else if (red == (MagickRealType) max) h=fmod((g-b)/c+6.0,6.0); else if (green == (MagickRealType) max) h=((b-r)/c)+2.0; else if (blue == (MagickRealType) max) h=((r-g)/c)+4.0; *hue=(h/6.0); *chroma=QuantumScale*c; *luma=QuantumScale*(0.298839*r+0.586811*g+0.114350*b); } static inline MagickRealType In(const MagickRealType p,const MagickRealType Sa, const MagickRealType magick_unused(q),const MagickRealType Da) { magick_unreferenced(q); return(Sa*p*Da); } static inline void CompositeIn(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { double gamma; MagickRealType Sa, Da; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=Sa*Da; composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=gamma*In(p->red,Sa,q->red,Da); composite->green=gamma*In(p->green,Sa,q->green,Da); 
composite->blue=gamma*In(p->blue,Sa,q->blue,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*In(p->index,Sa,q->index,Da); } static inline MagickRealType Lighten(const MagickRealType p, const MagickRealType alpha,const MagickRealType q,const MagickRealType beta) { if (p > q) return(MagickOver_(p,alpha,q,beta)); /* src-over */ return(MagickOver_(q,beta,p,alpha)); /* dst-over */ } static inline void CompositeLighten(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Lighten is also equvalent to a 'Maximum' method OR a greyscale version of a binary 'And' OR the 'Union' of pixel sets. */ double gamma; if ( (channel & SyncChannels) != 0 ) { composite->opacity=QuantumScale*p->opacity*q->opacity; /* Over Blend */ gamma=1.0-QuantumScale*composite->opacity; gamma=PerceptibleReciprocal(gamma); composite->red=gamma*Lighten(p->red,p->opacity,q->red,q->opacity); composite->green=gamma*Lighten(p->green,p->opacity,q->green,q->opacity); composite->blue=gamma*Lighten(p->blue,p->opacity,q->blue,q->opacity); if (q->colorspace == CMYKColorspace) composite->index=gamma*Lighten(p->index,p->opacity,q->index,q->opacity); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=MagickMin(p->opacity,q->opacity); if ( (channel & RedChannel) != 0 ) composite->red=MagickMax(p->red,q->red); if ( (channel & GreenChannel) != 0 ) composite->green=MagickMax(p->green,q->green); if ( (channel & BlueChannel) != 0 ) composite->blue=MagickMax(p->blue,q->blue); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=MagickMax(p->index,q->index); } } static inline void CompositeLightenIntensity(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { /* Select the pixel based on the intensity level. If 'Sync' flag select whole pixel based on alpha weighted intensity. 
Otherwise use Intenisty only, but restrict copy according to channel. */ if ( (channel & SyncChannels) != 0 ) { MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; Da=1.0-QuantumScale*q->opacity; *composite = (Sa*MagickPixelIntensity(p) > Da*MagickPixelIntensity(q)) ? *p : *q; } else { int from_p = (MagickPixelIntensity(p) > MagickPixelIntensity(q)); if ( (channel & AlphaChannel) != 0 ) composite->opacity = from_p ? p->opacity : q->opacity; if ( (channel & RedChannel) != 0 ) composite->red = from_p ? p->red : q->red; if ( (channel & GreenChannel) != 0 ) composite->green = from_p ? p->green : q->green; if ( (channel & BlueChannel) != 0 ) composite->blue = from_p ? p->blue : q->blue; if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index = from_p ? p->index : q->index; } } #if 0 static inline MagickRealType LinearDodge(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { /* LinearDodge: simplifies to a trivial formula f(Sc,Dc) = Sc + Dc Dca' = Sca + Dca */ return(Sca+Dca); } #endif static inline void CompositeLinearDodge(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=gamma*(p->red*Sa+q->red*Da); composite->green=gamma*(p->green*Sa+q->green*Da); composite->blue=gamma*(p->blue*Sa+q->blue*Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*(p->index*Sa+q->index*Da); } static inline MagickRealType LinearBurn(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { /* LinearBurn: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: 
f(Sc,Dc) = Sc + Dc - 1 */ return(Sca+Dca-Sa*Da); } static inline void CompositeLinearBurn(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red=gamma*LinearBurn(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*LinearBurn(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*LinearBurn(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*LinearBurn(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static inline MagickRealType LinearLight(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { #if 0 /* Previous formula, was only valid for fully-opaque images. */ return(Dca+2*Sca-1.0); #else /* LinearLight: as defined by Abode Photoshop, according to http://www.simplefilter.de/en/basics/mixmods.html is: f(Sc,Dc) = Dc + 2*Sc - 1 */ return((Sca-Sa)*Da+Sca+Dca); #endif } static inline void CompositeLinearLight(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*LinearLight(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*LinearLight(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*LinearLight(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*LinearLight(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static inline MagickRealType Mathematics(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da, const GeometryInfo *geometry_info) { /* 'Mathematics' a free form user control mathematical composition is defined as... f(Sc,Dc) = A*Sc*Dc + B*Sc + C*Dc + D Where the arguments A,B,C,D are (currently) passed to composite as a command separated 'geometry' string in "compose:args" image artifact. A = a->rho, B = a->sigma, C = a->xi, D = a->psi Applying the SVG transparency formula (see above), we get... Dca' = Sa*Da*f(Sc,Dc) + Sca*(1.0-Da) + Dca*(1.0-Sa) Dca' = A*Sca*Dca + B*Sca*Da + C*Dca*Sa + D*Sa*Da + Sca*(1.0-Da) + Dca*(1.0-Sa) */ return(geometry_info->rho*Sca*Dca+geometry_info->sigma*Sca*Da+ geometry_info->xi*Dca*Sa+geometry_info->psi*Sa*Da+Sca*(1.0-Da)+ Dca*(1.0-Sa)); } static inline void CompositeMathematics(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, const GeometryInfo *args, MagickPixelPacket *composite) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* ??? - AT */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? 
MagickEpsilon : gamma); composite->red=gamma*Mathematics(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da,args); composite->green=gamma*Mathematics(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da,args); composite->blue=gamma*Mathematics(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da,args); if (q->colorspace == CMYKColorspace) composite->index=gamma*Mathematics(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da,args); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange*(1.0-Mathematics(Sa,1.0,Da,1.0,args)); if ( (channel & RedChannel) != 0 ) composite->red=QuantumRange* Mathematics(QuantumScale*p->red,1.0,QuantumScale*q->red,1.0,args); if ( (channel & GreenChannel) != 0 ) composite->green=QuantumRange* Mathematics(QuantumScale*p->green,1.0,QuantumScale*q->green,1.0,args); if ( (channel & BlueChannel) != 0 ) composite->blue=QuantumRange* Mathematics(QuantumScale*p->blue,1.0,QuantumScale*q->blue,1.0,args); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=QuantumRange* Mathematics(QuantumScale*p->index,1.0,QuantumScale*q->index,1.0,args); } } static inline void CompositePlus(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { if ( (channel & SyncChannels) != 0 ) { /* NOTE: "Plus" does not use 'over' alpha-blending but uses a special 'plus' form of alph-blending. It is the ONLY mathematical operator to do this. this is what makes it different to the otherwise equivalent "LinearDodge" composition method. Note however that color channels are still effected by the alpha channel as a result of the blending, making it just as useless for independant channel maths, just like all other mathematical composition methods. As such the removal of the 'sync' flag, is still a usful convention. 
The MagickPixelCompositePlus() function is defined in "composite-private.h" so it can also be used for Image Blending. */ MagickPixelCompositePlus(p,p->opacity,q,q->opacity,composite); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=p->opacity+q->opacity-QuantumRange; if ( (channel & RedChannel) != 0 ) composite->red=p->red+q->red; if ( (channel & GreenChannel) != 0 ) composite->green=p->green+q->green; if ( (channel & BlueChannel) != 0 ) composite->blue=p->blue+q->blue; if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=p->index+q->index; } } static inline MagickRealType Minus(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca, const MagickRealType magick_unused(Da)) { /* Minus Source from Destination f(Sc,Dc) = Sc - Dc */ magick_unreferenced(Da); return(Sca+Dca-2*Dca*Sa); } static inline void CompositeMinus(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=gamma*Minus(p->red*Sa,Sa,q->red*Da,Da); composite->green=gamma*Minus(p->green*Sa,Sa,q->green*Da,Da); composite->blue=gamma*Minus(p->blue*Sa,Sa,q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Minus(p->index*Sa,Sa,q->index*Da,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange*(1.0-(Sa-Da)); if ( (channel & RedChannel) != 0 ) composite->red=p->red-q->red; if ( (channel & GreenChannel) != 0 ) composite->green=p->green-q->green; if ( (channel & BlueChannel) != 0 ) 
composite->blue=p->blue-q->blue; if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=p->index-q->index; } } static inline MagickRealType ModulusAdd(const MagickRealType Sc, const MagickRealType Sa,const MagickRealType Dc,const MagickRealType Da) { if (((Sc*Sa)+(Dc*Da)) <= QuantumRange) return((Sc*Sa)+Dc*Da); return(((Sc*Sa)+Dc*Da)-QuantumRange); } static inline void CompositeModulusAdd(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { if ( (channel & SyncChannels) != 0 ) { double gamma; MagickRealType Sa, Da; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=ModulusAdd(p->red,Sa,q->red,Da); composite->green=ModulusAdd(p->green,Sa,q->green,Da); composite->blue=ModulusAdd(p->blue,Sa,q->blue,Da); if (q->colorspace == CMYKColorspace) composite->index=ModulusAdd(p->index,Sa,q->index,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange-ModulusAdd(QuantumRange-p->opacity, 1.0,QuantumRange-q->opacity,1.0); if ( (channel & RedChannel) != 0 ) composite->red=ModulusAdd(p->red,1.0,q->red,1.0); if ( (channel & GreenChannel) != 0 ) composite->green=ModulusAdd(p->green,1.0,q->green,1.0); if ( (channel & BlueChannel) != 0 ) composite->blue=ModulusAdd(p->blue,1.0,q->blue,1.0); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=ModulusAdd(p->index,1.0,q->index,1.0); } } static inline MagickRealType ModulusSubtract(const MagickRealType Sc, const MagickRealType Sa,const MagickRealType Dc,const MagickRealType Da) { if (((Sc*Sa)-(Dc*Da)) <= 0.0) return((Sc*Sa)-Dc*Da); return(((Sc*Sa)-Dc*Da)+QuantumRange); } static inline void 
CompositeModulusSubtract(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { if ( (channel & SyncChannels) != 0 ) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma = RoundToUnity(Sa+Da-Sa*Da); composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=ModulusSubtract(p->red,Sa,q->red,Da); composite->green=ModulusSubtract(p->green,Sa,q->green,Da); composite->blue=ModulusSubtract(p->blue,Sa,q->blue,Da); if (q->colorspace == CMYKColorspace) composite->index=ModulusSubtract(p->index,Sa,q->index,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange-ModulusSubtract(QuantumRange-p->opacity, 1.0,QuantumRange-q->opacity,1.0); if ( (channel & RedChannel) != 0 ) composite->red=ModulusSubtract(p->red,1.0,q->red,1.0); if ( (channel & GreenChannel) != 0 ) composite->green=ModulusSubtract(p->green,1.0,q->green,1.0); if ( (channel & BlueChannel) != 0 ) composite->blue=ModulusSubtract(p->blue,1.0,q->blue,1.0); if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=ModulusSubtract(p->index,1.0,q->index,1.0); } } static inline MagickRealType Multiply(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { return(Sca*Dca+Sca*(1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositeMultiply(const MagickPixelPacket *p, const MagickPixelPacket *q,const ChannelType channel, MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; if ( (channel & SyncChannels) != 0 ) { gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); 
gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red=gamma*Multiply(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*Multiply(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*Multiply(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Multiply(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } else { /* handle channels as separate grayscale channels */ if ( (channel & AlphaChannel) != 0 ) composite->opacity=QuantumRange*(1.0-Sa*Da); if ( (channel & RedChannel) != 0 ) composite->red=QuantumScale*p->red*q->red; if ( (channel & GreenChannel) != 0 ) composite->green=QuantumScale*p->green*q->green; if ( (channel & BlueChannel) != 0 ) composite->blue=QuantumScale*p->blue*q->blue; if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace) composite->index=QuantumScale*p->index*q->index; } } static inline MagickRealType Out(const MagickRealType p, const MagickRealType Sa,const MagickRealType magick_unused(q), const MagickRealType Da) { magick_unreferenced(q); return(Sa*p*(1.0-Da)); } static inline void CompositeOut(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { double gamma; MagickRealType Da, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=Sa*(1.0-Da); composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=PerceptibleReciprocal(gamma); composite->red=gamma*Out(p->red,Sa,q->red,Da); composite->green=gamma*Out(p->green,Sa,q->green,Da); composite->blue=gamma*Out(p->blue,Sa,q->blue,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*Out(p->index,Sa,q->index,Da); } static MagickRealType PegtopLight(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { /* PegTop: A Soft-Light alternative: A continuous version of the 
Softlight function, producing very similar results. f(Sc,Dc) = Dc^2*(1-2*Sc) + 2*Sc*Dc See http://www.pegtop.net/delphi/articles/blendmodes/softlight.htm. */ if (fabs(Da) < MagickEpsilon) return(Sca); return(Dca*Dca*(Sa-2.0*Sca)*PerceptibleReciprocal(Da)+Sca*(2.0*Dca+1.0-Da)+Dca*(1.0-Sa)); } static inline void CompositePegtopLight(const MagickPixelPacket *p, const MagickPixelPacket *q,MagickPixelPacket *composite) { MagickRealType Da, gamma, Sa; Sa=1.0-QuantumScale*p->opacity; /* simplify and speed up equations */ Da=1.0-QuantumScale*q->opacity; gamma=RoundToUnity(Sa+Da-Sa*Da); /* over blend, as per SVG doc */ composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma); gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma); composite->red=gamma*PegtopLight(QuantumScale*p->red*Sa,Sa,QuantumScale* q->red*Da,Da); composite->green=gamma*PegtopLight(QuantumScale*p->green*Sa,Sa,QuantumScale* q->green*Da,Da); composite->blue=gamma*PegtopLight(QuantumScale*p->blue*Sa,Sa,QuantumScale* q->blue*Da,Da); if (q->colorspace == CMYKColorspace) composite->index=gamma*PegtopLight(QuantumScale*p->index*Sa,Sa,QuantumScale* q->index*Da,Da); } static MagickRealType PinLight(const MagickRealType Sca, const MagickRealType Sa,const MagickRealType Dca,const MagickRealType Da) { /* PinLight: A Photoshop 7 composition method http://www.simplefilter.de/en/basics/mixmods.html f(Sc,Dc) = Dc<2*Sc-1 ? 2*Sc-1 : Dc>2*Sc ? 
2*Sc : Dc */
  /*
    NOTE(review): the line above closes a formula comment, and the code below
    is the tail of PinLight(); the function signature lies before this chunk.
  */
  if (Dca*Sa < Da*(2*Sca-Sa))
    return(Sca*(Da+1.0)-Sa*Da+Dca*(1.0-Sa));
  if ((Dca*Sa) > (2*Sca*Da))
    return(Sca*Da+Sca+Dca*(1.0-Sa));
  return(Sca*(1.0-Da)+Dca);
}

/*
  Apply the PinLight operator channel-by-channel to the source (p) and
  canvas (q) pixels; the alpha channels are "over"-blended as per the SVG
  compositing specification.
*/
static inline void CompositePinLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  /* avoid dividing by zero when the result is fully transparent */
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*PinLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*PinLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*PinLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*PinLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

static inline MagickRealType Screen(const MagickRealType Sca,
  const MagickRealType Dca)
{
  /*
    Screen:  A negated multiply
      f(Sc,Dc) = 1.0-(1.0-Sc)*(1.0-Dc)
  */
  return(Sca+Dca-Sca*Dca);
}

/*
  Screen-blend source (p) onto canvas (q).  With SyncChannels the pixel is
  treated as an alpha-weighted color; otherwise each selected channel is
  screened independently as a grayscale channel.
*/
static inline void CompositeScreen(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const ChannelType channel,
  MagickPixelPacket *composite)
{
  double
    gamma;

  MagickRealType
    Da,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  if ( (channel & SyncChannels) != 0 )
    {
      gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
      composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
      Sa*=(MagickRealType) QuantumScale;
      Da*=(MagickRealType) QuantumScale; /* optimization */
      gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
      composite->red=gamma*Screen(p->red*Sa,q->red*Da);
      composite->green=gamma*Screen(p->green*Sa,q->green*Da);
      composite->blue=gamma*Screen(p->blue*Sa,q->blue*Da);
      if (q->colorspace == CMYKColorspace)
        composite->index=gamma*Screen(p->index*Sa,q->index*Da);
    }
  else
    { /* handle channels as separate grayscale channels */
      if ( (channel & AlphaChannel) != 0 )
        composite->opacity=QuantumRange*(1.0-Screen(Sa,Da));
      if ( (channel & RedChannel) != 0 )
        composite->red=QuantumRange*Screen(QuantumScale*p->red,
          QuantumScale*q->red);
      if ( (channel & GreenChannel) != 0 )
        composite->green=QuantumRange*Screen(QuantumScale*p->green,
          QuantumScale*q->green);
      if ( (channel & BlueChannel) != 0 )
        composite->blue=QuantumRange*Screen(QuantumScale*p->blue,
          QuantumScale*q->blue);
      if ( (channel & IndexChannel) != 0 && q->colorspace == CMYKColorspace)
        composite->index=QuantumRange*Screen(QuantumScale*p->index,
          QuantumScale*q->index);
    }
}

/*
  SoftLight: piecewise blend selected on 2*Sca vs Sa and 4*Dca vs Da
  (the shape matches the surrounding SVG-doc blend helpers).
*/
static MagickRealType SoftLight(const MagickRealType Sca,
  const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da)
{
  MagickRealType
    alpha,
    beta;

  alpha=Dca*PerceptibleReciprocal(Da);  /* canvas color with alpha removed */
  if ((2.0*Sca) < Sa)
    return(Dca*(Sa+(2.0*Sca-Sa)*(1.0-alpha))+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if (((2.0*Sca) > Sa) && ((4.0*Dca) <= Da))
    {
      beta=Dca*Sa+Da*(2.0*Sca-Sa)*(4.0*alpha*(4.0*alpha+1.0)*(alpha-1.0)+7.0*
        alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa);
      return(beta);
    }
  beta=Dca*Sa+Da*(2.0*Sca-Sa)*(pow(alpha,0.5)-alpha)+Sca*(1.0-Da)+Dca*(1.0-Sa);
  return(beta);
}

/*
  Apply the SoftLight operator channel-by-channel to the source (p) and
  canvas (q) pixels; alpha is "over"-blended as per the SVG compositing doc.
*/
static inline void CompositeSoftLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  /* avoid dividing by zero when the result is fully transparent */
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*SoftLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*SoftLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*SoftLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*SoftLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/*
  Deprecated

  Multiply difference by amount, if difference larger than threshold???
  What use this is is completely unknown.
  The Opacity calculation appears to be inverted  -- Anthony Thyssen
*/
static inline MagickRealType Threshold(const MagickRealType p,
  const MagickRealType q,const MagickRealType threshold,
  const MagickRealType amount)
{
  MagickRealType
    delta;

  delta=p-q;
  if ((MagickRealType) fabs((double) (2.0*delta)) < threshold)
    return(q);
  return(q+delta*amount);
}

static inline void CompositeThreshold(const MagickPixelPacket *p,
  const MagickPixelPacket *q,const MagickRealType threshold,
  const MagickRealType amount,MagickPixelPacket *composite)
{
  composite->red=Threshold(p->red,q->red,threshold,amount);
  composite->green=Threshold(p->green,q->green,threshold,amount);
  composite->blue=Threshold(p->blue,q->blue,threshold,amount);
  /*
    NOTE(review): opacity is negated here, unlike the color channels; the
    original author's comment above flags this as "appears to be inverted"
    and the method is deprecated, so the behavior is deliberately left as-is.
  */
  composite->opacity=QuantumRange-Threshold(p->opacity,q->opacity,
    threshold,amount);
  if (q->colorspace == CMYKColorspace)
    composite->index=Threshold(p->index,q->index,threshold,amount);
}

static MagickRealType VividLight(const MagickRealType Sca,
  const MagickRealType Sa, const MagickRealType Dca, const MagickRealType Da)
{
  /*
    VividLight: A Photoshop 7 composition method.  See
    http://www.simplefilter.de/en/basics/mixmods.html.

    f(Sc,Dc) = (2*Sc < 1) ? 1-(1-Dc)/(2*Sc) : Dc/(2*(1-Sc))
  */
  /* degenerate source alpha, or Sca==Sa: fall back to a plain over blend */
  if ((fabs(Sa) < MagickEpsilon) || (fabs(Sca-Sa) < MagickEpsilon))
    return(Sa*Da+Sca*(1.0-Da)+Dca*(1.0-Sa));
  if ((2*Sca) <= Sa)
    return(Sa*(Da+Sa*(Dca-Da)*PerceptibleReciprocal(2.0*Sca))+Sca*(1.0-Da)+
      Dca*(1.0-Sa));
  return(Dca*Sa*Sa*PerceptibleReciprocal(2.0*(Sa-Sca))+Sca*(1.0-Da)+Dca*
    (1.0-Sa));
}

/*
  Apply the VividLight operator channel-by-channel to the source (p) and
  canvas (q) pixels; alpha is "over"-blended as per the SVG compositing doc.
*/
static inline void CompositeVividLight(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=RoundToUnity(Sa+Da-Sa*Da);  /* over blend, as per SVG doc */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  /* avoid dividing by zero when the result is fully transparent */
  gamma=QuantumRange/(fabs(gamma) < MagickEpsilon ? MagickEpsilon : gamma);
  composite->red=gamma*VividLight(QuantumScale*p->red*Sa,Sa,QuantumScale*
    q->red*Da,Da);
  composite->green=gamma*VividLight(QuantumScale*p->green*Sa,Sa,QuantumScale*
    q->green*Da,Da);
  composite->blue=gamma*VividLight(QuantumScale*p->blue*Sa,Sa,QuantumScale*
    q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*VividLight(QuantumScale*p->index*Sa,Sa,QuantumScale*
      q->index*Da,Da);
}

/*
  Xor: Porter-Duff "xor" -- each pixel contributes only where the other
  is transparent.
*/
static MagickRealType Xor(const MagickRealType Sca,const MagickRealType Sa,
  const MagickRealType Dca,const MagickRealType Da)
{
  return(Sca*(1.0-Da)+Dca*(1.0-Sa));
}

static inline void CompositeXor(const MagickPixelPacket *p,
  const MagickPixelPacket *q,MagickPixelPacket *composite)
{
  MagickRealType
    Da,
    gamma,
    Sa;

  Sa=1.0-QuantumScale*p->opacity;  /* simplify and speed up equations */
  Da=1.0-QuantumScale*q->opacity;
  gamma=Sa+Da-2*Sa*Da;        /* Xor blend mode X=0,Y=1,Z=1 */
  composite->opacity=(MagickRealType) QuantumRange*(1.0-gamma);
  gamma=PerceptibleReciprocal(gamma);
  composite->red=gamma*Xor(p->red*Sa,Sa,q->red*Da,Da);
  composite->green=gamma*Xor(p->green*Sa,Sa,q->green*Da,Da);
  composite->blue=gamma*Xor(p->blue*Sa,Sa,q->blue*Da,Da);
  if (q->colorspace == CMYKColorspace)
    composite->index=gamma*Xor(p->index*Sa,Sa,q->index*Da,Da);
}

/*
  CompositeImage(): composite source_image over `image' at the given offset
  using the default channel set; thin wrapper over CompositeImageChannel().
*/
MagickExport MagickBooleanType CompositeImage(Image *image,
  const CompositeOperator compose,const Image *source_image,
  const ssize_t x_offset,const ssize_t y_offset)
{
  MagickBooleanType
    status;

  status=CompositeImageChannel(image,DefaultChannels,compose,source_image,
    x_offset,y_offset);
  return(status);
}

/*
  CompositeImageChannel(): composite `composite' onto `image' at
  (x_offset,y_offset) with the given operator, restricted to `channel'.
  Returns MagickTrue on success.  Special operators (Copy fast path, Blur,
  Displace/Distort) are pre-processed before the generic per-pixel loop;
  "compose:args" and related image artifacts tune several operators.
*/
MagickExport MagickBooleanType CompositeImageChannel(Image *image,
  const ChannelType channel,const CompositeOperator compose,
  const Image *composite,const ssize_t x_offset,const ssize_t y_offset)
{
#define CompositeImageTag  "Composite/Image"

  CacheView
    *source_view,
    *image_view;

  const char
    *value;

  ExceptionInfo
    *exception;

  GeometryInfo
    geometry_info;

  Image
    *canvas_image,
    *source_image;

  MagickBooleanType
    clamp,
    clip_to_self,
    status;

  MagickOffsetType
    progress;

  MagickPixelPacket
    zero;

  MagickRealType
    amount,
    canvas_dissolve,
    midpoint,
    percent_luma,
    percent_chroma,
    source_dissolve,
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Prepare composite image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(composite != (Image *) NULL);
  assert(composite->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  exception=(&image->exception);
  /* work on a private clone so the caller's composite image is untouched */
  source_image=CloneImage(composite,0,0,MagickTrue,exception);
  if (source_image == (const Image *) NULL)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) == MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace);
  (void) SetImageColorspace(source_image,image->colorspace);
  GetMagickPixelPacket(image,&zero);
  canvas_image=(Image *) NULL;
  /* per-operator tunables; defaults below, possibly overridden by
     "compose:args" in the switch that follows */
  amount=0.5;
  canvas_dissolve=1.0;
  clip_to_self=MagickTrue;
  percent_luma=100.0;
  percent_chroma=100.0;
  source_dissolve=1.0;
  threshold=0.05f;
  switch (compose)
  {
    case ClearCompositeOp:
    case SrcCompositeOp:
    case InCompositeOp:
    case SrcInCompositeOp:
    case OutCompositeOp:
    case SrcOutCompositeOp:
    case DstInCompositeOp:
    case DstAtopCompositeOp:
    {
      /*
        Modify canvas outside the overlaid region.
      */
      clip_to_self=MagickFalse;
      break;
    }
    case OverCompositeOp:
    {
      if (image->matte != MagickFalse)
        break;
      if (source_image->matte != MagickFalse)
        break;
      /* both images fully opaque: fall through to the fast copy path */
    }
    case CopyCompositeOp:
    {
      /* fast path: straight memcpy per row when the source fits entirely
         inside the canvas; otherwise fall back to the generic loop */
      if ((x_offset < 0) || (y_offset < 0))
        break;
      if ((x_offset+(ssize_t) source_image->columns) >= (ssize_t) image->columns)
        break;
      if ((y_offset+(ssize_t) source_image->rows) >= (ssize_t) image->rows)
        break;
      status=MagickTrue;
      source_view=AcquireVirtualCacheView(source_image,exception);
      image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        magick_number_threads(source_image,image,source_image->rows,1)
#endif
      for (y=0; y < (ssize_t) source_image->rows; y++)
      {
        MagickBooleanType
          sync;

        register const IndexPacket
          *source_indexes;

        register const PixelPacket
          *p;

        register IndexPacket
          *indexes;

        register PixelPacket
          *q;

        if (status == MagickFalse)
          continue;
        p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,
          1,exception);
        q=GetCacheViewAuthenticPixels(image_view,x_offset,y+y_offset,
          source_image->columns,1,exception);
        if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
          {
            status=MagickFalse;
            continue;
          }
        source_indexes=GetCacheViewVirtualIndexQueue(source_view);
        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        (void) memcpy(q,p,source_image->columns*sizeof(*p));
        if ((indexes != (IndexPacket *) NULL) &&
            (source_indexes != (const IndexPacket *) NULL))
          (void) memcpy(indexes,source_indexes,
            source_image->columns*sizeof(*indexes));
        sync=SyncCacheViewAuthenticPixels(image_view,exception);
        if (sync == MagickFalse)
          status=MagickFalse;
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,CompositeImageTag,
              (MagickOffsetType) y,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      source_view=DestroyCacheView(source_view);
      image_view=DestroyCacheView(image_view);
      source_image=DestroyImage(source_image);
      return(status);
    }
    case CopyOpacityCompositeOp:
    case ChangeMaskCompositeOp:
    {
      /*
        Modify canvas outside the overlaid region and require an alpha
        channel to exist, to add transparency.
      */
      if (image->matte == MagickFalse)
        (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
      clip_to_self=MagickFalse;
      break;
    }
    case BlurCompositeOp:
    {
      CacheView
        *canvas_view,
        *source_view;

      MagickPixelPacket
        pixel;

      MagickRealType
        angle_range,
        angle_start,
        height,
        width;

      ResampleFilter
        *resample_filter;

      SegmentInfo
        blur;

      /*
        Blur Image by resampling.

        Blur Image dictated by an overlay gradient map:
          X = red_channel;  Y = green_channel;
          compose:args = x_scale[,y_scale[,angle]].
      */
      canvas_image=CloneImage(image,0,0,MagickTrue,exception);
      if (canvas_image == (Image *) NULL)
        {
          source_image=DestroyImage(source_image);
          return(MagickFalse);
        }
      /*
        Gather the maximum blur sigma values from user.
      */
      SetGeometryInfo(&geometry_info);
      flags=NoValue;
      value=GetImageArtifact(image,"compose:args");
      if (value != (char *) NULL)
        flags=ParseGeometry(value,&geometry_info);
      if ((flags & WidthValue) == 0)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            OptionWarning,"InvalidGeometry","'%s' '%s'","compose:args",value);
          source_image=DestroyImage(source_image);
          canvas_image=DestroyImage(canvas_image);
          return(MagickFalse);
        }
      /*
        The user's input sigma now needs to be converted to the EWA ellipse
        size.  The filter defaults to a sigma of 0.5 so to make this match
        the user's input the ellipse size needs to be doubled.
      */
      width=height=geometry_info.rho*2.0;
      if ((flags & HeightValue) != 0 )
        height=geometry_info.sigma*2.0;
      /*
        Default the unrotated ellipse width and height axis vectors.
      */
      blur.x1=width;
      blur.x2=0.0;
      blur.y1=0.0;
      blur.y2=height;
      /* rotate vectors if a rotation angle is given */
      if ((flags & XValue) != 0 )
        {
          MagickRealType
            angle;

          angle=DegreesToRadians(geometry_info.xi);
          blur.x1=width*cos(angle);
          blur.x2=width*sin(angle);
          blur.y1=(-height*sin(angle));
          blur.y2=height*cos(angle);
        }
      /* otherwise let's set an angle range and calculate in the loop */
      angle_start=0.0;
      angle_range=0.0;
      if ((flags & YValue) != 0 )
        {
          angle_start=DegreesToRadians(geometry_info.xi);
          angle_range=DegreesToRadians(geometry_info.psi)-angle_start;
        }
      /*
        Set up a gaussian cylindrical filter for EWA blurring.

        As the minimum ellipse radius of support*1.0 the EWA algorithm
        can only produce a minimum blur of 0.5 for Gaussian (support=2.0).
        This means that even 'No Blur' will be still a little blurry!

        The solution (as well as the problem of preventing any user
        expert filter settings) is to set our own user settings, then
        restore them afterwards.
      */
      resample_filter=AcquireResampleFilter(image,exception);
      SetResampleFilter(resample_filter,GaussianFilter,1.0);
      /* do the variable blurring of each pixel in image */
      pixel=zero;
      source_view=AcquireVirtualCacheView(source_image,exception);
      canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
      for (y=0; y < (ssize_t) source_image->rows; y++)
      {
        MagickBooleanType
          sync;

        register const PixelPacket
          *magick_restrict p;

        register PixelPacket
          *magick_restrict r;

        register IndexPacket
          *magick_restrict canvas_indexes;

        register ssize_t
          x;

        if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
          continue;
        p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,
          1,exception);
        r=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,
          1,exception);
        if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL))
          break;
        canvas_indexes=GetCacheViewAuthenticIndexQueue(canvas_view);
        for (x=0; x < (ssize_t) source_image->columns; x++)
        {
          if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
            {
              p++;
              continue;
            }
          if (fabs(angle_range) > MagickEpsilon)
            {
              MagickRealType
                angle;

              /* blue channel of the map selects the per-pixel blur angle */
              angle=angle_start+angle_range*QuantumScale*GetPixelBlue(p);
              blur.x1=width*cos(angle);
              blur.x2=width*sin(angle);
              blur.y1=(-height*sin(angle));
              blur.y2=height*cos(angle);
            }
#if 0
          if ( x == 10 && y == 60 ) {
            fprintf(stderr, "blur.x=%lf,%lf, blur.y=%lf,%lf\n",
              blur.x1, blur.x2, blur.y1, blur.y2);
            fprintf(stderr, "scaled by=%lf,%lf\n",
              QuantumScale*GetPixelRed(p), QuantumScale*GetPixelGreen(p));
          }
#endif
          /* red/green channels of the map scale the ellipse axes */
          ScaleResampleFilter(resample_filter,
            blur.x1*QuantumScale*GetPixelRed(p),
            blur.y1*QuantumScale*GetPixelGreen(p),
            blur.x2*QuantumScale*GetPixelRed(p),
            blur.y2*QuantumScale*GetPixelGreen(p));
          (void) ResamplePixelColor(resample_filter,(double) x_offset+x,
            (double) y_offset+y,&pixel);
          SetPixelPacket(canvas_image,&pixel,r,canvas_indexes+x);
          p++;
          r++;
        }
        sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
        if (sync == MagickFalse)
          break;
      }
      resample_filter=DestroyResampleFilter(resample_filter);
      source_view=DestroyCacheView(source_view);
      canvas_view=DestroyCacheView(canvas_view);
      source_image=DestroyImage(source_image);
      /* the blurred canvas becomes the source for the generic loop below */
      source_image=canvas_image;
      break;
    }
    case DisplaceCompositeOp:
    case DistortCompositeOp:
    {
      CacheView
        *canvas_view,
        *source_view,
        *image_view;

      MagickPixelPacket
        pixel;

      MagickRealType
        horizontal_scale,
        vertical_scale;

      PointInfo
        center,
        offset;

      register IndexPacket
        *magick_restrict canvas_indexes;

      register PixelPacket
        *magick_restrict r;

      /*
        Displace/Distort based on overlay gradient map:
          X = red_channel;  Y = green_channel;
          compose:args = x_scale[,y_scale[,center.x,center.y]]
      */
      canvas_image=CloneImage(image,0,0,MagickTrue,exception);
      if (canvas_image == (Image *) NULL)
        {
          source_image=DestroyImage(source_image);
          return(MagickFalse);
        }
      SetGeometryInfo(&geometry_info);
      flags=NoValue;
      value=GetImageArtifact(image,"compose:args");
      if (value != (char *) NULL)
        flags=ParseGeometry(value,&geometry_info);
      if ((flags & (WidthValue | HeightValue)) == 0 )
        {
          /* no scales given: default to half the relevant image extent */
          if ((flags & AspectValue) == 0)
            {
              horizontal_scale=(MagickRealType) (source_image->columns-1)/2.0;
              vertical_scale=(MagickRealType) (source_image->rows-1)/2.0;
            }
          else
            {
              horizontal_scale=(MagickRealType) (image->columns-1)/2.0;
              vertical_scale=(MagickRealType) (image->rows-1)/2.0;
            }
        }
      else
        {
          horizontal_scale=geometry_info.rho;
          vertical_scale=geometry_info.sigma;
          if ((flags & PercentValue) != 0)
            {
              if ((flags & AspectValue) == 0)
                {
                  horizontal_scale*=(source_image->columns-1)/200.0;
                  vertical_scale*=(source_image->rows-1)/200.0;
                }
              else
                {
                  horizontal_scale*=(image->columns-1)/200.0;
                  vertical_scale*=(image->rows-1)/200.0;
                }
            }
          if ((flags & HeightValue) == 0)
            vertical_scale=horizontal_scale;
        }
      /*
        Determine fixed center point for absolute distortion map
          Absolute distort ==
            Displace offset relative to a fixed absolute point
            Select that point according to +X+Y user inputs.
            default = center of overlay image
            arg flag '!' = locations/percentage relative to background image
      */
      center.x=(MagickRealType) x_offset;
      center.y=(MagickRealType) y_offset;
      if (compose == DistortCompositeOp)
        {
          if ((flags & XValue) == 0)
            if ((flags & AspectValue) != 0)
              center.x=((MagickRealType) image->columns-1)/2.0;
            else
              center.x=(MagickRealType) (x_offset+(source_image->columns-1)/
                2.0);
          else
            if ((flags & AspectValue) == 0)
              center.x=(MagickRealType) (x_offset+geometry_info.xi);
            else
              center.x=geometry_info.xi;
          if ((flags & YValue) == 0)
            if ((flags & AspectValue) != 0)
              center.y=((MagickRealType) image->rows-1)/2.0;
            else
              center.y=(MagickRealType) (y_offset+(source_image->rows-1)/2.0);
          else
            if ((flags & AspectValue) != 0)
              center.y=geometry_info.psi;
            else
              center.y=(MagickRealType) (y_offset+geometry_info.psi);
        }
      /*
        Shift the pixel offset point as defined by the provided,
        displacement/distortion map.  -- Like a lens...
      */
      pixel=zero;
      image_view=AcquireVirtualCacheView(image,exception);
      source_view=AcquireVirtualCacheView(source_image,exception);
      canvas_view=AcquireAuthenticCacheView(canvas_image,exception);
      for (y=0; y < (ssize_t) source_image->rows; y++)
      {
        MagickBooleanType
          sync;

        register const PixelPacket
          *magick_restrict p;

        register ssize_t
          x;

        if (((y+y_offset) < 0) || ((y+y_offset) >= (ssize_t) image->rows))
          continue;
        p=GetCacheViewVirtualPixels(source_view,0,y,source_image->columns,
          1,exception);
        r=QueueCacheViewAuthenticPixels(canvas_view,0,y,canvas_image->columns,
          1,exception);
        if ((p == (const PixelPacket *) NULL) || (r == (PixelPacket *) NULL))
          break;
        canvas_indexes=GetCacheViewAuthenticIndexQueue(canvas_view);
        for (x=0; x < (ssize_t) source_image->columns; x++)
        {
          if (((x_offset+x) < 0) || ((x_offset+x) >= (ssize_t) image->columns))
            {
              p++;
              continue;
            }
          /*
            Displace the offset.
          */
          offset.x=(double) ((horizontal_scale*(GetPixelRed(p)-
            (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
            QuantumRange+1.0)/2.0)+center.x+((compose == DisplaceCompositeOp) ?
            x : 0));
          offset.y=(double) ((vertical_scale*(GetPixelGreen(p)-
            (((MagickRealType) QuantumRange+1.0)/2.0)))/(((MagickRealType)
            QuantumRange+1.0)/2.0)+center.y+((compose == DisplaceCompositeOp) ?
            y : 0));
          status=InterpolateMagickPixelPacket(image,image_view,
            UndefinedInterpolatePixel,(double) offset.x,(double) offset.y,
            &pixel,exception);
          if (status == MagickFalse)
            break;
          /*
            Mask with the 'invalid pixel mask' in alpha channel.
          */
          pixel.opacity=(MagickRealType) QuantumRange*(1.0-(1.0-QuantumScale*
            pixel.opacity)*(1.0-QuantumScale*GetPixelOpacity(p)));
          SetPixelPacket(canvas_image,&pixel,r,canvas_indexes+x);
          p++;
          r++;
        }
        if (x < (ssize_t) source_image->columns)
          break;
        sync=SyncCacheViewAuthenticPixels(canvas_view,exception);
        if (sync == MagickFalse)
          break;
      }
      canvas_view=DestroyCacheView(canvas_view);
      source_view=DestroyCacheView(source_view);
      image_view=DestroyCacheView(image_view);
      source_image=DestroyImage(source_image);
      /* the displaced canvas becomes the source for the generic loop below */
      source_image=canvas_image;
      break;
    }
    case DissolveCompositeOp:
    {
      /*
        Geometry arguments to dissolve factors.
      */
      value=GetImageArtifact(image,"compose:args");
      if (value != (char *) NULL)
        {
          flags=ParseGeometry(value,&geometry_info);
          source_dissolve=geometry_info.rho/100.0;
          canvas_dissolve=1.0;
          if ((source_dissolve-MagickEpsilon) < 0.0)
            source_dissolve=0.0;
          /* >100%: over-dissolve fades the canvas instead */
          if ((source_dissolve+MagickEpsilon) > 1.0)
            {
              canvas_dissolve=2.0-source_dissolve;
              source_dissolve=1.0;
            }
          if ((flags & SigmaValue) != 0)
            canvas_dissolve=geometry_info.sigma/100.0;
          if ((canvas_dissolve-MagickEpsilon) < 0.0)
            canvas_dissolve=0.0;
          clip_to_self=MagickFalse;
          if ((canvas_dissolve+MagickEpsilon) > 1.0 )
            {
              canvas_dissolve=1.0;
              clip_to_self=MagickTrue;
            }
        }
      break;
    }
    case BlendCompositeOp:
    {
      value=GetImageArtifact(image,"compose:args");
      if (value != (char *) NULL)
        {
          flags=ParseGeometry(value,&geometry_info);
          source_dissolve=geometry_info.rho/100.0;
          canvas_dissolve=1.0-source_dissolve;
          if ((flags & SigmaValue) != 0)
            canvas_dissolve=geometry_info.sigma/100.0;
          clip_to_self=MagickFalse;
          if ((canvas_dissolve+MagickEpsilon) > 1.0)
            clip_to_self=MagickTrue;
        }
      break;
    }
    case MathematicsCompositeOp:
    {
      /*
        Just collect the values from "compose:args", setting.
        Unused values are set to zero automagically.

        Arguments are normally a comma separated list, so this probably should
        be changed to some 'general comma list' parser, (with a minimum
        number of values)
      */
      SetGeometryInfo(&geometry_info);
      value=GetImageArtifact(image,"compose:args");
      if (value != (char *) NULL)
        (void) ParseGeometry(value,&geometry_info);
      break;
    }
    case ModulateCompositeOp:
    {
      /*
        Determine the luma and chroma scale.
      */
      value=GetImageArtifact(image,"compose:args");
      if (value != (char *) NULL)
        {
          flags=ParseGeometry(value,&geometry_info);
          percent_luma=geometry_info.rho;
          if ((flags & SigmaValue) != 0)
            percent_chroma=geometry_info.sigma;
        }
      break;
    }
    case ThresholdCompositeOp:
    {
      /*
        Determine the amount and threshold.
        This composition method is deprecated.
      */
      value=GetImageArtifact(image,"compose:args");
      if (value != (char *) NULL)
        {
          flags=ParseGeometry(value,&geometry_info);
          amount=geometry_info.rho;
          threshold=geometry_info.sigma;
          if ((flags & SigmaValue) == 0)
            threshold=0.05f;
        }
      threshold*=QuantumRange;
      break;
    }
    default:
      break;
  }
  /*
    Honor user overrides for clipping and clamping behavior.
  */
  value=GetImageArtifact(image,"compose:outside-overlay");
  if (value != (const char *) NULL)
    clip_to_self=IsMagickTrue(value) == MagickFalse ? MagickTrue : MagickFalse;
  value=GetImageArtifact(image,"compose:clip-to-self");
  if (value != (const char *) NULL)
    clip_to_self=IsMagickTrue(value) != MagickFalse ? MagickTrue : MagickFalse;
  clamp=MagickTrue;
  value=GetImageArtifact(image,"compose:clamp");
  if (value != (const char *) NULL)
    clamp=IsMagickTrue(value);
  /*
    Composite image.
  */
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* try the OpenCL-accelerated path first; fall through on failure */
  status=AccelerateCompositeImage(image,channel,compose,source_image,
    x_offset,y_offset,canvas_dissolve,source_dissolve,exception);
  if (status != MagickFalse)
    return(status);
#endif
  status=MagickTrue;
  progress=0;
  midpoint=((MagickRealType) QuantumRange+1.0)/2;
  GetMagickPixelPacket(source_image,&zero);
  source_view=AcquireVirtualCacheView(source_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(source_image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const PixelPacket
      *pixels;

    double
      luma,
      hue,
      chroma,
      sans;

    MagickPixelPacket
      composite,
      canvas,
      source;

    register const IndexPacket
      *magick_restrict source_indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    if (clip_to_self != MagickFalse)
      {
        if (y < y_offset)
          continue;
        if ((y-y_offset) >= (ssize_t) source_image->rows)
          continue;
      }
    /*
      If pixels is NULL, y is outside overlay region.
    */
    pixels=(PixelPacket *) NULL;
    p=(PixelPacket *) NULL;
    if ((y >= y_offset) && ((y-y_offset) < (ssize_t) source_image->rows))
      {
        p=GetCacheViewVirtualPixels(source_view,0,y-y_offset,
          source_image->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        pixels=p;
        /* negative x_offset: skip the source columns left of the canvas */
        if (x_offset < 0)
          p-=x_offset;
      }
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    source_indexes=GetCacheViewVirtualIndexQueue(source_view);
    GetMagickPixelPacket(source_image,&source);
    GetMagickPixelPacket(image,&canvas);
    hue=0.0;
    chroma=0.0;
    luma=0.0;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (clip_to_self != MagickFalse)
        {
          if (x < x_offset)
            {
              q++;
              continue;
            }
          if ((x-x_offset) >= (ssize_t) source_image->columns)
            break;
        }
      canvas.red=(MagickRealType) GetPixelRed(q);
      canvas.green=(MagickRealType) GetPixelGreen(q);
      canvas.blue=(MagickRealType) GetPixelBlue(q);
      if (image->matte != MagickFalse)
        canvas.opacity=(MagickRealType) GetPixelOpacity(q);
      if (image->colorspace == CMYKColorspace)
        canvas.index=(MagickRealType) GetPixelIndex(indexes+x);
      /* CMYK is negated so the RGB-oriented operators apply; negated back
         before the pixel is stored below */
      if (image->colorspace == CMYKColorspace)
        {
          canvas.red=(MagickRealType) QuantumRange-canvas.red;
          canvas.green=(MagickRealType) QuantumRange-canvas.green;
          canvas.blue=(MagickRealType) QuantumRange-canvas.blue;
          canvas.index=(MagickRealType) QuantumRange-canvas.index;
        }
      /*
        Handle canvas modifications outside overlaid region.
      */
      composite=canvas;
      if ((pixels == (PixelPacket *) NULL) || (x < x_offset) ||
          ((x-x_offset) >= (ssize_t) source_image->columns))
        {
          switch (compose)
          {
            case DissolveCompositeOp:
            case BlendCompositeOp:
            {
              composite.opacity=(MagickRealType) (QuantumRange-canvas_dissolve*
                (QuantumRange-composite.opacity));
              break;
            }
            case ClearCompositeOp:
            case SrcCompositeOp:
            {
              CompositeClear(&canvas,&composite);
              break;
            }
            case InCompositeOp:
            case SrcInCompositeOp:
            case OutCompositeOp:
            case SrcOutCompositeOp:
            case DstInCompositeOp:
            case DstAtopCompositeOp:
            case CopyOpacityCompositeOp:
            case ChangeMaskCompositeOp:
            {
              composite.opacity=(MagickRealType) TransparentOpacity;
              break;
            }
            default:
            {
              /* out-of-bounds reads resolve via the virtual pixel method */
              (void) GetOneVirtualMagickPixel(source_image,x-x_offset,
                y-y_offset,&composite,exception);
              break;
            }
          }
          if (image->colorspace == CMYKColorspace)
            {
              composite.red=(MagickRealType) QuantumRange-composite.red;
              composite.green=(MagickRealType) QuantumRange-composite.green;
              composite.blue=(MagickRealType) QuantumRange-composite.blue;
              composite.index=(MagickRealType) QuantumRange-composite.index;
            }
          SetPixelRed(q,clamp != MagickFalse ? ClampPixel(composite.red) :
            ClampToQuantum(composite.red));
          SetPixelGreen(q,clamp != MagickFalse ? ClampPixel(composite.green) :
            ClampToQuantum(composite.green));
          SetPixelBlue(q,clamp != MagickFalse ? ClampPixel(composite.blue) :
            ClampToQuantum(composite.blue));
          if (image->matte != MagickFalse)
            SetPixelOpacity(q,clamp != MagickFalse ?
              ClampPixel(composite.opacity) :
              ClampToQuantum(composite.opacity));
          if (image->colorspace == CMYKColorspace)
            SetPixelIndex(indexes+x,clamp != MagickFalse ?
              ClampPixel(composite.index) : ClampToQuantum(composite.index));
          q++;
          continue;
        }
      /*
        Handle normal overlay of source onto canvas.
      */
      source.red=(MagickRealType) GetPixelRed(p);
      source.green=(MagickRealType) GetPixelGreen(p);
      source.blue=(MagickRealType) GetPixelBlue(p);
      if (source_image->matte != MagickFalse)
        source.opacity=(MagickRealType) GetPixelOpacity(p);
      if (source_image->colorspace == CMYKColorspace)
        source.index=(MagickRealType) GetPixelIndex(source_indexes+
          x-x_offset);
      if (source_image->colorspace == CMYKColorspace)
        {
          source.red=(MagickRealType) QuantumRange-source.red;
          source.green=(MagickRealType) QuantumRange-source.green;
          source.blue=(MagickRealType) QuantumRange-source.blue;
          source.index=(MagickRealType) QuantumRange-source.index;
        }
      switch (compose)
      {
        /*
          Duff-Porter Compositions
        */
        case ClearCompositeOp:
        {
          CompositeClear(&canvas,&composite);
          break;
        }
        case SrcCompositeOp:
        case CopyCompositeOp:
        case ReplaceCompositeOp:
        {
          composite=source;
          break;
        }
        case NoCompositeOp:
        case DstCompositeOp:
          break;
        case OverCompositeOp:
        case SrcOverCompositeOp:
        {
          MagickPixelCompositeOver(&source,source.opacity,&canvas,
            canvas.opacity,&composite);
          break;
        }
        case DstOverCompositeOp:
        {
          MagickPixelCompositeOver(&canvas,canvas.opacity,&source,
            source.opacity,&composite);
          break;
        }
        case SrcInCompositeOp:
        case InCompositeOp:
        {
          CompositeIn(&source,&canvas,&composite);
          break;
        }
        case DstInCompositeOp:
        {
          CompositeIn(&canvas,&source,&composite);
          break;
        }
        case OutCompositeOp:
        case SrcOutCompositeOp:
        {
          CompositeOut(&source,&canvas,&composite);
          break;
        }
        case DstOutCompositeOp:
        {
          CompositeOut(&canvas,&source,&composite);
          break;
        }
        case AtopCompositeOp:
        case SrcAtopCompositeOp:
        {
          CompositeAtop(&source,&canvas,&composite);
          break;
        }
        case DstAtopCompositeOp:
        {
          CompositeAtop(&canvas,&source,&composite);
          break;
        }
        case XorCompositeOp:
        {
          CompositeXor(&source,&canvas,&composite);
          break;
        }
        /*
          Mathematical Compositions
        */
        case PlusCompositeOp:
        {
          CompositePlus(&source,&canvas,channel,&composite);
          break;
        }
        case MinusDstCompositeOp:
        {
          CompositeMinus(&source,&canvas,channel,&composite);
          break;
        }
        case MinusSrcCompositeOp:
        {
          CompositeMinus(&canvas,&source,channel,&composite);
          break;
        }
        case ModulusAddCompositeOp:
        {
          CompositeModulusAdd(&source,&canvas,channel,&composite);
          break;
        }
        case ModulusSubtractCompositeOp:
        {
          CompositeModulusSubtract(&source,&canvas,channel,&composite);
          break;
        }
        case DifferenceCompositeOp:
        {
          CompositeDifference(&source,&canvas,channel,&composite);
          break;
        }
        case ExclusionCompositeOp:
        {
          CompositeExclusion(&source,&canvas,channel,&composite);
          break;
        }
        case MultiplyCompositeOp:
        {
          CompositeMultiply(&source,&canvas,channel,&composite);
          break;
        }
        case ScreenCompositeOp:
        {
          CompositeScreen(&source,&canvas,channel,&composite);
          break;
        }
        case DivideDstCompositeOp:
        {
          CompositeDivide(&source,&canvas,channel,&composite);
          break;
        }
        case DivideSrcCompositeOp:
        {
          CompositeDivide(&canvas,&source,channel,&composite);
          break;
        }
        case DarkenCompositeOp:
        {
          CompositeDarken(&source,&canvas,channel,&composite);
          break;
        }
        case LightenCompositeOp:
        {
          CompositeLighten(&source,&canvas,channel,&composite);
          break;
        }
        case DarkenIntensityCompositeOp:
        {
          CompositeDarkenIntensity(&source,&canvas,channel,&composite);
          break;
        }
        case LightenIntensityCompositeOp:
        {
          CompositeLightenIntensity(&source,&canvas,channel,&composite);
          break;
        }
        case MathematicsCompositeOp:
        {
          CompositeMathematics(&source,&canvas,channel,&geometry_info,
            &composite);
          break;
        }
        /*
          Lighting Compositions
        */
        case ColorDodgeCompositeOp:
        {
          CompositeColorDodge(&source,&canvas,&composite);
          break;
        }
        case ColorBurnCompositeOp:
        {
          CompositeColorBurn(&source,&canvas,&composite);
          break;
        }
        case LinearDodgeCompositeOp:
        {
          CompositeLinearDodge(&source,&canvas,&composite);
          break;
        }
        case LinearBurnCompositeOp:
        {
          CompositeLinearBurn(&source,&canvas,&composite);
          break;
        }
        case HardLightCompositeOp:
        {
          CompositeHardLight(&source,&canvas,&composite);
          break;
        }
        case HardMixCompositeOp:
        {
          CompositeHardMix(&source,&canvas,&composite);
          break;
        }
        case OverlayCompositeOp:
        {
          /*
            Overlay = Reversed HardLight.
          */
          CompositeHardLight(&canvas,&source,&composite);
          break;
        }
        case SoftLightCompositeOp:
        {
          CompositeSoftLight(&source,&canvas,&composite);
          break;
        }
        case LinearLightCompositeOp:
        {
          CompositeLinearLight(&source,&canvas,&composite);
          break;
        }
        case PegtopLightCompositeOp:
        {
          CompositePegtopLight(&source,&canvas,&composite);
          break;
        }
        case VividLightCompositeOp:
        {
          CompositeVividLight(&source,&canvas,&composite);
          break;
        }
        case PinLightCompositeOp:
        {
          CompositePinLight(&source,&canvas,&composite);
          break;
        }
        /*
          Other Composition
        */
        case ChangeMaskCompositeOp:
        {
          if ((composite.opacity > ((MagickRealType) QuantumRange/2.0)) ||
              (IsMagickColorSimilar(&source,&canvas) != MagickFalse))
            composite.opacity=(MagickRealType) TransparentOpacity;
          else
            composite.opacity=(MagickRealType) OpaqueOpacity;
          break;
        }
        case BumpmapCompositeOp:
        {
          if (source.opacity == TransparentOpacity)
            break;
          CompositeBumpmap(&source,&canvas,&composite);
          break;
        }
        case DissolveCompositeOp:
        {
          MagickPixelCompositeOver(&source,(MagickRealType) (QuantumRange-
            source_dissolve*(QuantumRange-source.opacity)),&canvas,
            (MagickRealType) (QuantumRange-canvas_dissolve*(QuantumRange-
            canvas.opacity)),&composite);
          break;
        }
        case BlendCompositeOp:
        {
          MagickPixelCompositeBlend(&source,source_dissolve,&canvas,
            canvas_dissolve,&composite);
          break;
        }
        case StereoCompositeOp:
        {
          composite.red=(MagickRealType) GetPixelRed(p);
          composite.opacity=(composite.opacity+canvas.opacity/2);
          break;
        }
        case ThresholdCompositeOp:
        {
          CompositeThreshold(&source,&canvas,threshold,amount,&composite);
          break;
        }
        case ModulateCompositeOp:
        {
          ssize_t
            offset;

          if (source.opacity == TransparentOpacity)
            break;
          offset=(ssize_t) (MagickPixelIntensityToQuantum(&source)-midpoint);
          if (offset == 0)
            break;
          CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue,
            &chroma,&luma);
          luma+=(0.01*percent_luma*offset)/midpoint;
          chroma*=0.01*percent_chroma;
          HCLComposite(hue,chroma,luma,&composite.red,&composite.green,
            &composite.blue);
          break;
        }
        case HueCompositeOp:
        {
          /* take hue from the source; keep canvas chroma and luma */
          if (source.opacity == TransparentOpacity)
            break;
          if (canvas.opacity == TransparentOpacity)
            {
              composite=source;
              break;
            }
          CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue,
            &chroma,&luma);
          CompositeHCL(source.red,source.green,source.blue,&hue,&sans,&sans);
          HCLComposite(hue,chroma,luma,&composite.red,
            &composite.green,&composite.blue);
          if (source.opacity < canvas.opacity)
            composite.opacity=source.opacity;
          break;
        }
        case SaturateCompositeOp:
        {
          /* take chroma from the source; keep canvas hue and luma */
          if (source.opacity == TransparentOpacity)
            break;
          if (canvas.opacity == TransparentOpacity)
            {
              composite=source;
              break;
            }
          CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue,
            &chroma,&luma);
          CompositeHCL(source.red,source.green,source.blue,&sans,&chroma,
            &sans);
          HCLComposite(hue,chroma,luma,&composite.red,
            &composite.green,&composite.blue);
          if (source.opacity < canvas.opacity)
            composite.opacity=source.opacity;
          break;
        }
        case LuminizeCompositeOp:
        {
          /* take luma from the source; keep canvas hue and chroma */
          if (source.opacity == TransparentOpacity)
            break;
          if (canvas.opacity == TransparentOpacity)
            {
              composite=source;
              break;
            }
          CompositeHCL(canvas.red,canvas.green,canvas.blue,&hue,
            &chroma,&luma);
          CompositeHCL(source.red,source.green,source.blue,&sans,&sans,
            &luma);
          HCLComposite(hue,chroma,luma,&composite.red,
            &composite.green,&composite.blue);
          if (source.opacity < canvas.opacity)
            composite.opacity=source.opacity;
          break;
        }
        case ColorizeCompositeOp:
        {
          /* take hue and chroma from the source; keep canvas luma */
          if (source.opacity == TransparentOpacity)
            break;
          if (canvas.opacity == TransparentOpacity)
            {
              composite=source;
              break;
            }
          CompositeHCL(canvas.red,canvas.green,canvas.blue,&sans,
            &sans,&luma);
          CompositeHCL(source.red,source.green,source.blue,&hue,&chroma,&sans);
          HCLComposite(hue,chroma,luma,&composite.red,
            &composite.green,&composite.blue);
          if (source.opacity < canvas.opacity)
            composite.opacity=source.opacity;
          break;
        }
        case CopyRedCompositeOp:
        case CopyCyanCompositeOp:
        {
          composite.red=source.red;
          break;
        }
        case CopyGreenCompositeOp:
        case CopyMagentaCompositeOp:
        {
          composite.green=source.green;
          break;
        }
        case CopyBlueCompositeOp:
        case CopyYellowCompositeOp:
        {
          composite.blue=source.blue;
          break;
        }
        case CopyOpacityCompositeOp:
        {
          if (source.matte == MagickFalse)
            composite.opacity=(MagickRealType) (QuantumRange-
              MagickPixelIntensityToQuantum(&source));
          else
            composite.opacity=source.opacity;
          break;
        }
        case CopyBlackCompositeOp:
        {
          if (source.colorspace != CMYKColorspace)
            ConvertRGBToCMYK(&source);
          composite.index=source.index;
          break;
        }
        /*
          Compose methods that are already handled (pre-processed above).
        */
        case BlurCompositeOp:
        case DisplaceCompositeOp:
        case DistortCompositeOp:
        {
          composite=source;
          break;
        }
        default:
          break;
      }
      /* negate CMYK back before storing; mirrors the read-side negation */
      if (image->colorspace == CMYKColorspace)
        {
          composite.red=(MagickRealType) QuantumRange-composite.red;
          composite.green=(MagickRealType) QuantumRange-composite.green;
          composite.blue=(MagickRealType) QuantumRange-composite.blue;
          composite.index=(MagickRealType) QuantumRange-composite.index;
        }
      SetPixelRed(q,clamp != MagickFalse ? ClampPixel(composite.red) :
        ClampToQuantum(composite.red));
      SetPixelGreen(q,clamp != MagickFalse ? ClampPixel(composite.green) :
        ClampToQuantum(composite.green));
      SetPixelBlue(q,clamp != MagickFalse ? ClampPixel(composite.blue) :
        ClampToQuantum(composite.blue));
      SetPixelOpacity(q,clamp != MagickFalse ? ClampPixel(composite.opacity) :
        ClampToQuantum(composite.opacity));
      if (image->colorspace == CMYKColorspace)
        SetPixelIndex(indexes+x,clamp != MagickFalse ?
          ClampPixel(composite.index) : ClampToQuantum(composite.index));
      p++;
      /* wrap the source pointer so a narrow overlay tiles across the row */
      if (p >= (pixels+source_image->columns))
        p=pixels;
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CompositeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  source_view=DestroyCacheView(source_view);
  image_view=DestroyCacheView(image_view);
  /* Blur/Displace/Distort aliased source_image to canvas_image above, so
     exactly one of the two is destroyed here */
  if (canvas_image != (Image * ) NULL)
    canvas_image=DestroyImage(canvas_image);
  else
    source_image=DestroyImage(source_image);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     T e x t u r e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TextureImage() repeatedly tiles the texture image across and down the image
%  canvas.
%
%  The format of the TextureImage method is:
%
%      MagickBooleanType TextureImage(Image *image,const Image *texture)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o texture: This image is the texture to layer on the background.
%
*/
MagickExport MagickBooleanType TextureImage(Image *image,const Image *texture)
{
#define TextureImageTag  "Texture/Image"

  CacheView
    *image_view,
    *texture_view;

  ExceptionInfo
    *exception;

  Image
    *texture_image;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  if (texture == (const Image *) NULL)
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  exception=(&image->exception);
  /*
    Work on a private clone of the texture so the caller's image is never
    modified; the clone is converted to the canvas colorspace and set to
    tile when sampled outside its bounds.
  */
  texture_image=CloneImage(texture,0,0,MagickTrue,exception);
  if (texture_image == (const Image *) NULL)
    return(MagickFalse);
  (void) TransformImageColorspace(texture_image,image->colorspace);
  (void) SetImageVirtualPixelMethod(texture_image,TileVirtualPixelMethod);
  status=MagickTrue;
  /*
    Slow path: any compose operator other than plain opaque Copy/Over must go
    through CompositeImage() tile by tile.  The fast path below is only valid
    when tiles can be raw-copied over the canvas pixels.
  */
  if ((image->compose != CopyCompositeOp) &&
      ((image->compose != OverCompositeOp) || (image->matte != MagickFalse) ||
       (texture_image->matte != MagickFalse)))
    {
      /*
        Tile texture onto the image background.
      */
      for (y=0; y < (ssize_t) image->rows; y+=(ssize_t) texture_image->rows)
      {
        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
        {
          MagickBooleanType
            thread_status;

          thread_status=CompositeImage(image,image->compose,texture_image,x+
            texture_image->tile_offset.x,y+texture_image->tile_offset.y);
          if (thread_status == MagickFalse)
            {
              status=thread_status;
              break;
            }
        }
        if (image->progress_monitor != (MagickProgressMonitor) NULL)
          {
            MagickBooleanType
              proceed;

            proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType)
              y,image->rows);
            if (proceed == MagickFalse)
              status=MagickFalse;
          }
      }
      (void) SetImageProgress(image,TextureImageTag,(MagickOffsetType)
        image->rows,image->rows);
      texture_image=DestroyImage(texture_image);
      return(status);
    }
  /*
    Tile texture onto the image background (optimized).
  */
  status=MagickTrue;
  texture_view=AcquireVirtualCacheView(texture_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,texture_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const IndexPacket
      *texture_indexes;

    register const PixelPacket
      *p;

    register IndexPacket
      *indexes;

    register ssize_t
      x;

    register PixelPacket
      *q;

    size_t
      width;

    if (status == MagickFalse)
      continue;
    /*
      Fetch one texture row (wrapped by % so tiles repeat vertically) and
      queue the matching canvas row for writing.
    */
    p=GetCacheViewVirtualPixels(texture_view,texture_image->tile_offset.x,(y+
      texture_image->tile_offset.y) % texture_image->rows,
      texture_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    texture_indexes=GetCacheViewVirtualIndexQueue(texture_view);
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    /*
      Horizontal tiling: memcpy the same texture row repeatedly; the final
      (possibly partial) tile is clipped to the canvas width.  p and
      texture_indexes deliberately stay at the row start for every tile.
    */
    for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) texture_image->columns)
    {
      width=texture_image->columns;
      if ((x+(ssize_t) width) > (ssize_t) image->columns)
        width=image->columns-x;
      (void) memcpy(q,p,width*sizeof(*p));
      if ((image->colorspace == CMYKColorspace) &&
          (texture_image->colorspace == CMYKColorspace))
        {
          (void) memcpy(indexes,texture_indexes,width*
            sizeof(*indexes));
          indexes+=width;
        }
      q+=width;
    }
    sync=SyncCacheViewAuthenticPixels(image_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,TextureImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  texture_view=DestroyCacheView(texture_view);
  image_view=DestroyCacheView(image_view);
  texture_image=DestroyImage(texture_image);
  return(status);
}
GB_unop__isnan_bool_fp32.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): any change here should be made in the Generator/ template and
// this file regenerated, not hand-edited.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__isnan_bool_fp32)
// op(A') function:  GB (_unop_tran__isnan_bool_fp32)

// C type:   bool
// A type:   float
// cast:     float cij = (aij)
// unaryop:  cij = isnan (aij)

#define GB_ATYPE \
    float

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = isnan (x) ;

// casting
#define GB_CAST(z, aij) \
    float z = (aij) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    float aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    float z = (aij) ; \
    Cx [pC] = isnan (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISNAN || GxB_NO_BOOL || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Cx [p] = isnan (Ax [p]) for every entry; anz is the number of entries and
// Ab (when non-NULL) is the bitmap marking which positions hold entries.
GrB_Info GB (_unop_apply__isnan_bool_fp32)
(
    bool *Cx,                   // Cx and Ax may be aliased
    const float *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every position 0..anz-1 holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            float aij = Ax [p] ;
            float z = (aij) ;
            Cx [p] = isnan (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            float aij = Ax [p] ;
            float z = (aij) ;
            Cx [p] = isnan (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in GB_unop_transpose.c, which expands the GB_* macros
// defined above.
GrB_Info GB (_unop_tran__isnan_bool_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
omp_doacross.c
// RUN: %libomp-compile-and-run // XFAIL: gcc-4, gcc-5, clang-3.7, clang-3.8, icc-15, icc-16 #include <stdio.h> #include <stdlib.h> #include "omp_testsuite.h" #ifndef N #define N 750 #endif int test_doacross() { int i, j; // Allocate and zero out the matrix int *m = (int *)malloc(sizeof(int) * N * N); for (i = 0; i < N; ++i) { for (j = 0; j < N; ++j) { m[i * N + j] = 0; } } // Have first row and column be 0, 1, 2, 3, etc. for (i = 0; i < N; ++i) m[i * N] = i; for (j = 0; j < N; ++j) m[j] = j; // Perform wavefront which results in matrix: // 0 1 2 3 4 // 1 2 3 4 5 // 2 3 4 5 6 // 3 4 5 6 7 // 4 5 6 7 8 #pragma omp parallel shared(m) { int row, col; #pragma omp for ordered(2) for (row = 1; row < N; ++row) { for (col = 1; col < N; ++col) { #pragma omp ordered depend(sink : row - 1, col) depend(sink : row, col - 1) m[row * N + col] = m[(row - 1) * N + col] + m[row * N + (col - 1)] - m[(row - 1) * N + (col - 1)]; #pragma omp ordered depend(source) } } } // Check the bottom right element to see if iteration dependencies were held int retval = (m[(N - 1) * N + N - 1] == 2 * (N - 1)); free(m); return retval; } int main(int argc, char **argv) { int i; int num_failed = 0; for (i = 0; i < REPETITIONS; i++) { if (!test_doacross()) { num_failed++; } } return num_failed; }
sgemm.c
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>

/*
 * SGEMM benchmark offloaded to an Intel Xeon Phi (MIC) coprocessor:
 * C = [n, q] = A[n, m] * B[m, q]
 */

enum {
    N = 1000,
    M = 1000,
    Q = 1000,
    NREPS = 5,
};

/* Wall-clock time in seconds (microsecond resolution). */
double wtime()
{
    struct timeval t;
    gettimeofday(&t, NULL);
    return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}

/* Matrix multiplication C[n, q] = A[n, m] * B[m, q] */
void sgemm_phi(float *a, float *b, float *c, int n, int m, int q)
{
    #pragma offload target(mic) in(a:length(n * m)) in(b:length(m * q)) out(c:length(n * q))
    {
        #pragma omp parallel
        {
            /*
             * Fix: the original zeroed c through a per-thread counter
             * ("int k = 0; ... c[k++] = 0.0;" under "#pragma omp for").
             * Each thread's k started at 0, so every thread zeroed a prefix
             * of c rather than its own chunk of rows, leaving part of c
             * uninitialized before the accumulation below.  Index c by
             * (i, j) directly so each iteration zeroes its own element.
             */
            #pragma omp for
            for (int i = 0; i < n; i++)
                for (int j = 0; j < q; j++)
                    c[i * q + j] = 0.0;
            /* i-k-j loop order keeps the innermost accesses contiguous. */
            #pragma omp for
            for (int i = 0; i < n; i++) {
                for (int k = 0; k < m; k++) {
                    for (int j = 0; j < q; j++)
                        c[i * q + j] += a[i * m + k] * b[k * q + j];
                }
            }
        }
    }
}

/*
 * Allocate random matrices, run sgemm_fun once to warm up, then NREPS timed
 * runs; prints GFLOPS and min/avg/max timings.  Returns the average time.
 */
double run_phi(const char *msg, void (*sgemm_fun)(float *, float *, float *, int, int, int))
{
    double gflop = 2.0 * N * Q * M * 1E-9;
    float *a, *b, *c;
    a = malloc(sizeof(*a) * N * M);
    b = malloc(sizeof(*b) * M * Q);
    c = malloc(sizeof(*c) * N * Q);
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "Not enough memory\n");
        exit(EXIT_FAILURE);
    }
    srand(0);
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < M; j++)
            a[i * M + j] = rand() % 100;    // 1.0;
    }
    for (int i = 0; i < M; i++) {
        for (int j = 0; j < Q; j++)
            b[i * Q + j] = rand() % 100;    // 2.0;
    }

    /* Warmup */
    double twarmup = wtime();
    sgemm_fun(a, b, c, N, M, Q);
    twarmup = wtime() - twarmup;

    /* Measures */
    double tavg = 0.0;
    double tmin = 1E6;
    double tmax = 0.0;
    for (int i = 0; i < NREPS; i++) {
        double t = wtime();
        sgemm_fun(a, b, c, N, M, Q);
        t = wtime() - t;
        tavg += t;
        tmin = (tmin > t) ? t : tmin;
        tmax = (tmax < t) ? t : tmax;
    }
    tavg /= NREPS;
    printf("%s (%d runs): perf %.2f GFLOPS; time: tavg %.6f, tmin %.6f, tmax %.6f, twarmup %.6f\n",
           msg, NREPS, gflop / tavg, tavg, tmin, tmax, twarmup);
    free(c);
    free(b);
    free(a);
    return tavg;
}

int main(int argc, char **argv)
{
    (void)argc;
    (void)argv;
    printf("SGEMM N = %d, M = %d, Q = %d\n", N, M, Q);
    /*
     * Fix: getenv() returns NULL when MIC_OMP_NUM_THREADS is unset, and
     * passing NULL to a "%s" conversion is undefined behavior.  Also use a
     * bounded snprintf instead of sprintf.
     */
    const char *mic_threads = getenv("MIC_OMP_NUM_THREADS");
    char buf[256];
    snprintf(buf, sizeof buf, "Phi OMP %s",
             mic_threads != NULL ? mic_threads : "(default)");
    run_phi(buf, &sgemm_phi);
    return 0;
}
conv_im2col_sgemm_sse_sgemm.h
// BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
//

#include "option.h"
#include "mat.h"

namespace ncnn{

// SGEMM stage of an im2col convolution: top_blob(outch, outh*outw) =
// kernel_tm(outch, kernel_w*kernel_h*inch) * bottom_blob.
// Output channels are processed 4 at a time (tail channels one at a time),
// and output positions 4 at a time (tail positions one at a time).
// NOTE(review): this assumes bottom_blob is already im2col-packed with
// 4-column interleaving (channel(j/4) holds columns j..j+3) and kernel_tm is
// packed 4 rows per channel — produced by the matching pack routines
// elsewhere; confirm against the callers.
static void conv_im2col_sgemm_sse_sgemm(const Mat &bottom_blob, Mat &top_blob, const Mat & kernel_tm, const Mat& _bias, const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt, int inch)
{
    Mat bottom_tm = bottom_blob;
    //size_t elemsize = bottom_blob.elemsize;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const float* bias = _bias;

    // sgemm(int M, int N, int L, float* A, float* B, float* C)
    {
        //int M = outch;                      // outch
        int N = outw * outh;                  // outsize or out stride
        int L = kernel_w * kernel_h * inch;   // ksize * inch

        int nn_outch = 0;
        int remain_outch_start = 0;

        nn_outch = outch >> 2;                // groups of 4 output channels
        remain_outch_start = nn_outch << 2;   // first leftover channel

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int pp=0; pp<nn_outch; pp++)
        {
            int i = pp * 4;

            float* output0 = top_blob.channel(i);
            float* output1 = top_blob.channel(i+1);
            float* output2 = top_blob.channel(i+2);
            float* output3 = top_blob.channel(i+3);

            // substitute zeros when the layer has no bias
            const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
            const float* biasptr = bias ? bias + i : zeros;

            // 4 output channels x 4 output positions per iteration
            int j=0;
            for (; j+3<N; j=j+4)
            {
                const float* vb = bottom_tm.channel(j/4);
                const float* va = kernel_tm.channel(i/4);
#if 0 //TODO: BUG for googlenet
                // SSE path, disabled: known to produce wrong results for
                // GoogLeNet per the TODO above; the scalar #else branch is
                // the one actually compiled.
                __m128 _sum0 = _mm_set1_ps(biasptr[0]);
                __m128 _sum1 = _mm_set1_ps(biasptr[1]);
                __m128 _sum2 = _mm_set1_ps(biasptr[2]);
                __m128 _sum3 = _mm_set1_ps(biasptr[3]);

                int k=0;
                for (; k+3<L; k=k+4)
                {
                    // k0
                    __m128 _vb = _mm_loadu_ps(vb);
                    __m128 _va0 = _mm_set1_ps(va[0]);
                    __m128 _va1 = _mm_set1_ps(va[1]);
                    __m128 _va2 = _mm_set1_ps(va[2]);
                    __m128 _va3 = _mm_set1_ps(va[3]);

                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a00-a03) * k00
                    _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a00-a03) * k10
                    _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a00-a03) * k20
                    _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a00-a03) * k30

                    // k1
                    _vb = _mm_loadu_ps(vb+4);
                    _va0 = _mm_set1_ps(va[4]);
                    _va1 = _mm_set1_ps(va[5]);
                    _va2 = _mm_set1_ps(va[6]);
                    _va3 = _mm_set1_ps(va[7]);

                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a10-a13) * k01
                    _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a10-a13) * k11
                    _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a10-a13) * k21
                    _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a10-a13) * k31

                    // k2
                    _vb = _mm_loadu_ps(vb+8);
                    _va0 = _mm_set1_ps(va[8]);
                    _va1 = _mm_set1_ps(va[9]);
                    _va2 = _mm_set1_ps(va[10]);
                    _va3 = _mm_set1_ps(va[11]);

                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a20-a23) * k02
                    _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a20-a23) * k12
                    _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a20-a23) * k22
                    _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a20-a23) * k32

                    // k3
                    _vb = _mm_loadu_ps(vb+12);
                    _va0 = _mm_set1_ps(va[12]);
                    _va1 = _mm_set1_ps(va[13]);
                    _va2 = _mm_set1_ps(va[14]);
                    _va3 = _mm_set1_ps(va[15]);

                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a30-a33) * k03
                    _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a30-a33) * k13
                    _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a30-a33) * k23
                    _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a30-a33) * k33

                    va += 16;
                    vb += 16;
                }

                for (; k<L; k++)
                {
                    // k0
                    __m128 _vb = _mm_loadu_ps(vb);
                    __m128 _va0 = _mm_set1_ps(va[0]);
                    __m128 _va1 = _mm_set1_ps(va[1]);
                    __m128 _va2 = _mm_set1_ps(va[2]);
                    __m128 _va3 = _mm_set1_ps(va[3]);

                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb, _va0));// sum0 = (a00-a03) * k00
                    _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_vb, _va1));// sum1 = (a00-a03) * k10
                    _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_vb, _va2));// sum2 = (a00-a03) * k20
                    _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_vb, _va3));// sum3 = (a00-a03) * k30

                    va += 4;
                    vb += 4;
                }

                _mm_storeu_ps(output0, _sum0);
                _mm_storeu_ps(output1, _sum1);
                _mm_storeu_ps(output2, _sum2);
                _mm_storeu_ps(output3, _sum3);
#else
                // scalar path: 4 accumulators (one per output channel),
                // each holding 4 output positions; unrolled 8x over L
                float sum0[4] = {0};
                float sum1[4] = {0};
                float sum2[4] = {0};
                float sum3[4] = {0};

                int k=0;
                for (; k+7<L; k=k+8)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += va[0] * vb[n];
                        sum1[n] += va[1] * vb[n];
                        sum2[n] += va[2] * vb[n];
                        sum3[n] += va[3] * vb[n];
                        va += 4;

                        sum0[n] += va[0] * vb[n+4];
                        sum1[n] += va[1] * vb[n+4];
                        sum2[n] += va[2] * vb[n+4];
                        sum3[n] += va[3] * vb[n+4];
                        va += 4;

                        sum0[n] += va[0] * vb[n+8];
                        sum1[n] += va[1] * vb[n+8];
                        sum2[n] += va[2] * vb[n+8];
                        sum3[n] += va[3] * vb[n+8];
                        va += 4;

                        sum0[n] += va[0] * vb[n+12];
                        sum1[n] += va[1] * vb[n+12];
                        sum2[n] += va[2] * vb[n+12];
                        sum3[n] += va[3] * vb[n+12];
                        va += 4;

                        sum0[n] += va[0] * vb[n+16];
                        sum1[n] += va[1] * vb[n+16];
                        sum2[n] += va[2] * vb[n+16];
                        sum3[n] += va[3] * vb[n+16];
                        va += 4;

                        sum0[n] += va[0] * vb[n+20];
                        sum1[n] += va[1] * vb[n+20];
                        sum2[n] += va[2] * vb[n+20];
                        sum3[n] += va[3] * vb[n+20];
                        va += 4;

                        sum0[n] += va[0] * vb[n+24];
                        sum1[n] += va[1] * vb[n+24];
                        sum2[n] += va[2] * vb[n+24];
                        sum3[n] += va[3] * vb[n+24];
                        va += 4;

                        sum0[n] += va[0] * vb[n+28];
                        sum1[n] += va[1] * vb[n+28];
                        sum2[n] += va[2] * vb[n+28];
                        sum3[n] += va[3] * vb[n+28];
                        // rewind va to the start of the 8-k group for the
                        // next output position n (7 earlier advances of 4)
                        va -= 28;
                    }

                    va += 32;
                    vb += 32;
                }

                for (; k<L; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum0[n] += va[0] * vb[n];
                        sum1[n] += va[1] * vb[n];
                        sum2[n] += va[2] * vb[n];
                        sum3[n] += va[3] * vb[n];
                    }

                    va += 4;
                    vb += 4;
                }

                // bias is added once, at store time
                for (int n=0; n<4; n++)
                {
                    output0[n] = sum0[n] + biasptr[0];
                    output1[n] = sum1[n] + biasptr[1];
                    output2[n] = sum2[n] + biasptr[2];
                    output3[n] = sum3[n] + biasptr[3];
                }
#endif // __SSE__
                output0 += 4;
                output1 += 4;
                output2 += 4;
                output3 += 4;
            }

            // tail: remaining output positions, one column at a time
            for (; j<N; j++)
            {
                const float* vb = bottom_tm.channel(j/4 + j%4);
                const float* va = kernel_tm.channel(i/4);
#if __SSE__
                // _sum0_3 holds one scalar result per output channel
                __m128 _sum0_3 = _mm_loadu_ps(biasptr);
                __m128 _sum0 = _mm_set1_ps(0.0);
                __m128 _sum1 = _mm_set1_ps(0.0);
                __m128 _sum2 = _mm_set1_ps(0.0);
                __m128 _sum3 = _mm_set1_ps(0.0);

                int k=0;
                for (; k+3<L; k=k+4)
                {
                    __m128 _vb0 = _mm_set1_ps(vb[0]);
                    __m128 _vb1 = _mm_set1_ps(vb[1]);
                    __m128 _vb2 = _mm_set1_ps(vb[2]);
                    __m128 _vb3 = _mm_set1_ps(vb[3]);
                    __m128 _va0 = _mm_loadu_ps(va);
                    __m128 _va1 = _mm_loadu_ps(va+4);
                    __m128 _va2 = _mm_loadu_ps(va+8);
                    __m128 _va3 = _mm_loadu_ps(va+12);

                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0));// sum0 += (k00-k30) * a00
                    _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va1, _vb1));// sum1 += (k01-k31) * a10
                    _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va2, _vb2));// sum2 += (k02-k32) * a20
                    _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va3, _vb3));// sum3 += (k03-k33) * a30

                    va += 16;
                    vb += 4;
                }

                _sum0 = _mm_add_ps(_sum0, _sum1);
                _sum2 = _mm_add_ps(_sum2, _sum3);
                _sum0_3 = _mm_add_ps(_sum0_3, _sum0);
                _sum0_3 = _mm_add_ps(_sum0_3, _sum2);

                for (; k<L; k++)
                {
                    __m128 _vb0 = _mm_set1_ps(vb[0]);
                    __m128 _va = _mm_loadu_ps(va);

                    _sum0_3 = _mm_add_ps(_sum0_3, _mm_mul_ps(_va, _vb0));// sum0 += (k00-k30) * a00

                    va += 4;
                    vb += 1;
                }

                output0[0] = _sum0_3[0];
                output1[0] = _sum0_3[1];
                output2[0] = _sum0_3[2];
                output3[0] = _sum0_3[3];
#else
                float sum0 = biasptr[0];
                float sum1 = biasptr[1];
                float sum2 = biasptr[2];
                float sum3 = biasptr[3];

                for (int k=0; k<L; k++)
                {
                    sum0 += va[0] * vb[0];
                    sum1 += va[1] * vb[0];
                    sum2 += va[2] * vb[0];
                    sum3 += va[3] * vb[0];

                    va += 4;
                    vb += 1;
                }

                output0[0] = sum0;
                output1[0] = sum1;
                output2[0] = sum2;
                output3[0] = sum3;
#endif // __SSE__
                output0++;
                output1++;
                output2++;
                output3++;
            }
        }

        // leftover output channels (outch not a multiple of 4), one at a time
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int i=remain_outch_start; i<outch; i++)
        {
            float* output = top_blob.channel(i);

            const float bias0 = bias ? bias[i] : 0.f;

            int j=0;
            for (; j+3<N; j=j+4)
            {
                const float* vb = bottom_tm.channel(j/4);
                const float* va = kernel_tm.channel(i/4 + i%4);
#if __SSE__
                __m128 _sum0 = _mm_set1_ps(bias0);

                int k=0;
                for (; k+3<L; k=k+4)
                {
                    // k0
                    __m128 _va0 = _mm_set1_ps(va[0]);
                    __m128 _va1 = _mm_set1_ps(va[1]);
                    __m128 _va2 = _mm_set1_ps(va[2]);
                    __m128 _va3 = _mm_set1_ps(va[3]);
                    __m128 _vb0 = _mm_loadu_ps(vb);
                    __m128 _vb1 = _mm_loadu_ps(vb+4);
                    __m128 _vb2 = _mm_loadu_ps(vb+8);
                    __m128 _vb3 = _mm_loadu_ps(vb+12);

                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb0, _va0));// sum0 = (a00-a03) * k00
                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb1, _va1));// sum0 += (a10-a13) * k01
                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb2, _va2));// sum0 += (a20-a23) * k02
                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb3, _va3));// sum0 += (a30-a33) * k03

                    va += 4;
                    vb += 16;
                }

                for (; k<L; k++)
                {
                    // k0
                    __m128 _va0 = _mm_set1_ps(va[0]);
                    __m128 _vb0 = _mm_loadu_ps(vb);

                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_vb0, _va0)); // sum0 = (a00-a03) * k00

                    va += 1;
                    vb += 4;
                }

                _mm_storeu_ps(output, _sum0);
#else
                float sum[4] = {0};

                int k=0;
                for (; k+3<L; k=k+4)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += va[0] * vb[n];
                        sum[n] += va[1] * vb[n+4];
                        sum[n] += va[2] * vb[n+8];
                        sum[n] += va[3] * vb[n+12];
                        //sum[n] += va[4] * vb[n+16];
                        //sum[n] += va[5] * vb[n+20];
                        //sum[n] += va[6] * vb[n+24];
                        //sum[n] += va[7] * vb[n+28];
                    }
                    va += 4;
                    vb += 16;
                }

                for (; k<L; k++)
                {
                    for (int n=0; n<4; n++)
                    {
                        sum[n] += va[0] * vb[n];
                    }
                    va += 1;
                    vb += 4;
                }

                for (int n=0; n<4; n++)
                {
                    output[n] = sum[n] + bias0;
                }
#endif // __SSE__
                output += 4;
            }

            // tail: single output channel, single output position
            for (; j<N; j++)
            {
                const float* vb = bottom_tm.channel(j/4 + j%4);
                const float* va = kernel_tm.channel(i/4 + i%4);

                int k=0;
#if __SSE__
                __m128 _sum0 = _mm_set1_ps(0.f);

                for (; k+3<L; k+=4)
                {
                    __m128 _p0 = _mm_loadu_ps(vb);
                    __m128 _k0 = _mm_loadu_ps(va);
                    _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_p0, _k0));

                    va += 4;
                    vb += 4;
                }
                // horizontal reduction of the 4 partial sums
                float sum0 = bias0 + _sum0[0] + _sum0[1] + _sum0[2] + _sum0[3];
#else
                float sum0 = bias0;
#endif // __SSE__
                for (; k<L; k++)
                {
                    sum0 += va[0] * vb[0];

                    va += 1;
                    vb += 1;
                }

                output[0] = sum0;

                output++;
            }
        }
    }
}

}
GB_binop__div_int64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): any change here should be made in the Generator/ template and
// this file regenerated, not hand-edited.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__div_int64)
// A.*B function (eWiseMult):       GB (_AemultB_08__div_int64)
// A.*B function (eWiseMult):       GB (_AemultB_02__div_int64)
// A.*B function (eWiseMult):       GB (_AemultB_04__div_int64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__div_int64)
// A*D function (colscale):         GB (_AxD__div_int64)
// D*A function (rowscale):         GB (_DxB__div_int64)
// C+=B function (dense accum):     GB (_Cdense_accumB__div_int64)
// C+=b function (dense accum):     GB (_Cdense_accumb__div_int64)
// C+=A+B function (dense ewise3):  GB (_Cdense_ewise3_accum__div_int64)
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__div_int64)
// C=scalar+B                       GB (_bind1st__div_int64)
// C=scalar+B'                     GB (_bind1st_tran__div_int64)
// C=A+scalar                       GB (_bind2nd__div_int64)
// C=A'+scalar                      GB (_bind2nd_tran__div_int64)

// C type:    int64_t
// A type:    int64_t
// A pattern? 0
// B type:    int64_t
// B pattern? 0

// BinaryOp:  cij = GB_IDIV_SIGNED (aij, bij, 64)

#define GB_ATYPE \
    int64_t

#define GB_BTYPE \
    int64_t

#define GB_CTYPE \
    int64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int64_t aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int64_t bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_IDIV_SIGNED (x, y, 64) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_DIV || GxB_NO_INT64 || GxB_NO_DIV_INT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__div_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__div_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__div_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__div_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__div_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else 
int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__div_int64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__div_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int64_t alpha_scalar ; int64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int64_t *) alpha_scalar_in)) ; beta_scalar = (*((int64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__div_int64) ( GrB_Matrix C, const int C_sparsity, 
const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__div_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__div_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__div_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__div_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) 
for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IDIV_SIGNED (x, bij, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__div_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IDIV_SIGNED (aij, y, 64) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (x, aij, 64) ; \ } GrB_Info GB (_bind1st_tran__div_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IDIV_SIGNED (aij, y, 64) ; \ } GrB_Info GB (_bind2nd_tran__div_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
work.c
/********************************************************************
 * BenchIT - Performance Measurement for Scientific Applications
 * Contact: developer@benchit.org
 *
 * $Id: work.c 1 2009-09-11 12:26:19Z william $
 * $URL: svn+ssh://william@rupert.zih.tu-dresden.de/svn-base/benchit-root/BenchITv6/kernel/memory/bandwidth/C/OpenMP/0/double_stream_opt/work.c $
 * For license details see COPYING in the package base directory
 *******************************************************************/
/* Kernel: measure Bandwidth inspired by STREAM benchmark (C OMP-version)
 *
 * according to the rules, reffer this Benchmark as:
 * "BenchIT kernel based on a variant of the STREAM benchmark code"
 * when publishing results
 *
 * This file contains the work, that is done: copy, scale, add and triad
 *******************************************************************/

#include "work.h"

/* --------------------------------------------------------------------
 * Helpers shared by all four kernels
 * ------------------------------------------------------------------ */

/* Compute the half-open index range [*min, *max) the calling thread works on.
 * - localAlloc: every thread owns private arrays -> full range.
 * - shared array: split [offset, size+offset) evenly over the team.
 * BUGFIX: the shared-array upper bound used to be
 *     min + size/num_threads + offset - 1
 * which adds `offset` a second time and mis-sizes every chunk; it also
 * dropped the remainder when size is not a multiple of the team size.
 * Using ((t+1)*size)/T + offset makes the chunks tile the array exactly. */
static void thread_range(unsigned long long size, int offset,
                         int localAlloc, long long *min, long long *max)
{
   if (localAlloc) {
      *min = offset;
      *max = (long long) size + offset;
   } else {
      unsigned long long t  = (unsigned long long) omp_get_thread_num();
      unsigned long long nt = (unsigned long long) omp_get_num_threads();
      *min = (long long) ((t * size) / nt) + offset;
      *max = (long long) (((t + 1) * size) / nt) + offset;
   }
}

#ifdef BENCHIT_KERNEL_COMPILE_FOR_PIN_THREADS_TO_CORES
/* Pin the calling thread to core `num`.
 * NOTE(review): this passes a plain 64-bit mask where glibc expects a
 * cpu_set_t; it relies on the legacy sched_setaffinity ABI and only
 * covers the first 64 cores - confirm on the target platform. */
static void pin_to_core(int num)
{
   long long mask = 1LL << num;   /* BUGFIX: was 1<<num, an int shift that
                                   * overflows for num >= 31 */
   sched_setaffinity(0, sizeof(long long), &mask);
}
#endif

/**
 * Copy:
 * for all threads:
 *   for (k=0;k<repeats)
 *     for (i=offset;i<size+offset)
 *       alla[thread_nr][i]=allb[thread_nr][i]
 * resulting in size*repeats*2*sizeof(double) accessed bytes.
 * Returns the wall time of the timed region (measured by thread 0).
 **/
double copy_(double **alla, double **allb, unsigned long long size,
             int offset, long long repeats, int localAlloc, int pinThreads)
{
   /* written by thread 0 only, between barriers */
   double time = 0.0;
   #pragma omp parallel
   {
      double *a;
      double *b;
      /* BUGFIX: loop bounds/counters were int - overflow for large
       * size (unsigned long long) or repeats (long long) */
      long long i, k, min, max;
      int num = omp_get_thread_num();

      if (localAlloc) {
         a = alla[num];
         b = allb[num];
      } else {
         a = alla[0];
         b = allb[0];
      }
      thread_range(size, offset, localAlloc, &min, &max);

#ifdef BENCHIT_KERNEL_COMPILE_FOR_PIN_THREADS_TO_CORES
      if (pinThreads) pin_to_core(num);
#endif

      /* take the start time once the whole team is ready */
      #pragma omp barrier
      if (num == 0) time = bi_gettime();
      #pragma omp barrier

      /* repeat measurement for accuracy */
      for (k = 0; k < repeats; k++)
         /* enable aligned access (may increase performance on x86 systems) */
#ifdef BENCHIT_KERNEL_ENABLE_ALIGNED_ACCESS
         #pragma vector aligned
#endif
         /* enable nontemporal stores (may increase performance on x86 systems) */
#ifdef BENCHIT_KERNEL_ENABLE_NONTEMPORAL_STORES
         #pragma vector nontemporal (a)
#endif
         for (i = min; i < max; i++) {
            a[i] = b[i];
         }

      /* take the end time after every thread finished its chunk */
      #pragma omp barrier
      if (num == 0) time = bi_gettime() - time;
   }
   return time;
}

/**
 * Scale: a[i] = b[i] * scalar
 * resulting in size*repeats*2*sizeof(double) accessed bytes.
 **/
double scale_(double **alla, double **allb, double scalar,
              unsigned long long size, int offset, long long repeats,
              int localAlloc, int pinThreads)
{
   double time = 0.0;
   #pragma omp parallel
   {
      double *a;
      double *b;
      long long i, k, min, max;
      int num = omp_get_thread_num();

      if (localAlloc) {
         a = alla[num];
         b = allb[num];
      } else {
         a = alla[0];
         b = allb[0];
      }
      thread_range(size, offset, localAlloc, &min, &max);

#ifdef BENCHIT_KERNEL_COMPILE_FOR_PIN_THREADS_TO_CORES
      if (pinThreads) pin_to_core(num);
#endif

      #pragma omp barrier
      if (num == 0) time = bi_gettime();
      #pragma omp barrier

      for (k = 0; k < repeats; k++)
#ifdef BENCHIT_KERNEL_ENABLE_ALIGNED_ACCESS
         #pragma vector aligned
#endif
#ifdef BENCHIT_KERNEL_ENABLE_NONTEMPORAL_STORES
         #pragma vector nontemporal (a)
#endif
         for (i = min; i < max; i++) {
            a[i] = b[i] * scalar;
         }

      #pragma omp barrier
      if (num == 0) time = bi_gettime() - time;
   }
   return time;
}

/**
 * Add: a[i] = b[i] + c[i]
 * resulting in size*repeats*3*sizeof(double) accessed bytes.
 **/
double add_(double **alla, double **allb, double **allc,
            unsigned long long size, int offset, long long repeats,
            int localAlloc, int pinThreads)
{
   double time = 0.0;
   #pragma omp parallel
   {
      double *a;
      double *b;
      double *c;
      long long i, k, min, max;
      int num = omp_get_thread_num();

      if (localAlloc) {
         a = alla[num];
         b = allb[num];
         c = allc[num];
      } else {
         a = alla[0];
         b = allb[0];
         c = allc[0];
      }
      thread_range(size, offset, localAlloc, &min, &max);

#ifdef BENCHIT_KERNEL_COMPILE_FOR_PIN_THREADS_TO_CORES
      if (pinThreads) pin_to_core(num);
#endif

      #pragma omp barrier
      if (num == 0) time = bi_gettime();
      #pragma omp barrier

      for (k = 0; k < repeats; k++)
#ifdef BENCHIT_KERNEL_ENABLE_ALIGNED_ACCESS
         #pragma vector aligned
#endif
#ifdef BENCHIT_KERNEL_ENABLE_NONTEMPORAL_STORES
         #pragma vector nontemporal (a)
#endif
         for (i = min; i < max; i++) {
            a[i] = b[i] + c[i];
         }

      #pragma omp barrier
      if (num == 0) time = bi_gettime() - time;
   }
   return time;
}

/**
 * Triad: a[i] = b[i] * scalar + c[i]
 * resulting in size*repeats*3*sizeof(double) accessed bytes.
 **/
double triad_(double **alla, double **allb, double **allc, double scalar,
              unsigned long long size, int offset, long long repeats,
              int localAlloc, int pinThreads)
{
   double time = 0.0;
   #pragma omp parallel
   {
      double *a;
      double *b;
      double *c;
      long long i, k, min, max;
      int num = omp_get_thread_num();

      if (localAlloc) {
         a = alla[num];
         b = allb[num];
         c = allc[num];
      } else {
         a = alla[0];
         b = allb[0];
         c = allc[0];
      }
      thread_range(size, offset, localAlloc, &min, &max);

#ifdef BENCHIT_KERNEL_COMPILE_FOR_PIN_THREADS_TO_CORES
      if (pinThreads) pin_to_core(num);
#endif

      #pragma omp barrier
      if (num == 0) time = bi_gettime();
      #pragma omp barrier

      for (k = 0; k < repeats; k++)
#ifdef BENCHIT_KERNEL_ENABLE_ALIGNED_ACCESS
         #pragma vector aligned
#endif
#ifdef BENCHIT_KERNEL_ENABLE_NONTEMPORAL_STORES
         #pragma vector nontemporal (a)
#endif
         for (i = min; i < max; i++) {
            a[i] = b[i] * scalar + c[i];
         }

      #pragma omp barrier
      if (num == 0) time = bi_gettime() - time;
   }
   return time;
}
for-task-for.c
// RUN: %libomp-compile-and-run #include <stdio.h> #include <math.h> #include "omp_testsuite.h" #define NUM_OUTER_THREADS 16 #define NUM_INNER_THREADS 16 #define SMALL_LOOPCOUNT 64 /*! Utility function to spend some time in a loop */ static void do_some_work (void) { int i; double sum = 0; for(i = 0; i < 1000; i++) { sum += sqrt(i); } } int test_omp_parallel_for_task_for() { int vals[SMALL_LOOPCOUNT]; int i; for (i = 0; i < SMALL_LOOPCOUNT; i++) { vals[i] = 0; } #pragma omp parallel firstprivate(vals) num_threads(NUM_OUTER_THREADS) #pragma omp master { for (i = 1; i <= SMALL_LOOPCOUNT; i++) { #pragma omp task firstprivate(i) firstprivate(vals) { int local_sum = 0; int j; #pragma omp parallel for reduction(+:local_sum) \ num_threads(NUM_INNER_THREADS) for (j = 1; j <= SMALL_LOOPCOUNT; j++) { int k; do_some_work(); for (k = 0; k < j % 4; k++) { #pragma omp taskyield } local_sum += j; } for (j = 0; j < i % 5; j++) { #pragma omp taskyield } vals[i] = local_sum; } } } int num_failed = 0; int known_sum = SMALL_LOOPCOUNT * (SMALL_LOOPCOUNT + 1) / 2; for (i = 0; i < SMALL_LOOPCOUNT; i++) { if (vals[i] != known_sum) num_failed++; } return num_failed ? 1 : 0; } int main() { int i; int num_failed = 0; for (i = 0; i < REPETITIONS; i++) { if (!test_omp_parallel_for_task_for()) { num_failed++; } } return num_failed; }
pvector.h
// Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details

#ifndef PVECTOR_H_
#define PVECTOR_H_

#include <algorithm>
#include <cstddef>   // size_t


/*
GAP Benchmark Suite
Class:  pvector
Author: Scott Beamer

Vector class with ability to not initialize or do initialize in parallel
 - std::vector (when resizing) will always initialize, and does it serially
 - When pvector is resized, new elements are uninitialized
 - Resizing is not thread-safe
*/


template <typename T_>
class pvector {
 public:
  typedef T_* iterator;

  pvector() : start_(nullptr), end_size_(nullptr), end_capacity_(nullptr) {}

  // Allocates num_elements slots; contents are deliberately uninitialized.
  explicit pvector(size_t num_elements) {
    start_ = new T_[num_elements];
    end_size_ = start_ + num_elements;
    end_capacity_ = end_size_;
  }

  pvector(size_t num_elements, T_ init_val) : pvector(num_elements) {
    fill(init_val);
  }

  // Parallel element-wise copy of [copy_begin, copy_end).
  pvector(iterator copy_begin, iterator copy_end)
      : pvector(copy_end - copy_begin) {
    #pragma omp parallel for
    for (size_t i=0; i < capacity(); i++)
      start_[i] = copy_begin[i];
  }

  // don't want this to be copied, too much data to move
  pvector(const pvector &other) = delete;

  // prefer move because too much data to copy
  pvector(pvector &&other)
      : start_(other.start_), end_size_(other.end_size_),
        end_capacity_(other.end_capacity_) {
    other.start_ = nullptr;
    other.end_size_ = nullptr;
    other.end_capacity_ = nullptr;
  }

  // want move assignment
  pvector& operator= (pvector &&other) {
    // BUGFIX: the previous implementation leaked the current buffer and,
    // on self-move, nulled our own pointers; free first and guard self.
    if (this != &other) {
      delete[] start_;
      start_ = other.start_;
      end_size_ = other.end_size_;
      end_capacity_ = other.end_capacity_;
      other.start_ = nullptr;
      other.end_size_ = nullptr;
      other.end_capacity_ = nullptr;
    }
    return *this;
  }

  ~pvector() {
    delete[] start_;   // delete[] on nullptr is a no-op, no guard needed
  }

  // not thread-safe
  void reserve(size_t num_elements) {
    if (num_elements > capacity()) {
      T_ *new_range = new T_[num_elements];
      #pragma omp parallel for
      for (size_t i=0; i < size(); i++)
        new_range[i] = start_[i];
      // size() must be read before start_ is replaced below
      end_size_ = new_range + size();
      delete[] start_;
      start_ = new_range;
      end_capacity_ = start_ + num_elements;
    }
  }

  bool empty() { return end_size_ == start_; }

  void clear() { end_size_ = start_; }

  // Grows capacity if needed; elements past the old size stay uninitialized.
  void resize(size_t num_elements) {
    reserve(num_elements);
    end_size_ = start_ + num_elements;
  }

  T_& operator[](size_t n) { return start_[n]; }

  const T_& operator[](size_t n) const { return start_[n]; }

  void push_back(T_ val) {
    if (size() == capacity()) {
      size_t new_size = capacity() == 0 ? 1 : capacity() * growth_factor;
      reserve(new_size);
    }
    *end_size_ = val;
    end_size_++;
  }

  void fill(T_ init_val) {
    #pragma omp parallel for
    for (T_* ptr=start_; ptr < end_size_; ptr++)
      *ptr = init_val;
  }

  size_t capacity() const { return end_capacity_ - start_; }

  size_t size() const { return end_size_ - start_; }

  iterator begin() const { return start_; }

  iterator end() const { return end_size_; }

  T_* data() const { return start_; }

  void swap(pvector &other) {
    std::swap(start_, other.start_);
    std::swap(end_size_, other.end_size_);
    std::swap(end_capacity_, other.end_capacity_);
  }


 private:
  T_* start_;
  T_* end_size_;
  T_* end_capacity_;
  static const size_t growth_factor = 2;
};

#endif  // PVECTOR_H_
bmh_parallel.c
#include<stdio.h>
#include<string.h>
#include<stdlib.h>
#include<omp.h>

#define NUM_THREADS 4

/* Bad-character ("last occurrence") table for Boyer-Moore-Horspool,
 * indexed by unsigned byte value.
 * One copy per thread: every worker preprocesses the pattern itself, so
 * the table must not be shared (it was a data race on a single global,
 * since the two differently named critical sections can run in parallel). */
int occ[256];
#pragma omp threadprivate(occ)

/* Fill occ[] with the last index of each byte in str[0..len-2] (the final
 * pattern byte is deliberately excluded, as in Horspool); bytes that never
 * appear map to -1.  Returns the table. */
int* lastocc(char str[])
{
    int i;
    int len = strlen(str);
    /* BUGFIX: initialize the whole table (was only 0..127, leaving
     * occ[128..255] stale for non-ASCII input) */
    for (i = 0; i < 256; i++)
        occ[i] = -1;
    for (i = 0; i < len - 1; i++)
        occ[(unsigned char) str[i]] = i;  /* BUGFIX: plain char index can be negative */
    return occ;
}

/* Boyer-Moore-Horspool search for pattern p in t[start..end] (end is the
 * last index, inclusive).  Prints every match position. */
void bmh(char *t, int start, int end, char *p)
{
    int *locc;
    int i0, j, m;

    m = strlen(p);
    locc = lastocc(p);
    i0 = start;
    while (i0 <= end - m + 1) {
        /* compare pattern right-to-left at window position i0 */
        j = m - 1;
        while (j >= 0 && p[j] == t[i0 + j])
            j--;
        if (j < 0)
            printf("Pattern found at %d\n", i0);
        /* Horspool shift: align the last window byte with its last
         * occurrence inside the pattern */
        i0 += m - 1;
        i0 -= locc[(unsigned char) t[i0]];  /* BUGFIX: unsigned index */
    }
}

int main()
{
    char pat[10];
    char *text;
    size_t size = 0;

    /* read the whole text file into memory */
    FILE *fp = fopen("gene.txt", "r");
    if (fp == NULL) {                       /* BUGFIX: fopen was unchecked */
        perror("gene.txt");
        return 1;
    }
    fseek(fp, 0, SEEK_END);                 /* go to end of file */
    size = ftell(fp);                       /* how many bytes did we pass? */
    rewind(fp);
    text = malloc(size + 1);                /* +1 byte for the '\0' */
    if (text == NULL) {                     /* BUGFIX: malloc was unchecked */
        fclose(fp);
        fprintf(stderr, "out of memory\n");
        return 1;
    }
    if (fread(text, 1, size, fp) != size) { /* BUGFIX: fread was unchecked */
        fclose(fp);
        free(text);
        fprintf(stderr, "read error\n");
        return 1;
    }
    fclose(fp);
    text[size] = '\0';                      /* NUL-terminate the buffer */

    /* BUGFIX: width limit prevents overflowing pat[10] */
    if (scanf("%9s", pat) != 1) {
        free(text);
        fprintf(stderr, "no pattern given\n");
        return 1;
    }
    int lenp = strlen(pat);
    int lent = strlen(text);
    printf("Length of pattern: %d\n", lenp);
    printf("Length of text: %d\n", lent);   /* BUGFIX: was %d on a size_t (and mislabeled) */

    int bs = lent / NUM_THREADS;
    int rem = lent % NUM_THREADS;
    printf("bs: %d rem: %d\n", bs, rem);
    printf("num of threads %d\n", NUM_THREADS);

    int tid, start, end;
    /* Each thread searches one block; blocks after the first start lenp
     * bytes early so matches straddling a block boundary are found.  The
     * named critical sections keep each block's printf output together,
     * at the cost of serializing the searches within each group. */
    #pragma omp parallel num_threads(NUM_THREADS) private(tid,start,end) shared(text,pat,rem,bs,lenp)
    {
        tid = omp_get_thread_num();
        printf("tid %d\n", tid);
        if (tid == 0) {
            #pragma omp critical (part1)
            {
                start = 0;
                end = bs - 1;
                printf("start: %d end: %d\n", start, end);
                printf("tid= %d text block : %d ... %d\n", tid, start, end);
                bmh(text, start, end, pat);
            }
        } else {
            #pragma omp critical (part2)
            {
                start = (tid * bs) - lenp;
                if (start < 0)              /* BUGFIX: guard very short texts */
                    start = 0;
                end = (tid * bs) + bs - 1;
                printf("start: %d end: %d\n", start, end);
                printf("tid= %d text block : %d ... %d\n", tid, start, end);
                bmh(text, start, end, pat);
            }
        }
    }

    /* BUGFIX: the leftover bytes start at NUM_THREADS*bs, not
     * (NUM_THREADS+1)*bs (which skipped past them entirely); back up by
     * lenp to catch matches straddling the last boundary */
    if (rem != 0) {
        start = NUM_THREADS * bs - lenp;
        if (start < 0)
            start = 0;
        bmh(text, start, lent - 1, pat);
    }

    free(text);
    return 0;
}
GB_binop__islt_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__islt_int32) // A.*B function (eWiseMult): GB (_AemultB_01__islt_int32) // A.*B function (eWiseMult): GB (_AemultB_02__islt_int32) // A.*B function (eWiseMult): GB (_AemultB_03__islt_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_int32) // A*D function (colscale): GB (_AxD__islt_int32) // D*A function (rowscale): GB (_DxB__islt_int32) // C+=B function (dense accum): GB (_Cdense_accumB__islt_int32) // C+=b function (dense accum): GB (_Cdense_accumb__islt_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_int32) // C=scalar+B GB (_bind1st__islt_int32) // C=scalar+B' GB (_bind1st_tran__islt_int32) // C=A+scalar GB (_bind2nd__islt_int32) // C=A'+scalar GB (_bind2nd_tran__islt_int32) // C type: int32_t // A type: int32_t // B,b type: int32_t // BinaryOp: cij = (aij < bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are 
identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x < y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLT || GxB_NO_INT32 || GxB_NO_ISLT_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__islt_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__islt_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__islt_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__islt_int32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t 
*A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__islt_int32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__islt_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__islt_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict 
C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__islt_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__islt_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__islt_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__islt_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = (x < bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__islt_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij < y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x < aij) ; \ } GrB_Info GB (_bind1st_tran__islt_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij < y) ; \ } GrB_Info GB (_bind2nd_tran__islt_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
GB_subassign_05.c
//------------------------------------------------------------------------------
// GB_subassign_05: C(I,J)<M> = scalar ; no S
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Method 05: C(I,J)<M> = scalar ; no S

// M: present
// Mask_comp: false
// C_replace: false
// accum: NULL
// A: scalar
// S: none

// C: not bitmap
// M: any sparsity

#include "GB_subassign_methods.h"

// Assign a single scalar to the submatrix C(I,J), gated by the mask M, with
// no accumulator and without constructing the S matrix.  Existing entries in
// C(I,jC) selected by M are overwritten (zombies are revived); entries that
// do not yet exist are queued as pending tuples for later assembly.
GrB_Info GB_subassign_05
(
    GrB_Matrix C,
    // input:
    const GrB_Index *I,
    const int64_t nI,
    const int Ikind,
    const int64_t Icolon [3],
    const GrB_Index *J,
    const int64_t nJ,
    const int Jkind,
    const int64_t Jcolon [3],
    const GrB_Matrix M,
    const bool Mask_struct,
    const void *scalar,
    const GrB_Type atype,
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (!GB_IS_BITMAP (C)) ;
    ASSERT (!GB_aliased (C, M)) ;   // NO ALIAS of C==M

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GB_EMPTY_TASKLIST ;
    // sorted entry order is required for the binary searches below
    GB_MATRIX_WAIT_IF_JUMBLED (C) ;
    GB_MATRIX_WAIT_IF_JUMBLED (M) ;

    GB_GET_C ;      // C must not be bitmap
    int64_t zorig = C->nzombies ;
    const int64_t *restrict Ch = C->h ;
    const int64_t *restrict Cp = C->p ;
    const bool C_is_hyper = (Ch != NULL) ;
    const int64_t Cnvec = C->nvec ;
    GB_GET_MASK ;
    GB_GET_SCALAR ;
    GrB_BinaryOp accum = NULL ;

    //--------------------------------------------------------------------------
    // Method 05: C(I,J)<M> = scalar ; no S
    //--------------------------------------------------------------------------

    // Time: Close to Optimal:  the method must iterate over all entries in M,
    // so the time is Omega(nnz(M)).  For each entry M(i,j)=1, the
    // corresponding entry in C must be found and updated (inserted or
    // modified).  This method does this with a binary search of C(:,jC) or a
    // direct lookup if C(:,jC) is dense.  The time is thus O(nnz(M)*log(n)) in
    // the worst case, usually less than that since C(:,jC) often has O(1)
    // entries.  An additional time of O(|J|*log(Cnvec)) is added if C is
    // hypersparse.  There is no equivalent method that computes
    // C(I,J)<M>=scalar using the matrix S.

    // Method 05 and Method 07 are very similar.  Also compare with Method 06n.

    //--------------------------------------------------------------------------
    // Parallel: slice M into coarse/fine tasks (Method 05, 06n, 07)
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_ONE_SLICE (M) ;    // M cannot be jumbled

    //--------------------------------------------------------------------------
    // phase 1: undelete zombies, update entries, and count pending tuples
    //--------------------------------------------------------------------------

    // nzombies is the only cross-task accumulation in phase 1, handled with an
    // OpenMP reduction; each task otherwise touches disjoint parts of M.
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:nzombies)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_TASK_DESCRIPTOR_PHASE1 ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get j, the kth vector of M
            //------------------------------------------------------------------

            int64_t j = GBH (Mh, k) ;
            GB_GET_VECTOR (pM, pM_end, pA, pA_end, Mp, k, Mvlen) ;
            int64_t mjnz = pM_end - pM ;
            if (mjnz == 0) continue ;

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            GB_GET_jC ;
            int64_t cjnz = pC_end - pC_start ;
            bool cjdense = (cjnz == Cvlen) ;

            //------------------------------------------------------------------
            // C(I,jC)<M(:,j)> = scalar ; no S
            //------------------------------------------------------------------

            if (cjdense)
            {

                //--------------------------------------------------------------
                // C(:,jC) is dense so the binary search of C is not needed
                //--------------------------------------------------------------

                for ( ; pM < pM_end ; pM++)
                {

                    //----------------------------------------------------------
                    // update C(iC,jC), but only if M(iA,j) allows it
                    //----------------------------------------------------------

                    bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                    if (mij)
                    {
                        int64_t iA = GBI (Mi, pM, Mvlen) ;
                        GB_iC_DENSE_LOOKUP ;

                        // ----[C A 1] or [X A 1]-------------------------------
                        // [C A 1]: action: ( =A ): copy A into C, no accum
                        // [X A 1]: action: ( undelete ): zombie lives
                        GB_noaccum_C_A_1_scalar ;
                    }
                }
            }
            else
            {

                //--------------------------------------------------------------
                // C(:,jC) is sparse; use binary search for C
                //--------------------------------------------------------------

                for ( ; pM < pM_end ; pM++)
                {

                    //----------------------------------------------------------
                    // update C(iC,jC), but only if M(iA,j) allows it
                    //----------------------------------------------------------

                    bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                    if (mij)
                    {
                        int64_t iA = GBI (Mi, pM, Mvlen) ;

                        // find C(iC,jC) in C(:,jC)
                        GB_iC_BINARY_SEARCH ;
                        if (cij_found)
                        {
                            // ----[C A 1] or [X A 1]---------------------------
                            // [C A 1]: action: ( =A ): copy A into C, no accum
                            // [X A 1]: action: ( undelete ): zombie lives
                            GB_noaccum_C_A_1_scalar ;
                        }
                        else
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            // entry not present: counted here, inserted as a
                            // pending tuple in phase 2
                            task_pending++ ;
                        }
                    }
                }
            }
        }

        GB_PHASE1_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // phase 2: insert pending tuples
    //--------------------------------------------------------------------------

    GB_PENDING_CUMSUM ;
    zorig = C->nzombies ;

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(&&:pending_sorted)
    for (taskid = 0 ; taskid < ntasks ; taskid++)
    {

        //----------------------------------------------------------------------
        // get the task descriptor
        //----------------------------------------------------------------------

        GB_GET_TASK_DESCRIPTOR_PHASE2 ;

        //----------------------------------------------------------------------
        // compute all vectors in this task
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // get j, the kth vector of M
            //------------------------------------------------------------------

            int64_t j = GBH (Mh, k) ;
            GB_GET_VECTOR (pM, pM_end, pA, pA_end, Mp, k, Mvlen) ;
            int64_t mjnz = pM_end - pM ;
            if (mjnz == 0) continue ;

            //------------------------------------------------------------------
            // get jC, the corresponding vector of C
            //------------------------------------------------------------------

            GB_GET_jC ;
            bool cjdense = ((pC_end - pC_start) == Cvlen) ;

            //------------------------------------------------------------------
            // C(I,jC)<M(:,j)> = scalar ; no S
            //------------------------------------------------------------------

            // a dense C(:,jC) cannot produce pending tuples, so skip it
            if (!cjdense)
            {

                //--------------------------------------------------------------
                // C(:,jC) is sparse; use binary search for C
                //--------------------------------------------------------------

                for ( ; pM < pM_end ; pM++)
                {

                    //----------------------------------------------------------
                    // update C(iC,jC), but only if M(iA,j) allows it
                    //----------------------------------------------------------

                    bool mij = GBB (Mb, pM) && GB_mcast (Mx, pM, msize) ;
                    if (mij)
                    {
                        int64_t iA = GBI (Mi, pM, Mvlen) ;

                        // find C(iC,jC) in C(:,jC)
                        GB_iC_BINARY_SEARCH ;
                        if (!cij_found)
                        {
                            // ----[. A 1]--------------------------------------
                            // [. A 1]: action: ( insert )
                            GB_PENDING_INSERT (scalar) ;
                        }
                    }
                }
            }
        }

        GB_PHASE2_TASK_WRAPUP ;
    }

    //--------------------------------------------------------------------------
    // finalize the matrix and return result
    //--------------------------------------------------------------------------

    GB_SUBASSIGN_WRAPUP ;
}
/* ===== file: Metric.h ===== */
// // Created by Jin Zhu on 2020/2/18. // // #define R_BUILD #ifndef SRC_METRICS_H #define SRC_METRICS_H #include <algorithm> #include <random> #include <vector> #include "Algorithm.h" #include "Data.h" #include "utilities.h" template <class T1, class T2, class T3, class T4> // To do: calculate loss && all to one && lm poisson cox class Metric { public: bool is_cv; int Kfold; int ic_type; // Eigen::Matrix<T2, Dynamic, 1> cv_initial_model_param; // Eigen::Matrix<T3, Dynamic, 1> cv_initial_coef0; std::vector<Eigen::VectorXi> cv_initial_A; std::vector<Eigen::VectorXi> cv_initial_I; std::vector<Eigen::VectorXi> train_mask_list; std::vector<Eigen::VectorXi> test_mask_list; std::vector<T4> train_X_list; std::vector<T4> test_X_list; std::vector<T1> train_y_list; std::vector<T1> test_y_list; std::vector<Eigen::VectorXd> train_weight_list; std::vector<Eigen::VectorXd> test_weight_list; std::vector<FIT_ARG<T2, T3>> cv_init_fit_arg; // std::vector<std::vector<T4>> group_XTX_list; double ic_coef; Metric() = default; Metric(int ic_type, double ic_coef = 1.0, int Kfold = 5) { this->is_cv = Kfold > 1; this->ic_type = ic_type; this->Kfold = Kfold; this->ic_coef = ic_coef; if (is_cv) { cv_init_fit_arg.resize(Kfold); train_X_list.resize(Kfold); test_X_list.resize(Kfold); train_y_list.resize(Kfold); test_y_list.resize(Kfold); test_weight_list.resize(Kfold); train_weight_list.resize(Kfold); } }; void set_cv_init_fit_arg(int beta_size, int M) { for (int i = 0; i < this->Kfold; i++) { T2 beta_init; T3 coef0_init; coef_set_zero(beta_size, M, beta_init, coef0_init); Eigen::VectorXi A_init; Eigen::VectorXd bd_init; FIT_ARG<T2, T3> fit_arg(0, 0., beta_init, coef0_init, bd_init, A_init); cv_init_fit_arg[i] = fit_arg; } } // void set_cv_initial_model_param(int Kfold, int p) // { // this->cv_initial_model_param = Eigen::MatrixXd::Zero(p, Kfold); // }; // void set_cv_initial_A(int Kfold, int p) // { // vector<Eigen::VectorXi> tmp(Kfold); // this->cv_initial_A = tmp; // }; // void 
set_cv_initial_coef0(int Kfold, int p) // { // vector<double> tmp(Kfold); // for (int i = 0; i < Kfold; i++) // tmp[i] = 0; // this->cv_initial_coef0 = tmp; // }; // void update_cv_initial_model_param(Eigen::VectorXd model_param, int k) // { // this->cv_initial_model_param.col(k) = model_param; // } // void update_cv_initial_A(Eigen::VectorXi A, int k) // { // this->cv_initial_A[k] = A; // } // void update_cv_initial_coef0(double coef0, int k) // { // this->cv_initial_coef0[k] = coef0; // } void set_cv_train_test_mask(Data<T1, T2, T3, T4> &data, int n, Eigen::VectorXi &cv_fold_id) { Eigen::VectorXi index_list(n); std::vector<int> index_vec((unsigned int)n); std::vector<Eigen::VectorXi> group_list((unsigned int)this->Kfold); for (int i = 0; i < n; i++) { index_vec[i] = i; } if (cv_fold_id.size() == 0) { // std::random_device rd; std::mt19937 g(123); std::shuffle(index_vec.begin(), index_vec.end(), g); for (int i = 0; i < n; i++) { index_list(i) = index_vec[i]; } Eigen::VectorXd loss_list(this->Kfold); int group_size = int(n / this->Kfold); for (int k = 0; k < (this->Kfold - 1); k++) { group_list[k] = index_list.segment(int(k * group_size), group_size); } group_list[this->Kfold - 1] = index_list.segment(int((this->Kfold - 1) * group_size), n - int(int(this->Kfold - 1) * group_size)); } else { // given cv_fold_id auto rule = [cv_fold_id](int i, int j) -> bool { return cv_fold_id(i) < cv_fold_id(j); }; std::sort(index_vec.begin(), index_vec.end(), rule); for (int i = 0; i < n; i++) { index_list(i) = index_vec[i]; } int k = 0, st = 0, ed = 1; while (k < this->Kfold && ed < n) { int mask = cv_fold_id(index_list(st)); while (ed < n && mask == cv_fold_id(index_list(ed))) ed++; group_list[k] = index_list.segment(st, ed - st); st = ed; ed++; k++; } } for (int k = 0; k < this->Kfold; k++) { std::sort(group_list[k].data(), group_list[k].data() + group_list[k].size()); } // cv train-test partition: std::vector<Eigen::VectorXi> train_mask_list_tmp((unsigned int)this->Kfold); 
std::vector<Eigen::VectorXi> test_mask_list_tmp((unsigned int)this->Kfold); for (int k = 0; k < this->Kfold; k++) { int train_x_size = n - group_list[k].size(); // get train_mask Eigen::VectorXi train_mask(train_x_size); int i = 0; for (int j = 0; j < this->Kfold; j++) { if (j != k) { for (int s = 0; s < group_list[j].size(); s++) { train_mask(i) = group_list[j](s); i++; } } } std::sort(train_mask.data(), train_mask.data() + train_mask.size()); train_mask_list_tmp[k] = train_mask; test_mask_list_tmp[k] = group_list[k]; slice(data.x, train_mask, this->train_X_list[k]); slice(data.x, group_list[k], this->test_X_list[k]); slice(data.y, train_mask, this->train_y_list[k]); slice(data.y, group_list[k], this->test_y_list[k]); slice(data.weight, train_mask, this->train_weight_list[k]); slice(data.weight, group_list[k], this->test_weight_list[k]); } this->train_mask_list = train_mask_list_tmp; this->test_mask_list = test_mask_list_tmp; }; // void cal_cv_group_XTX(Data<T1, T2, T3> &data) // { // int p = data.p; // Eigen::VectorXi index = data.g_index; // Eigen::VectorXi gsize = data.g_size; // int N = data.g_num; // std::vector<std::vector<Eigen::MatrixXd>> group_XTX_list_tmp(this->Kfold); // for (int k = 0; k < this->Kfold; k++) // { // int train_size = this->train_mask_list[k].size(); // Eigen::MatrixXd train_x(train_size, p); // for (int i = 0; i < train_size; i++) // { // train_x.row(i) = data.x.row(this->train_mask_list[k](i)); // }; // group_XTX_list_tmp[k] = group_XTX(train_x, index, gsize, train_size, p, N, 1); // } // this->group_XTX_list = group_XTX_list_tmp; // } double ic(int train_n, int M, int N, Algorithm<T1, T2, T3, T4> *algorithm) { double loss; if (algorithm->model_type == 1 || algorithm->model_type == 5) { loss = train_n * log(algorithm->get_train_loss() - algorithm->lambda_level * algorithm->beta.cwiseAbs2().sum()); } else { loss = 2 * (algorithm->get_train_loss() - algorithm->lambda_level * algorithm->beta.cwiseAbs2().sum()); } if (ic_type == 1) { return 
loss + 2.0 * algorithm->get_effective_number(); } else if (ic_type == 2) { return loss + this->ic_coef * log(double(train_n)) * algorithm->get_effective_number(); } else if (ic_type == 3) { return loss + this->ic_coef * log(double(N)) * log(log(double(train_n))) * algorithm->get_effective_number(); } else if (ic_type == 4) { return loss + this->ic_coef * (log(double(train_n)) + 2 * log(double(N))) * algorithm->get_effective_number(); } else return 0; }; double loss_function(T4 &train_x, T1 &train_y, Eigen::VectorXd &train_weight, Eigen::VectorXi &g_index, Eigen::VectorXi &g_size, int train_n, int p, int N, Algorithm<T1, T2, T3, T4> *algorithm) { Eigen::VectorXi A = algorithm->get_A_out(); T2 beta = algorithm->get_beta(); T3 coef0 = algorithm->get_coef0(); Eigen::VectorXi A_ind = find_ind(A, g_index, g_size, beta.rows(), N); T4 X_A = X_seg(train_x, train_n, A_ind, algorithm->model_type); T2 beta_A; slice(beta, A_ind, beta_A); // Eigen::VectorXd beta_A(A_ind.size()); // for (int k = 0; k < A_ind.size(); k++) // { // beta_A(k) = beta(A_ind(k)); // } return algorithm->loss_function(X_A, train_y, train_weight, beta_A, coef0, A, g_index, g_size, 0.0); } // to do Eigen::VectorXd fit_and_evaluate_in_metric(std::vector<Algorithm<T1, T2, T3, T4> *> algorithm_list, Data<T1, T2, T3, T4> &data, FIT_ARG<T2, T3> &fit_arg) { Eigen::VectorXd loss_list(this->Kfold); if (!is_cv) { algorithm_list[0]->update_sparsity_level(fit_arg.support_size); algorithm_list[0]->update_lambda_level(fit_arg.lambda); algorithm_list[0]->update_beta_init(fit_arg.beta_init); algorithm_list[0]->update_bd_init(fit_arg.bd_init); algorithm_list[0]->update_coef0_init(fit_arg.coef0_init); algorithm_list[0]->update_A_init(fit_arg.A_init, data.g_num); algorithm_list[0]->fit(data.x, data.y, data.weight, data.g_index, data.g_size, data.n, data.p, data.g_num); if (algorithm_list[0]->get_warm_start()) { fit_arg.beta_init = algorithm_list[0]->get_beta(); fit_arg.coef0_init = algorithm_list[0]->get_coef0(); 
fit_arg.bd_init = algorithm_list[0]->get_bd(); } loss_list(0) = this->ic(data.n, data.M, data.g_num, algorithm_list[0]); } else { Eigen::VectorXi g_index = data.g_index; Eigen::VectorXi g_size = data.g_size; int p = data.p; int N = data.g_num; #pragma omp parallel for // parallel for (int k = 0; k < this->Kfold; k++) { // get test_x, test_y int test_n = this->test_mask_list[k].size(); int train_n = this->train_mask_list[k].size(); // train & test data // Eigen::MatrixXd train_x = matrix_slice(data.x, this->train_mask_list[k], 0); // Eigen::MatrixXd test_x = matrix_slice(data.x, this->test_mask_list[k], 0); // Eigen::VectorXd train_y = vector_slice(data.y, this->train_mask_list[k]); // Eigen::VectorXd test_y = vector_slice(data.y, this->test_mask_list[k]); // Eigen::VectorXd train_weight = vector_slice(data.weight, this->train_mask_list[k]); // Eigen::VectorXd test_weight = vector_slice(data.weight, this->test_mask_list[k]); // Eigen::VectorXd beta_init; algorithm_list[k]->update_sparsity_level(fit_arg.support_size); algorithm_list[k]->update_lambda_level(fit_arg.lambda); algorithm_list[k]->update_beta_init(this->cv_init_fit_arg[k].beta_init); algorithm_list[k]->update_bd_init(this->cv_init_fit_arg[k].bd_init); algorithm_list[k]->update_coef0_init(this->cv_init_fit_arg[k].coef0_init); algorithm_list[k]->update_A_init(this->cv_init_fit_arg[k].A_init, N); // beta_init = this->cv_initial_model_param.col(k).eval(); // algorithm->update_beta_init(beta_init); // algorithm->update_coef0_init(this->cv_initial_coef0[k]); // algorithm->update_A_init(this->cv_initial_A[k], N); // algorithm->update_train_mask(this->train_mask_list[k]); // ?????????????????????????????????????????????????????????????? 
algorithm_list[k]->fit(this->train_X_list[k], this->train_y_list[k], this->train_weight_list[k], g_index, g_size, train_n, p, N); if (algorithm_list[k]->get_warm_start()) { this->cv_init_fit_arg[k].beta_init = algorithm_list[k]->get_beta(); this->cv_init_fit_arg[k].coef0_init = algorithm_list[k]->get_coef0(); this->cv_init_fit_arg[k].bd_init = algorithm_list[k]->get_bd(); // this->update_cv_initial_model_param(algorithm->get_beta(), k); // this->update_cv_initial_A(algorithm->get_A_out(), k); // this->update_cv_initial_coef0(algorithm->get_coef0(), k); } loss_list(k) = this->loss_function(this->test_X_list[k], this->test_y_list[k], this->test_weight_list[k], g_index, g_size, test_n, p, N, algorithm_list[k]); } } return loss_list; }; }; #endif // SRC_METRICS_H
/* ===== file: mediancut.c ===== */
/* ** © 2009-2018 by Kornel Lesiński. ** © 1989, 1991 by Jef Poskanzer. ** © 1997, 2000, 2002 by Greg Roelofs; based on an idea by Stefan Schneider. ** ** See COPYRIGHT file for license. */ #include <stdlib.h> #include <stddef.h> #include "libimagequant.h" #include "pam.h" #include "mediancut.h" #define index_of_channel(ch) (offsetof(f_pixel,ch)/sizeof(float)) static f_pixel averagepixels(unsigned int clrs, const hist_item achv[]); struct box { f_pixel color; f_pixel variance; double sum, total_error, max_error; unsigned int ind; unsigned int colors; }; ALWAYS_INLINE static double variance_diff(double val, const double good_enough); inline static double variance_diff(double val, const double good_enough) { val *= val; if (val < good_enough*good_enough) return val*0.25; return val; } /** Weighted per-channel variance of the box. It's used to decide which channel to split by */ static f_pixel box_variance(const hist_item achv[], const struct box *box) { f_pixel mean = box->color; double variancea=0, variancer=0, varianceg=0, varianceb=0; for(unsigned int i = 0; i < box->colors; ++i) { const f_pixel px = achv[box->ind + i].acolor; double weight = achv[box->ind + i].adjusted_weight; variancea += variance_diff(mean.a - px.a, 2.0/256.0)*weight; variancer += variance_diff(mean.r - px.r, 1.0/256.0)*weight; varianceg += variance_diff(mean.g - px.g, 1.0/256.0)*weight; varianceb += variance_diff(mean.b - px.b, 1.0/256.0)*weight; } return (f_pixel){ .a = variancea*(4.0/16.0), .r = variancer*(7.0/16.0), .g = varianceg*(9.0/16.0), .b = varianceb*(5.0/16.0), }; } static double box_max_error(const hist_item achv[], const struct box *box) { f_pixel mean = box->color; double max_error = 0; for(unsigned int i = 0; i < box->colors; ++i) { const double diff = colordifference(mean, achv[box->ind + i].acolor); if (diff > max_error) { max_error = diff; } } return max_error; } ALWAYS_INLINE static double color_weight(f_pixel median, hist_item h); static inline void hist_item_swap(hist_item 
*l, hist_item *r) { if (l != r) { hist_item t = *l; *l = *r; *r = t; } } ALWAYS_INLINE static unsigned int qsort_pivot(const hist_item *const base, const unsigned int len); inline static unsigned int qsort_pivot(const hist_item *const base, const unsigned int len) { if (len < 32) { return len/2; } const unsigned int aidx=8, bidx=len/2, cidx=len-1; const unsigned int a=base[aidx].tmp.sort_value, b=base[bidx].tmp.sort_value, c=base[cidx].tmp.sort_value; return (a < b) ? ((b < c) ? bidx : ((a < c) ? cidx : aidx )) : ((b > c) ? bidx : ((a < c) ? aidx : cidx )); } ALWAYS_INLINE static unsigned int qsort_partition(hist_item *const base, const unsigned int len); inline static unsigned int qsort_partition(hist_item *const base, const unsigned int len) { unsigned int l = 1, r = len; if (len >= 8) { hist_item_swap(&base[0], &base[qsort_pivot(base,len)]); } const unsigned int pivot_value = base[0].tmp.sort_value; while (l < r) { if (base[l].tmp.sort_value >= pivot_value) { l++; } else { while(l < --r && base[r].tmp.sort_value <= pivot_value) {} hist_item_swap(&base[l], &base[r]); } } l--; hist_item_swap(&base[0], &base[l]); return l; } /** quick select algorithm */ static void hist_item_sort_range(hist_item base[], unsigned int len, unsigned int sort_start) { for(;;) { const unsigned int l = qsort_partition(base, len), r = l+1; if (l > 0 && sort_start < l) { len = l; } else if (r < len && sort_start > r) { base += r; len -= r; sort_start -= r; } else break; } } /** sorts array to make sum of weights lower than halfvar one side, returns edge between <halfvar and >halfvar parts of the set */ static hist_item *hist_item_sort_halfvar(hist_item base[], unsigned int len, double *const lowervar, const double halfvar) { do { const unsigned int l = qsort_partition(base, len), r = l+1; // check if sum of left side is smaller than half, // if it is, then it doesn't need to be sorted unsigned int t = 0; double tmpsum = *lowervar; while (t <= l && tmpsum < halfvar) tmpsum += 
base[t++].color_weight; if (tmpsum < halfvar) { *lowervar = tmpsum; } else { if (l > 0) { hist_item *res = hist_item_sort_halfvar(base, l, lowervar, halfvar); if (res) return res; } else { // End of left recursion. This will be executed in order from the first element. *lowervar += base[0].color_weight; if (*lowervar > halfvar) return &base[0]; } } if (len > r) { base += r; len -= r; // tail-recursive "call" } else { *lowervar += base[r].color_weight; return (*lowervar > halfvar) ? &base[r] : NULL; } } while(1); } static f_pixel get_median(const struct box *b, hist_item achv[]); typedef struct { unsigned int chan; float variance; } channelvariance; static int comparevariance(const void *ch1, const void *ch2) { return ((const channelvariance*)ch1)->variance > ((const channelvariance*)ch2)->variance ? -1 : (((const channelvariance*)ch1)->variance < ((const channelvariance*)ch2)->variance ? 1 : 0); } /** Finds which channels need to be sorted first and preproceses achv for fast sort */ static double prepare_sort(struct box *b, hist_item achv[]) { /* ** Sort dimensions by their variance, and then sort colors first by dimension with highest variance */ channelvariance channels[4] = { {index_of_channel(a), b->variance.a}, {index_of_channel(r), b->variance.r}, {index_of_channel(g), b->variance.g}, {index_of_channel(b), b->variance.b}, }; qsort(channels, 4, sizeof(channels[0]), comparevariance); const unsigned int ind1 = b->ind; const unsigned int colors = b->colors; #if __GNUC__ >= 9 || __clang__ #pragma omp parallel for if (colors > 25000) \ schedule(static) default(none) shared(achv, channels, colors, ind1) #else #pragma omp parallel for if (colors > 25000) \ schedule(static) default(none) shared(achv, channels) #endif for(unsigned int i=0; i < colors; i++) { const float *chans = (const float *)&achv[ind1 + i].acolor; // Only the first channel really matters. 
When trying median cut many times // with different histogram weights, I don't want sort randomness to influence outcome. achv[ind1 + i].tmp.sort_value = ((unsigned int)(chans[channels[0].chan]*65535.0)<<16) | (unsigned int)((chans[channels[2].chan] + chans[channels[1].chan]/2.0 + chans[channels[3].chan]/4.0)*65535.0); } const f_pixel median = get_median(b, achv); // box will be split to make color_weight of each side even const unsigned int ind = b->ind, end = ind+b->colors; double totalvar = 0; #pragma omp parallel for if (end - ind > 15000) \ schedule(static) default(shared) reduction(+:totalvar) for(unsigned int j=ind; j < end; j++) totalvar += (achv[j].color_weight = color_weight(median, achv[j])); return totalvar / 2.0; } /** finds median in unsorted set by sorting only minimum required */ static f_pixel get_median(const struct box *b, hist_item achv[]) { const unsigned int median_start = (b->colors-1)/2; hist_item_sort_range(&(achv[b->ind]), b->colors, median_start); if (b->colors&1) return achv[b->ind + median_start].acolor; // technically the second color is not guaranteed to be sorted correctly // but most of the time it is good enough to be useful return averagepixels(2, &achv[b->ind + median_start]); } /* ** Find the best splittable box. -1 if no boxes are splittable. 
*/ static int best_splittable_box(struct box bv[], unsigned int boxes, const double max_mse) { int bi=-1; double maxsum=0; for(unsigned int i=0; i < boxes; i++) { if (bv[i].colors < 2) { continue; } // looks only at max variance, because it's only going to split by it const double cv = MAX(bv[i].variance.r, MAX(bv[i].variance.g,bv[i].variance.b)); double thissum = bv[i].sum * MAX(bv[i].variance.a, cv); if (bv[i].max_error > max_mse) { thissum = thissum* bv[i].max_error/max_mse; } if (thissum > maxsum) { maxsum = thissum; bi = i; } } return bi; } inline static double color_weight(f_pixel median, hist_item h) { float diff = colordifference(median, h.acolor); return sqrt(diff) * (sqrt(1.0+h.adjusted_weight)-1.0); } static void set_colormap_from_boxes(colormap *map, struct box bv[], unsigned int boxes, hist_item *achv); static void adjust_histogram(hist_item *achv, const struct box bv[], unsigned int boxes); static double box_error(const struct box *box, const hist_item achv[]) { f_pixel avg = box->color; double total_error=0; for (unsigned int i = 0; i < box->colors; ++i) { total_error += colordifference(avg, achv[box->ind + i].acolor) * achv[box->ind + i].perceptual_weight; } return total_error; } static bool total_box_error_below_target(double target_mse, struct box bv[], unsigned int boxes, const histogram *hist) { target_mse *= hist->total_perceptual_weight; double total_error=0; for(unsigned int i=0; i < boxes; i++) { // error is (re)calculated lazily if (bv[i].total_error >= 0) { total_error += bv[i].total_error; } if (total_error > target_mse) return false; } for(unsigned int i=0; i < boxes; i++) { if (bv[i].total_error < 0) { bv[i].total_error = box_error(&bv[i], hist->achv); total_error += bv[i].total_error; } if (total_error > target_mse) return false; } return true; } static void box_init(struct box *box, const hist_item *achv, const unsigned int ind, const unsigned int colors, const double sum) { box->ind = ind; box->colors = colors; box->sum = sum; 
box->total_error = -1; box->color = averagepixels(colors, &achv[ind]); box->variance = box_variance(achv, box); box->max_error = box_max_error(achv, box); } /* ** Here is the fun part, the median-cut colormap generator. This is based ** on Paul Heckbert's paper, "Color Image Quantization for Frame Buffer ** Display," SIGGRAPH 1982 Proceedings, page 297. */ LIQ_PRIVATE colormap *mediancut(histogram *hist, unsigned int newcolors, const double target_mse, const double max_mse, void* (*malloc)(size_t), void (*free)(void*)) { hist_item *achv = hist->achv; LIQ_ARRAY(struct box, bv, newcolors); unsigned int boxes = 1; /* ** Set up the initial box. */ { double sum = 0; for(unsigned int i=0; i < hist->size; i++) { sum += achv[i].adjusted_weight; } box_init(&bv[0], achv, 0, hist->size, sum); /* ** Main loop: split boxes until we have enough. */ while (boxes < newcolors) { // first splits boxes that exceed quality limit (to have colors for things like odd green pixel), // later raises the limit to allow large smooth areas/gradients get colors. const double current_max_mse = max_mse + (boxes/(double)newcolors)*16.0*max_mse; const int bi = best_splittable_box(bv, boxes, current_max_mse); if (bi < 0) { break; /* ran out of colors! */ } unsigned int indx = bv[bi].ind; unsigned int clrs = bv[bi].colors; /* Classic implementation tries to get even number of colors or pixels in each subdivision. Here, instead of popularity I use (sqrt(popularity)*variance) metric. Each subdivision balances number of pixels (popular colors) and low variance - boxes can be large if they have similar colors. Later boxes with high variance will be more likely to be split. Median used as expected value gives much better results than mean. */ const double halfvar = prepare_sort(&bv[bi], achv); double lowervar=0; // hist_item_sort_halfvar sorts and sums lowervar at the same time // returns item to break at …minus one, which does smell like an off-by-one error. 
hist_item *break_p = hist_item_sort_halfvar(&achv[indx], clrs, &lowervar, halfvar); unsigned int break_at = MIN(clrs-1, break_p - &achv[indx] + 1); /* ** Split the box. */ double sm = bv[bi].sum; double lowersum = 0; for(unsigned int i=0; i < break_at; i++) lowersum += achv[indx + i].adjusted_weight; box_init(&bv[bi], achv, indx, break_at, lowersum); box_init(&bv[boxes], achv, indx + break_at, clrs - break_at, sm - lowersum); ++boxes; if (total_box_error_below_target(target_mse, bv, boxes, hist)) { break; } } } colormap *map = pam_colormap(boxes, malloc, free); set_colormap_from_boxes(map, bv, boxes, achv); adjust_histogram(achv, bv, boxes); return map; } static void set_colormap_from_boxes(colormap *map, struct box* bv, unsigned int boxes, hist_item *achv) { /* ** Ok, we've got enough boxes. Now choose a representative color for ** each box. There are a number of possible ways to make this choice. ** One would be to choose the center of the box; this ignores any structure ** within the boxes. Another method would be to average all the colors in ** the box - this is the method specified in Heckbert's paper. 
*/ for(unsigned int bi = 0; bi < boxes; ++bi) { map->palette[bi].acolor = bv[bi].color; /* store total color popularity (perceptual_weight is approximation of it) */ map->palette[bi].popularity = 0; for(unsigned int i=bv[bi].ind; i < bv[bi].ind+bv[bi].colors; i++) { map->palette[bi].popularity += achv[i].perceptual_weight; } } } /* increase histogram popularity by difference from the final color (this is used as part of feedback loop) */ static void adjust_histogram(hist_item *achv, const struct box* bv, unsigned int boxes) { for(unsigned int bi = 0; bi < boxes; ++bi) { for(unsigned int i=bv[bi].ind; i < bv[bi].ind+bv[bi].colors; i++) { achv[i].tmp.likely_colormap_index = bi; } } } static f_pixel averagepixels(unsigned int clrs, const hist_item achv[]) { double r = 0, g = 0, b = 0, a = 0, sum = 0; #pragma omp parallel for if (clrs > 25000) \ schedule(static) default(shared) reduction(+:a) reduction(+:r) reduction(+:g) reduction(+:b) reduction(+:sum) for(unsigned int i = 0; i < clrs; i++) { const f_pixel px = achv[i].acolor; const double weight = achv[i].adjusted_weight; sum += weight; a += px.a * weight; r += px.r * weight; g += px.g * weight; b += px.b * weight; } if (sum) { a /= sum; r /= sum; g /= sum; b /= sum; } assert(!isnan(r) && !isnan(g) && !isnan(b) && !isnan(a)); return (f_pixel){.r=r, .g=g, .b=b, .a=a}; }
/* ===== file: NEC_scheme.c ===== */
/* ============================================================================= Copyright (c) 2013, Institute for Microelectronics, TU Wien http://www.iue.tuwien.ac.at ----------------- ViennaWD - The Vienna Wigner Decoherence Algorithms Ensemble Monte Carlo Simulator ----------------- authors: Marek Pobjecky Mihail Nedjalkov nedjalkov@iue.tuwien.ac.at license: see file LICENSE in the base directory ============================================================================= */ #include <math.h> #include "emc.h" #include <omp.h> /********************************************************************/ /* NEC method for the charge assignment at the node points */ /********************************************************************/ int oooChargeAssignmentNEC(const_t constpar, geometry_t *geometry, scatpar_t *scatpar, el_data_t *particles, phys_quant_t *phys_quantities) { static int i, j, n; static double denn, teglo, elecNumber[MAXNX][MAXNY], normFactor; /*=== Evaluate multiplication constant ===*/ normFactor = 1.0 / (geometry->cellVolume * constpar.Ni); /*=== Reset the charge vector ===*/ for (i = 0; i <= geometry->nxmax; ++i) for (j = 0; j <= geometry->nymax; ++j) elecNumber[i][j] = 0.0; // #pragma omp parallel // #pragma omp for #pragma omp for schedule(static, 1) /*=== Charge assignment part ===*/ for (n = 0; n <= scatpar->n_used; ++n) { i = (int) (particles[n].p[5] / geometry->meshSize); j = (int) (particles[n].p[6] / geometry->meshSize); teglo = particles[n].p[7] * 0.25; elecNumber[i] [j] += teglo; elecNumber[i] [j+1] += teglo; elecNumber[i+1][j] += teglo; elecNumber[i+1][j+1] += teglo; } /*=== Calculate electron density ===*/ for (i = 0; i <= geometry->nxmax; ++i) for (j = 0; j <= geometry->nymax; ++j) { denn = elecNumber[i][j]; if (i == 0 || i == geometry->nxmax) denn *= 2.0; if (j == 0 || j == geometry->nymax) denn *= 2.0; phys_quantities->elecDensity[i][j] = denn * normFactor; } return 0; }
/* ===== file: GB_unaryop__ainv_int8_uint16.c ===== */
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__ainv_int8_uint16
// op(A') function: GB_tran__ainv_int8_uint16

// C type: int8_t
// A type: uint16_t
// cast: int8_t cij = (int8_t) aij
// unaryop: cij = -aij

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
// NOTE(review): the uint16 value is narrowed to int8 FIRST, then negated; for
// values outside int8's range the conversion result is implementation-defined
// (two's-complement wrap on mainstream compilers) — intentional modular
// semantics in GraphBLAS, but worth knowing when reading results.
#define GB_CASTING(z, aij) \
    int8_t z = (int8_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT8 || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise additive inverse: Cx [p] = -(int8_t) Ax [p] for p in [0, anz).
// Each iteration is independent, so the loop is a flat OpenMP parallel-for.
GrB_Info GB_unop__ainv_int8_uint16
(
    int8_t *Cx,             // Cx and Ax may be aliased
    uint16_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is generated by textual inclusion of
// GB_unaryop_transpose.c, driven by the GB_* macros defined above.
GrB_Info GB_tran__ainv_int8_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unop__identity_int32_int8.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__identity_int32_int8)
// op(A') function: GB (_unop_tran__identity_int32_int8)

// C type: int32_t
// A type: int8_t
// cast: int32_t cij = (int32_t) aij
// unaryop: cij = aij

#define GB_ATYPE \
    int8_t

#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting (int8 -> int32 widens; always value-preserving)
#define GB_CAST(z, aij) \
    int32_t z = (int32_t) aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    int8_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    int32_t z = (int32_t) aij ; \
    Cx [pC] = z ; \
}

// true if operator is the identity op with no typecasting
// (0 here: int8 -> int32 IS a typecast, so the memcpy fast path is disabled)
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_INT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Widening identity: Cx [p] = (int32_t) Ax [p].  Two variants: full (Ab NULL)
// and bitmap (entries present only where Ab [p] is nonzero).
GrB_Info GB (_unop_apply__identity_int32_int8)
(
    int32_t *Cx,            // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            int8_t aij = Ax [p] ;
            int32_t z = (int32_t) aij ;
            Cx [p] = z ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            int8_t aij = Ax [p] ;
            int32_t z = (int32_t) aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Transpose kernel body generated by textual inclusion of GB_unop_transpose.c,
// driven by the GB_* macros defined above.
GrB_Info GB (_unop_tran__identity_int32_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
SourceCode.c
#ifdef _WIN32 #include <stdio.h> #else #define _GNU_SOURCE // for fcloseall() on linux #include <stdio.h> #endif #include <stdbool.h> #include <string.h> #include <stdlib.h> #include <omp.h> #include <math.h> #include <SpiceUsr.h> #include <PAConfig.h> #ifdef _WIN32 #include <windows.h> // needed for search wu directory #include <direct.h> // only needed for _mkdir() #else #include <unistd.h> // only needed for usleep() #include <dirent.h> // needed for search wu directory #include <sys/stat.h> // only needed for mkdir() #endif // Activate timing: Add preprocessor definition "__WTIMING" #ifdef __WTIMING #include <time.h> #endif //BEGIN Function cross-platform compatibility #ifdef _WIN32 #define SLEEP( a1 ) Sleep( a1 ) #define mkdir( a1, a2 ) _mkdir( a1 ) #else #define SLEEP( a1 ) usleep( a1 * 1000 ) #endif #ifdef _WIN32 // Avoid MSVC level 3 warning C4996 #define strdup _strdup #define strcpy strcpy_s #define sscanf sscanf_s #define strtok_r strtok_s #define strcat strcat_s #define fcloseall _fcloseall #else #define strcat( a1, a2, a3 ) strcat(a1, a3) #define strcpy( a1, a2, a3 ) strcpy( a1, a3 ) #define fopen_s( a1, a2, a3 ) *a1 = fopen( a2, a3 ) #define sprintf_s snprintf #endif #ifdef _WIN32 // platform-specific separators in paths #define OS_SEP "\\" #else #define OS_SEP "/" #endif //END Function cross-platform compatibility #define PI_half 3.14159265358979323846/2 //Global variables int velflag = 0; int suncflag = 0; int nodeflag = 0; int tminflag = 0; int tmaxflag = 0; int mminflag = 0; int mmaxflag = 0; int objectflag = 0; int intgflag = 0; int pc2flag = 0; int WUcheckflag = 0; int wu_count = 0; int all_wu_count = 0; int particles_count = 0; int all_particles_count = 0; char *cometwu_path, *output_path; double timespec[3]; float massspec[2]; char *object_id; double object_state[6]; double node_orbitplane_normvec[3]; double Sun_GM, Sun_state[6]; int N_bodys = 0; int body_ID[10]; double body_GM[10]; double dv_step; double max_distance; //Functions int 
parse_input(int argc, char* argv[]); char **get_WU_paths(void); char *search_WUsummary(void); char **select_WUs(char *WUsummary_name); char **search_WUs(void); float *get_particles(char **wu_paths); int get_nearest_state(int z, float *fwu_array); int get_state_nearest_to_orbital_plane(int z, float *fwu_array); float *get_all_target_states(float *nearest_states); void calculate_target_state(double *nstate, double GM); void integrate_target_state(double *nstate, double GM); void calc_accel(int N_bodys, double Sun_GM, double pos[], double **body_state[], double *accel); float *filter_particles_out_of_range(float *target_states); int save_target_states(float *target_states); void WUcheck(char *WUsummary_name); //Main Program int main(int argc, char* argv[]) { #ifdef __WTIMING clock_t start = clock(); // Start clock #endif //Print version printf("ParticleAnalyser version " PA_VERSION_MAJOR "." PA_VERSION_MINOR "\n"); //Load Spice kernels printf("Loading kernels... "); furnsh_c("kernels_generic.txt"); printf("...done."); //Parse input arguments if (parse_input(argc, argv) != 0){ return 1; } //Get paths to all work units that contain relevant particles char **wu_paths; wu_paths = get_WU_paths(); if (wu_paths == NULL){ if (WUcheckflag == 1){ return 0; } return 1; } //If there are no relevant WUs, but no error has occurred, return success if (wu_count == 0){ free(wu_paths); return 0; } //Load all relevant particles and get the states that are closest to target time / orbital plane float *nearest_states; nearest_states = get_particles(wu_paths); if (nearest_states == NULL){ return 1; } //If there are no relevant particles, but no error has occurred, return success if (particles_count == 0){ free(nearest_states); return 0; } //Calculate target states float *target_states; target_states = get_all_target_states(nearest_states); if (target_states == NULL){ return 1; } //Filter particles if (objectflag == 1 && nodeflag == 0){ target_states = 
filter_particles_out_of_range(target_states); if (target_states == NULL){ return 1; } if (particles_count == 0){ free(target_states); return 0; } } //Write output file if (save_target_states(target_states) != 0){ return 1; } #ifdef __WTIMING //Print elapsed time clock_t end = clock(); double elapsed_time = (end - start) / (double)CLOCKS_PER_SEC; printf("\n\n Elapsed time: %1.3f s\n", elapsed_time); #endif return 0; } //Functions int parse_input(int argc, char *argv[]) { int i = 1, x = 3, valid; while (i < argc) /* Scan through args. */ { if (strncmp(argv[i], "--", 2) == 0) /* Check for option character. */ { if (strspn("input", argv[i]) == 5){ //This is the directory containing the WUs cometwu_path = argv[i + 1]; i = i + 2; x--; } else if (strspn("output", argv[i]) == 6){ //This is the file to write the results to output_path = argv[i + 1]; i = i + 2; x--; } else if (strspn("WUcheck", argv[i]) == 7){ WUcheckflag = 1; i = i + 1; x--; } else if (strspn("time", argv[i]) == 4){ //This is the target time timespec[0] = atof(argv[i + 1]); i = i + 2; x--; } else if (strspn("tmin", argv[i]) == 4){ //Particles must have formed after this date timespec[1] = atof(argv[i + 1]); tminflag = 1; i = i + 2; } else if (strspn("tmax", argv[i]) == 4){ //Particles must have formed before this date timespec[2] = atof(argv[i + 1]); tmaxflag = 1; i = i + 2; } else if (strspn("mmin", argv[i]) == 4){ //Particles must have a mass higher than mmin massspec[0] = (float)atof(argv[i + 1]); mminflag = 1; i = i + 2; } else if (strspn("mmax", argv[i]) == 4){ //Particles must have a mass lower than mmax massspec[1] = (float)atof(argv[i + 1]); mmaxflag = 1; i = i + 2; } else if (strspn("object", argv[i]) == 6){ //Specify a SPICE conform body ID and a distance object_id = argv[i + 1]; // only particles within that distance will be written to output max_distance = atof(argv[i + 2]); objectflag = 1; i = i + 3; } else if (strspn("intg", argv[i]) == 4){ //Specifies wether or not to integrate particle 
target states dv_step = atof(argv[i + 1]); // first parameter is dv_step N_bodys = atoi(argv[i + 2]) + 1; // second parameter is number of planets! Following parameters are NAIF IDs of the planets int j; SpiceInt dim; body_ID[0] = 10; // Sun is automatically added for (j = 1; j < N_bodys; j++){ body_ID[j] = atoi(argv[i + 2 + j]); bodvcd_c(body_ID[j], "GM", N_bodys, &dim, &body_GM[j]); // Get standard gravitational parameter of each body (GM) } intgflag = 1; i = i + 3 + N_bodys - 1; } else{ printf("\n\nerror: input argument %d is invalid", i); return 1; } } else if (strncmp(argv[i], "-", 1) == 0) { valid = 0; if (strspn("s", argv[i]) != 0){ //Specifies wether or not to convert to Sun-centered coordinates suncflag = 1; // before calculating orbital elements valid = 1; } if (strspn("v", argv[i]) != 0){ //Specifies wether or not velocities will be written to output velflag = 1; valid = 1; } if (strspn("n", argv[i]) != 0){ //Specifies wether or not to compute particle nodes nodeflag = 1; valid = 1; } if (strspn("p", argv[i]) != 0){ //Specifies wether or not to write pointcache output format pc2flag = 1; valid = 1; } if (valid == 1){ i++; } else { //No valid character found printf("\n\nerror: input argument %d is invalid", i); return 1; } } else{ printf("\n\nerror: input argument %d is invalid", i); return 1; } } //Check overall validity of input if (x > 0){ printf("\n\nerror: not enough input arguments\n arguments --input , --output and --time must always be set\n"); return 1; } if (nodeflag == 1 && objectflag == 0){ printf("\n\nerror: specify an object in order to compute nodes (-n requires --object to be set)\n"); return 1; } //Load additional kernels if (objectflag == 1 || suncflag == 1){ furnsh_c("kernels_spk.txt"); //object kernel directory must be specified in "kernels_spk.txt" } //Set up object location for relative coordinates computation double lt; if (objectflag == 1){ if (suncflag == 1){ spkezr_c(object_id, timespec[0], "ECLIPJ2000", "NONE", "SUN", 
object_state, &lt); } else{ spkezr_c(object_id, timespec[0], "ECLIPJ2000", "NONE", "SSB", object_state, &lt); } //Set up orbital plane for node computation if (nodeflag == 1){ double length; node_orbitplane_normvec[0] = object_state[1] * object_state[5] - object_state[2] * object_state[4]; node_orbitplane_normvec[1] = object_state[2] * object_state[3] - object_state[0] * object_state[5]; node_orbitplane_normvec[2] = object_state[0] * object_state[4] - object_state[1] * object_state[3]; length = sqrt(node_orbitplane_normvec[0] * node_orbitplane_normvec[0] + node_orbitplane_normvec[1] * node_orbitplane_normvec[1] + node_orbitplane_normvec[2] * node_orbitplane_normvec[2]); if (length != 0){ node_orbitplane_normvec[0] = node_orbitplane_normvec[0] / length; node_orbitplane_normvec[1] = node_orbitplane_normvec[1] / length; node_orbitplane_normvec[2] = node_orbitplane_normvec[2] / length; } else{ printf("\n\nwarning: length of orbit-plane normal vector is zero. Body may have coordinates 0/0/0\n"); printf(" ...computing nodes in J2000 eclpiptic\n\n"); node_orbitplane_normvec[0] = 0; node_orbitplane_normvec[1] = 0; node_orbitplane_normvec[2] = 1; } } } //Get Sun's position relative to Barycentric center (little to no effect) if (suncflag == 1){ spkezr_c("SUN", timespec[0], "ECLIPJ2000", "NONE", "SSB", Sun_state, &lt); } return 0; } char **get_WU_paths(void) { char *WUsummary_name, **wu_paths_buffer, **wu_paths; WUsummary_name = search_WUsummary(); if (WUsummary_name == NULL){ if (WUcheckflag == 1){ printf("\n Failed to check for missing WUs because no WU summary file was found"); return NULL; } wu_paths_buffer = search_WUs(); } else{ if (WUcheckflag == 1){ WUcheck(WUsummary_name); return NULL; } wu_paths_buffer = select_WUs(WUsummary_name); } if (wu_paths_buffer == NULL){ return NULL; } int i; wu_paths = malloc((wu_count+1) * sizeof(char*)); if (wu_paths == NULL){ printf("...failed."); printf("\n\nerror: could not allocate wu_paths array (OOM)"); return NULL; } for (i = 0; 
i < wu_count; i++){ wu_paths[i] = strdup(wu_paths_buffer[i]); free(wu_paths_buffer[i]); } free(wu_paths_buffer); return wu_paths; } char *search_WUsummary(void){ //Looking for WU summary file printf("\nLooking for WU summary file... "); char *WUsummary_name; #ifdef _WIN32 // Get path WUsummary under windows systems HANDLE hFind; WIN32_FIND_DATA FindData; char findpath[256]; strcpy(findpath, 256, cometwu_path); strcat(findpath, 256, OS_SEP); strcat(findpath, 256, "*.txt"); hFind = FindFirstFile(findpath, &FindData); if (hFind == INVALID_HANDLE_VALUE){ printf("...not found."); return NULL; } WUsummary_name = strdup(FindData.cFileName); FindClose(hFind); printf("...found."); return WUsummary_name; #else // Get path WUsummary of WUs under linux DIR *dir; char *ext; struct dirent *ent; dir = opendir(cometwu_path); if (dir){ while ((ent = readdir(dir)) != NULL){ ext = strrchr(ent->d_name, '.'); if (!ext){ continue; } else if (strncmp((ext + 1), "txt", 3) == 0){ // makes sure you only try to read .txt files WUsummary_name = strdup(ent->d_name); printf("...found."); return WUsummary_name; } } } printf("...not found."); return NULL; #endif } char **select_WUs(char *WUsummary_name) { printf("\nSelecting work units... 
"); char **all_wu_paths, WUsummary_path[256]; all_wu_paths = malloc(100000 * sizeof(char*)); // Max number of WUs is 100,000 if (all_wu_paths == NULL){ printf("...failed."); printf("\n\nerror: could not allocate all_wu_paths array (OOM)"); return NULL; } FILE *WUsummary_file; strcpy(WUsummary_path, 256, cometwu_path); strcat(WUsummary_path, 256, OS_SEP); strcat(WUsummary_path, 256, WUsummary_name); fopen_s(&WUsummary_file, WUsummary_path, "r"); if (WUsummary_file == NULL){ printf("...failed."); printf("\n\nerror: could not open WU summary file; may be open in another program"); return NULL; } else{ char temp[512], *next_token = NULL; float WU_mass = 0; double WU_times[2]; int wu_missing_count = 0, WU_number_of_particles = 0, first_line = 1; while ((fgets(temp, sizeof(temp), WUsummary_file)) != NULL){ if (first_line == 1){ first_line = 0; //Skips first line in the WUsummary continue; } all_wu_count++; char* cval = strtok_r(temp, "\t", &next_token); char* WU_name = cval; cval = strtok_r(NULL, "\t", &next_token); sscanf(cval, "%d", &WU_number_of_particles); cval = strtok_r(NULL, "\t", &next_token); sscanf(cval, "%f", &WU_mass); cval = strtok_r(NULL, "\t", &next_token); sscanf(cval, "%lf", &WU_times[0]); cval = strtok_r(NULL, "\n", &next_token); sscanf(cval, "%lf", &WU_times[1]); if (WU_times[0] > timespec[0]){ // Particles must have been created before target time continue; } if (tminflag == 1){ if (WU_times[1] < timespec[1]){ // Particles must have been created after tmin continue; } } if (tmaxflag == 1){ if (WU_times[0] > timespec[2]){ // Particles must have been created before tmax continue; } } if (mminflag == 1){ if (WU_mass < massspec[0]){ // Particles must be more massive than mmin continue; } } if (mmaxflag == 1){ if (WU_mass > massspec[1]){ // Particles must be less massive than mmax continue; } } FILE *ftest; char full_wu_path[256]; strcpy(full_wu_path, 256, cometwu_path); strcat(full_wu_path, 256, OS_SEP); strcat(full_wu_path, 256, WU_name); 
strcat(full_wu_path, 256, ".ctwu"); fopen_s(&ftest, full_wu_path, "rb"); if (ftest == NULL){ wu_missing_count++; continue; } fclose(ftest); all_wu_paths[wu_count] = strdup(full_wu_path); wu_count++; all_particles_count += WU_number_of_particles; } fclose(WUsummary_file); printf("...done.\n The summary lists %d WUs, %d of which are relevant.", all_wu_count, (wu_count + wu_missing_count)); if (wu_missing_count != 0){ printf("\n %d of the selected WUs are missing.", wu_missing_count); } if (wu_count == 0){ printf(" No WUs to process; quiting."); return all_wu_paths; } return all_wu_paths; } } char **search_WUs(void) { //This function searches the INPUT path for all files named *.ctwu // It determines what WUs are relevant and saves their names printf("\nSearching for work units... "); char **all_wu_paths; all_wu_paths = malloc(100000 * sizeof(char*)); // Max number of WUs is 100,000 if (all_wu_paths == NULL){ printf("...failed."); printf("\n\nerror: could not allocate all_wu_paths array (OOM)"); return NULL; } #ifdef _WIN32 // Get paths of WUs under windows systems HANDLE hFind; WIN32_FIND_DATA FindData; char findpath[256]; strcpy(findpath, 256, cometwu_path); strcat(findpath, 256, OS_SEP); strcat(findpath, 256, "*.ctwu"); // makes sure you only try to read .ctwu files hFind = FindFirstFile(findpath, &FindData); if (hFind == INVALID_HANDLE_VALUE){ printf("...failed."); printf("\n\nerror: could not find work units in %s", cometwu_path); free(all_wu_paths); return NULL; } all_wu_paths[0] = strdup(FindData.cFileName); all_wu_count++; while (FindNextFile(hFind, &FindData)){ all_wu_paths[all_wu_count] = strdup(FindData.cFileName); all_wu_count++; } FindClose(hFind); #else // Get paths of WUs under linux DIR *dir; char *ext; struct dirent *ent; dir = opendir(cometwu_path); if (dir){ while ((ent = readdir(dir)) != NULL){ ext = strrchr(ent->d_name, '.'); if (!ext){ continue; } else if (strncmp((ext + 1), "ctw", 3) == 0){ // makes sure you only try to read .ctwu files 
all_wu_paths[all_wu_count] = strdup(ent->d_name); all_wu_count++; } } } else{ printf("...failed."); printf("\n\nerror: could not find directory %s", cometwu_path); free(all_wu_paths); return NULL; } if (all_wu_count == 0){ printf("...failed."); printf("\n\nerror: could not find work units in %s", cometwu_path); free(all_wu_paths); return NULL; } closedir(dir); #endif int i, wu_fails = 0; char **wu_paths; wu_paths = malloc(all_wu_count * sizeof(char*)); if (wu_paths == NULL){ printf("...failed."); printf("\n\nerror: could not allocate wu_paths array (OOM)"); for (i = 0; i < all_wu_count; i++){ free(all_wu_paths[i]); } free(all_wu_paths); return NULL; } //Check WUs' relevance in parallel #pragma omp parallel { FILE *finput; float wu_header[7]; #pragma omp for for (i = 0; i < all_wu_count; i++){ //Open WU and read header line char full_wu_path[256]; strcpy(full_wu_path, 256, cometwu_path); strcat(full_wu_path, 256, OS_SEP); strcat(full_wu_path, 256, all_wu_paths[i]); fopen_s(&finput, full_wu_path, "rb"); if (finput == NULL){ #pragma omp atomic wu_fails++; continue; } fread(wu_header, sizeof(float), 7, finput); fclose(finput); //Check relevance if (wu_header[5] > timespec[0]){ // Particles must have been created before target time continue; } if (tminflag == 1){ if (wu_header[6] < timespec[1]){ // Particles must have been created after tmin continue; } } if (tmaxflag == 1){ if (wu_header[5] > timespec[2]){ // Particles must have been created before tmax continue; } } if (mminflag == 1){ if (wu_header[2] < massspec[0]){ // Particles must be more massive than mmin continue; } } if (mmaxflag == 1){ if (wu_header[2] > massspec[1]){ // Particles must be less massive than mmax continue; } } #pragma omp critical(WRITE_WU_PATHS) { wu_paths[wu_count] = strdup(full_wu_path); wu_count++; } #pragma omp atomic all_particles_count += (int)(wu_header[1] - wu_header[0] + 1.5); } } for (i = 0; i < all_wu_count; i++){ free(all_wu_paths[i]); } free(all_wu_paths); if (wu_fails != 0){ 
printf("...failed."); printf("\n\nerror: %d work units could not be read", wu_fails); for (i = 0; i < wu_count; i++){ free(wu_paths[i]); } free(wu_paths); return NULL; } //fcloseall(); //Causes a Spice bug when calling kernels. Possibly closes kernel files without Spice knowing? printf("...done.\n %d WUs were found, %d of which are relevant.", all_wu_count, wu_count); if (wu_count == 0){ printf(" No WUs to process; quiting."); return wu_paths; } return wu_paths; } float *get_particles(char **wu_paths) { //This funtcion looks up relevant particles within the WUs found by **get_WU_paths, // gets the properties of these particles and the nearest ouput state printf("\n\nLoading particles... "); float *nearest_states; nearest_states = malloc(all_particles_count * 12 * sizeof(float)); if (nearest_states == NULL){ printf("...failed to load particles.\n\nerror: could not allocate nearest_states array (OOM)"); return nearest_states; } SpiceInt dim; int i, wu_fails = 0; bodvcd_c(10, "GM", 1, &dim, &Sun_GM); //Get standard gravitational parameter of the Sun #pragma omp parallel num_threads(1) { FILE *finput; int wu_rows, fsize, corrupt_count; //Loop over all relevant WUs in parallel #pragma omp for for (i = 0; i < wu_count; i++) { corrupt_count = 0; fopen_s(&finput, wu_paths[i], "rb"); //Open WU if (finput == NULL){ #pragma omp atomic wu_fails++; continue; } fseek(finput, 0, SEEK_END); //Check WU number of rows to determine required array size fsize = ftell(finput); wu_rows = fsize / (7 * sizeof(float)); rewind(finput); float *fwu_array; fwu_array = calloc((wu_rows + 1) * 7, sizeof(float)); if (fwu_array == NULL){ #pragma omp atomic wu_fails++; continue; } fread(fwu_array, sizeof(float), wu_rows * 7, finput); //Load WU into array fclose(finput); int j = 1, z, k, wu_particles_total, wu_particle_count = 0; wu_particles_total = (int)(fwu_array[1] - fwu_array[0] + 1.5); //Check number of particles in WU float *wu_nearest_states; wu_nearest_states = malloc(wu_particles_total * 12 
* sizeof(float)); //Create array to save the nearest particle states in if (wu_nearest_states == NULL){ #pragma omp atomic wu_fails++; continue; } //Find particles in ouput file and get nearest state while (j < wu_rows){ if (fwu_array[j * 7] == 0){ z = j + 1; if (tminflag == 1){ if (fwu_array[z * 7 + 6] < timespec[1]) { j++; continue; } } if (tmaxflag == 1){ if (fwu_array[z * 7 + 6] > timespec[2]) { j++; continue; } } if (fwu_array[z * 7 + 6] > timespec[0]){ j++; continue; } z = get_nearest_state(z, fwu_array); if (z == j){ j++; continue; } else if (z == -1){ // particle has 0/0/0 coordinates -> corrupt j++; corrupt_count++; continue; } else if (z == -2){ // particle fell into the sun and will be ignored j++; continue; } if (nodeflag == 1){ z = get_state_nearest_to_orbital_plane(z, fwu_array); } //Write particle to private array with following formatting: // 0-2 Position ; 3-5 Velocity ; 6 time ; 7 particle number ; 8 Multiplication factor ; 9 mass ; 10 beta ; 11 time or origin for (k = 0; k < 7; k++){ wu_nearest_states[wu_particle_count * 12 + k] = fwu_array[z * 7 + k]; } wu_nearest_states[wu_particle_count * 12 + 7] = fwu_array[j * 7 + 4]; wu_nearest_states[wu_particle_count * 12 + 8] = fwu_array[j * 7 + 5]; wu_nearest_states[wu_particle_count * 12 + 9] = fwu_array[2]; wu_nearest_states[wu_particle_count * 12 + 10] = fwu_array[4]; wu_nearest_states[wu_particle_count * 12 + 11] = fwu_array[(j+1) * 7 + 6]; wu_particle_count++; j = z; } j++; } //Alert if WU might be corrupt if (corrupt_count != 0){ printf("\n %d particles left out because they have 0/0/0 coordinates. 
WU name:\n %s\n ", corrupt_count, wu_paths[i]); } //Write particles of current WU to shared array #pragma omp critical(WRITE_NEAREST) { for (j = 0; j < wu_particle_count; j++){ for (k = 0; k < 12; k++){ nearest_states[(particles_count + j) * 12 + k] = wu_nearest_states[j * 12 + k]; } } particles_count += wu_particle_count; } free(wu_nearest_states); free(fwu_array); } } for (i = 0; i < wu_count; i++){ free(wu_paths[i]); } free(wu_paths); if (wu_fails != 0){ printf("...failed to load particles."); printf("\n\nerror: %d work units failed to be processed", wu_fails); return NULL; } else{ if (particles_count != all_particles_count){ if (particles_count == 0){ //This can occur when no particle is within specified distance printf("...no relevant particles found (%d).\n", all_particles_count); return nearest_states; } float *temp_nearest_states; temp_nearest_states = realloc(nearest_states, particles_count * 12 * sizeof(float)); if (temp_nearest_states == NULL){ printf("...failed to load particles."); printf("\n\nerror: could not reallocate nearest_states array (OOM)"); free(nearest_states); return NULL; } else{ nearest_states = temp_nearest_states; } } printf("...done. %d relevant particles loaded.", particles_count); } //fcloseall(); //Causes a Spice bug when calling kernels. Possibly closes kernel files without Spice knowing? return nearest_states; } int get_nearest_state(int z, float *fwu_array) { // This function searches the particle output for the state closest to the target time int i = z; while (fwu_array[(i + 1) * 7 + 6] < timespec[0]){ if (fwu_array[(i + 1) * 7 + 6] == 0){ break; } i++; } if (fwu_array[i * 7 + 1] == 99){ return -2; // This particle was terminated because it fell into the sun. Particle will be ignored. 
} if (fwu_array[i * 7] == 0){ if (fwu_array[i * 7 + 1] == 0){ if (fwu_array[i * 7 + 2] == 0){ return -1; // SPICE can't process particles with coordinates 0/0/0, WU might be corrupt } } } // Particles too distant to body are filtered out (only for nodes) if (nodeflag == 1 || (intgflag == 1 && objectflag == 1)){ int k; double orb_elts[8], state[6], etime, beta, Sun_GM_beta, rel_pos[3], distance; beta = (double)fwu_array[4]; Sun_GM_beta = Sun_GM * (1 - beta); for (k = 0; k < 6; k++){ state[k] = (double)fwu_array[i * 7 + k]; } etime = (double)fwu_array[i * 7 + 6]; if (suncflag == 1){ for (k = 0; k < 6; k++){ state[k] -= Sun_state[k]; } } #pragma omp critical(SPICE) { oscelt_c(state, etime, Sun_GM_beta, orb_elts); conics_c(orb_elts, timespec[0], state); } rel_pos[0] = state[0] - object_state[0]; rel_pos[1] = state[1] - object_state[1]; rel_pos[2] = state[2] - object_state[2]; if (suncflag == 1){ for (k = 0; k < 6; k++){ state[k] += Sun_state[k]; } } distance = sqrt(rel_pos[0] * rel_pos[0] + rel_pos[1] * rel_pos[1] + rel_pos[2] * rel_pos[2]); if (distance > max_distance){ return (z - 1); } } return i; } int get_state_nearest_to_orbital_plane(int z, float *fwu_array) { //This funtion searches the output for the nearest state of a particle to the orbital plane // This state is later used to compute the node int i; double plane_distance[3]; for (i = 0; i < 100; i++){ if (i == 100){ printf("\nwarning: could not find closest state to orbital plane with 100 iterations.\n Particle inclination may be very low. 
Consider lowering max distance\n"); }
    /* -----------------------------------------------------------------------
     * NOTE(review): the statements down to "return z;" are the TAIL of a
     * nearest-state / node search routine whose head lies above this chunk.
     * It walks index z through fwu_array (7 doubles per stored state:
     * x,y,z,vx,vy,vz,epoch) towards the state closest to the node orbital
     * plane.  fwu_array[..+6] != 0 is used as "slot is populated" test
     * (epoch 0 marks an empty slot) -- TODO confirm against the array writer.
     * --------------------------------------------------------------------- */
    /* |r . n|: distance of state z from the node orbital plane */
    plane_distance[0] = fabs(fwu_array[z * 7 + 0] * node_orbitplane_normvec[0] + fwu_array[z * 7 + 1] * node_orbitplane_normvec[1] + fwu_array[z * 7 + 2] * node_orbitplane_normvec[2]);
    if (fwu_array[(z - 1) * 7 + 6] != 0){
        /* step backwards while the previous stored state is closer to the plane */
        plane_distance[1] = fabs(fwu_array[(z - 1) * 7 + 0] * node_orbitplane_normvec[0] + fwu_array[(z - 1) * 7 + 1] * node_orbitplane_normvec[1] + fwu_array[(z - 1) * 7 + 2] * node_orbitplane_normvec[2]);
        if (plane_distance[1] < plane_distance[0]){
            z--;
            continue;
        }
    }
    if (fwu_array[(z + 1) * 7 + 6] != 0){
        /* step forwards while the next stored state is closer to the plane */
        plane_distance[2] = fabs(fwu_array[(z + 1) * 7 + 0] * node_orbitplane_normvec[0] + fwu_array[(z + 1) * 7 + 1] * node_orbitplane_normvec[1] + fwu_array[(z + 1) * 7 + 2] * node_orbitplane_normvec[2]);
        if (plane_distance[2] < plane_distance[0]){
            z++;
            continue;
        }
    }
    break;  /* neither neighbour is closer: z is a local minimum */
}
return z;
}

/*
 * Administrates the computation of all particles' target states in parallel.
 *
 * nearest_states: packed array, 12 floats per particle
 *   [0..5] state vector, [6] epoch, [10] beta (radiation-pressure parameter);
 *   the remaining slots are passed through untouched by this function.
 * Each particle's state+epoch (7 values) is widened to double, advanced by
 * either calculate_target_state (Kepler/SPICE, intgflag == 0) or
 * integrate_target_state (numerical RK4), then written back in place.
 * Returns the same pointer that was passed in (updated in place).
 */
float *get_all_target_states(float *nearest_states)
{
    if (intgflag == 0){
        printf("\nCalculating target states... ");
    }
    else{
        printf("\nIntegrating target states... ");
    }
    int i;
#pragma omp parallel
    {
        /* per-thread scratch: 6 state components + epoch */
        double nstate[7], beta, Sun_GM_beta;
        int row, k;
#pragma omp for
        for (i = 0; i < particles_count; i++){
            row = i * 12;
            beta = (double) nearest_states[row + 10];
            /* effective solar GM reduced by radiation pressure: GM*(1-beta) */
            Sun_GM_beta = Sun_GM*(1 - beta);
            for (k = 0; k < 7; k++){
                nstate[k] = (double) nearest_states[row + k];
            }
            if (intgflag == 0){
                calculate_target_state(nstate, Sun_GM_beta);
            }
            else{
                integrate_target_state(nstate, Sun_GM_beta);
            }
            for (k = 0; k < 7; k++){
                nearest_states[row + k] = (float) nstate[k];
            }
        }
    }
    printf("...done.");
    return nearest_states;
}

/*
 * Calculates the particle state at the target time or at the particle's node
 * crossing (depending on nodeflag) from the nearest stored output state,
 * using a Kepler (two-body) orbit via the SPICE routines oscelt_c/conics_c.
 *
 * nstate      : in/out, [0..5] state, [6] epoch (updated when nodeflag==1)
 * Sun_GM_beta : effective solar GM, GM*(1-beta)
 *
 * SPICE is not thread-safe, hence every SPICE call sits in the named
 * critical section SPICE (shared with the other functions in this file).
 */
void calculate_target_state(double *nstate, double Sun_GM_beta)
{
    int k;
    double etime, orb_elts[8], state[6];
    for (k = 0; k < 6; k++){
        state[k] = nstate[k];
    }
    etime = nstate[6];
    if (suncflag == 1){
        /* shift into heliocentric frame before deriving orbital elements */
        for (k = 0; k < 6; k++){
            state[k] -= Sun_state[k];
        }
    }
#pragma omp critical(SPICE)
    {
        oscelt_c(state, etime, Sun_GM_beta, orb_elts); //Convert state to orbital elements
    }
    // If nodeflag is not set, calculate particle positions at target time using KEPLER orbits computed with SPICE
    if (nodeflag == 0){
#pragma omp critical(SPICE)
        {
            conics_c(orb_elts, timespec[0], state); //Convert orbital elements to state at target time
        }
    }
    else{
        // If nodeflag is set, calculate the point at which the particle intersects the orbital plane (numerically)
        // -> converge towards orbital plane until the distance is less than 100 km, then save the state.
        double plane_distance, particle_speed, angle_vel_norm, dt;
        plane_distance = state[0] * node_orbitplane_normvec[0] + state[1] * node_orbitplane_normvec[1] + state[2] * node_orbitplane_normvec[2];
        while ( fabs(plane_distance) > 100){
            plane_distance = state[0] * node_orbitplane_normvec[0] + state[1] * node_orbitplane_normvec[1] + state[2] * node_orbitplane_normvec[2];
            particle_speed = sqrt(state[3] * state[3] + state[4] * state[4] + state[5] * state[5]);
            /* time needed to cover the remaining plane distance at full speed */
            dt = fabs(plane_distance) / particle_speed;
            /* angle between velocity and plane normal */
            angle_vel_norm = acos((state[3] * node_orbitplane_normvec[0] + state[4] * node_orbitplane_normvec[1] + state[5] * node_orbitplane_normvec[2]) / particle_speed);
            //Decide whether the particle at the current state is moving towards or away from the orbital plane.
            //If it is moving away -> choose negative time step
            if ((plane_distance > 0 && angle_vel_norm < PI_half) || (plane_distance < 0 && angle_vel_norm > PI_half)){
                dt = -dt;
            }
            etime += dt;
#pragma omp critical(SPICE)
            {
                conics_c(orb_elts, etime, state);
            }
        }
        nstate[6] = etime;
    }
    if (suncflag == 1){
        /* shift back out of the heliocentric frame */
        for (k = 0; k < 6; k++){
            state[k] += Sun_state[k];
        }
    }
    for (k = 0; k < 6; k++){
        nstate[k] = state[k];
    }
}

/*
 * Numerically integrates the particle from its nearest stored state either to
 * the target time (nodeflag == 0) or until it is within 100 km of the node
 * orbital plane (nodeflag == 1), using a classical RK4 step with a dynamic
 * step size dt = dv_step / |accel|.  Perturbing-body positions are taken from
 * SPICE at step start/end and midpoints are linearly interpolated.
 *
 * nstate      : in/out, [0..5] state, [6] epoch (advanced by the integration)
 * Sun_GM_beta : effective solar GM, GM*(1-beta)
 */
void integrate_target_state(double *nstate, double Sun_GM_beta)
{
    int j;
    double lt, dt, dt2, abs_acc, pos[3];
    /* body_* hold per-body positions at step start (pre), midpoint (mid) and end */
    double **body_pre, **body_mid, **body_end;
    double k_acc_1[3], k_acc_2[3], k_acc_3[3], k_acc_4[3];
    double k_vel_1[3], k_vel_2[3], k_vel_3[3], k_vel_4[3];
    /* NOTE(review): element type is double* but sizeof(int *) is used here --
     * same size on common platforms, but sizeof *body_pre would be correct. */
    body_pre = malloc(N_bodys * sizeof(int *));
    body_mid = malloc(N_bodys * sizeof(int *));
    body_end = malloc(N_bodys * sizeof(int *));
    if (body_pre == NULL || body_mid == NULL || body_end == NULL)
    {
        /* NOTE(review): only prints -- execution continues and would
         * dereference NULL below; the "//Abort..." intent is not implemented. */
        printf("\nerror: could not allocate body state array (OOM)"); //Abort...
    }
    for (j = 0; j < N_bodys; j++)
    {
        body_pre[j] = malloc(3 * sizeof(double));
        body_mid[j] = malloc(3 * sizeof(double));
        body_end[j] = malloc(3 * sizeof(double));
        if (body_pre[j] == NULL || body_mid[j] == NULL || body_end[j] == NULL)
        {
            /* NOTE(review): same missing abort as above */
            printf("\n\nerror: could not allocate body state array (OOM)"); //Abort...
        }
#pragma omp critical(SPICE)
        {
            //Critical section is only executed on one thread at a time (spice is not threadsafe)
            spkezp_c(body_ID[j], nstate[6], "ECLIPJ2000", "NONE", 0, body_end[j], &lt);
        }
    }
    //Integrate
    if (nodeflag == 0){
        /* integrate forward until the target epoch timespec[0] is reached */
        while (nstate[6] < timespec[0])
        {
            /* previous step's end positions become this step's start positions */
            for (j = 0; j < N_bodys; j++)
            {
                body_pre[j][0] = body_end[j][0];
                body_pre[j][1] = body_end[j][1];
                body_pre[j][2] = body_end[j][2];
            }
            //Step 1
            pos[0] = nstate[0]; pos[1] = nstate[1]; pos[2] = nstate[2];
            calc_accel(N_bodys, Sun_GM_beta, nstate, &body_pre, k_acc_1);
            k_vel_1[0] = nstate[3]; k_vel_1[1] = nstate[4]; k_vel_1[2] = nstate[5];
            //Set dynamic step size
            abs_acc = sqrt(k_acc_1[0] * k_acc_1[0] + k_acc_1[1] * k_acc_1[1] + k_acc_1[2] * k_acc_1[2]);
            dt = (dv_step / abs_acc);
            if (nstate[6] + dt > timespec[0])
            {
                dt = fabs(timespec[0] - nstate[6]); //End on final_time exactly
            }
            dt2 = dt / 2;
            //Get body positions with SPICE
            for (j = 0; j < N_bodys; j++)
            {
#pragma omp critical(SPICE)
                {
                    //Critical section is only executed on one thread at a time (not thread-safe)
                    spkezp_c(body_ID[j], nstate[6] + dt, "ECLIPJ2000", "NONE", 0, body_end[j], &lt);
                }
                /* midpoint positions: linear interpolation between step endpoints */
                body_mid[j][0] = (body_pre[j][0] + body_end[j][0]) / 2;
                body_mid[j][1] = (body_pre[j][1] + body_end[j][1]) / 2;
                body_mid[j][2] = (body_pre[j][2] + body_end[j][2]) / 2;
            }
            //Step 2
            pos[0] = nstate[0] + k_vel_1[0] * dt2; pos[1] = nstate[1] + k_vel_1[1] * dt2; pos[2] = nstate[2] + k_vel_1[2] * dt2;
            calc_accel(N_bodys, Sun_GM_beta, pos, &body_mid, k_acc_2);
            k_vel_2[0] = nstate[3] + k_acc_1[0] * dt2; k_vel_2[1] = nstate[4] + k_acc_1[1] * dt2; k_vel_2[2] = nstate[5] + k_acc_1[2] * dt2;
            //Step 3
            pos[0] = nstate[0] + k_vel_2[0] * dt2; pos[1] = nstate[1] + k_vel_2[1] * dt2; pos[2] = nstate[2] + k_vel_2[2] * dt2;
            calc_accel(N_bodys, Sun_GM_beta, pos, &body_mid, k_acc_3);
            k_vel_3[0] = nstate[3] + k_acc_2[0] * dt2; k_vel_3[1] = nstate[4] + k_acc_2[1] * dt2; k_vel_3[2] = nstate[5] + k_acc_2[2] * dt2;
            //Step 4
            pos[0] = nstate[0] + k_vel_3[0] * dt; pos[1] = nstate[1] + k_vel_3[1] * dt; pos[2] = nstate[2] + k_vel_3[2] * dt;
            calc_accel(N_bodys, Sun_GM_beta, pos, &body_end, k_acc_4);
            k_vel_4[0] = nstate[3] + k_acc_3[0] * dt; k_vel_4[1] = nstate[4] + k_acc_3[1] * dt; k_vel_4[2] = nstate[5] + k_acc_3[2] * dt;
            //Update solution (classical RK4 weighted sum)
            nstate[0] = nstate[0] + dt*(k_vel_1[0] + 2 * (k_vel_2[0] + k_vel_3[0]) + k_vel_4[0]) / 6;
            nstate[1] = nstate[1] + dt*(k_vel_1[1] + 2 * (k_vel_2[1] + k_vel_3[1]) + k_vel_4[1]) / 6;
            nstate[2] = nstate[2] + dt*(k_vel_1[2] + 2 * (k_vel_2[2] + k_vel_3[2]) + k_vel_4[2]) / 6;
            nstate[3] = nstate[3] + dt*(k_acc_1[0] + 2 * (k_acc_2[0] + k_acc_3[0]) + k_acc_4[0]) / 6;
            nstate[4] = nstate[4] + dt*(k_acc_1[1] + 2 * (k_acc_2[1] + k_acc_3[1]) + k_acc_4[1]) / 6;
            nstate[5] = nstate[5] + dt*(k_acc_1[2] + 2 * (k_acc_2[2] + k_acc_3[2]) + k_acc_4[2]) / 6;
            nstate[6] = nstate[6] + dt;
        }
    }
    else{
        /* integrate until within 100 km of the node orbital plane */
        double plane_distance, particle_speed, particle_speed_perpendicular, angle_vel_norm, dt_max;
        plane_distance = nstate[0] * node_orbitplane_normvec[0] + nstate[1] * node_orbitplane_normvec[1] + nstate[2] * node_orbitplane_normvec[2];
        while (fabs(plane_distance) > 100)
        {
            for (j = 0; j < N_bodys; j++)
            {
                body_pre[j][0] = body_end[j][0];
                body_pre[j][1] = body_end[j][1];
                body_pre[j][2] = body_end[j][2];
            }
            //Step 1
            pos[0] = nstate[0]; pos[1] = nstate[1]; pos[2] = nstate[2];
            calc_accel(N_bodys, Sun_GM_beta, nstate, &body_pre, k_acc_1);
            k_vel_1[0] = nstate[3]; k_vel_1[1] = nstate[4]; k_vel_1[2] = nstate[5];
            //Set dynamic step size
            abs_acc = sqrt(k_acc_1[0] * k_acc_1[0] + k_acc_1[1] * k_acc_1[1] + k_acc_1[2] * k_acc_1[2]);
            dt = (dv_step / abs_acc);
            particle_speed = sqrt(nstate[3] * nstate[3] + nstate[4] * nstate[4] + nstate[5] * nstate[5]);
            angle_vel_norm = acos((nstate[3] * node_orbitplane_normvec[0] + nstate[4] * node_orbitplane_normvec[1] + nstate[5] * node_orbitplane_normvec[2]) / particle_speed);
            /* give up if the particle is moving away from the plane */
            if ((plane_distance > 0 && angle_vel_norm < PI_half) || (plane_distance < 0 && angle_vel_norm > PI_half)){
                break;
            }
            particle_speed_perpendicular = fabs(cos(angle_vel_norm)) * particle_speed;
            /* NOTE(review): plane_distance is signed here; for plane_distance < 0
             * dt_max is negative, so the "dt > dt_max" clamp never fires and the
             * plane could be overshot from that side -- verify intended behaviour
             * (fabs(plane_distance) would make the clamp symmetric). */
            dt_max = 0.9 * plane_distance / particle_speed_perpendicular;
            if (dt > dt_max)
            {
                dt = dt_max; //Don't overshoot orbital plane
            }
            dt2 = dt / 2;
            //Get body positions with SPICE
            for (j = 0; j < N_bodys; j++)
            {
#pragma omp critical(SPICE)
                {
                    //Critical section is only executed on one thread at a time (not thread-safe)
                    spkezp_c(body_ID[j], nstate[6] + dt, "ECLIPJ2000", "NONE", 0, body_end[j], &lt);
                }
                body_mid[j][0] = (body_pre[j][0] + body_end[j][0]) / 2;
                body_mid[j][1] = (body_pre[j][1] + body_end[j][1]) / 2;
                body_mid[j][2] = (body_pre[j][2] + body_end[j][2]) / 2;
            }
            //Step 2
            pos[0] = nstate[0] + k_vel_1[0] * dt2; pos[1] = nstate[1] + k_vel_1[1] * dt2; pos[2] = nstate[2] + k_vel_1[2] * dt2;
            calc_accel(N_bodys, Sun_GM_beta, pos, &body_mid, k_acc_2);
            k_vel_2[0] = nstate[3] + k_acc_1[0] * dt2; k_vel_2[1] = nstate[4] + k_acc_1[1] * dt2; k_vel_2[2] = nstate[5] + k_acc_1[2] * dt2;
            //Step 3
            pos[0] = nstate[0] + k_vel_2[0] * dt2; pos[1] = nstate[1] + k_vel_2[1] * dt2; pos[2] = nstate[2] + k_vel_2[2] * dt2;
            calc_accel(N_bodys, Sun_GM_beta, pos, &body_mid, k_acc_3);
            k_vel_3[0] = nstate[3] + k_acc_2[0] * dt2; k_vel_3[1] = nstate[4] + k_acc_2[1] * dt2; k_vel_3[2] = nstate[5] + k_acc_2[2] * dt2;
            //Step 4
            pos[0] = nstate[0] + k_vel_3[0] * dt; pos[1] = nstate[1] + k_vel_3[1] * dt; pos[2] = nstate[2] + k_vel_3[2] * dt;
            calc_accel(N_bodys, Sun_GM_beta, pos, &body_end, k_acc_4);
            k_vel_4[0] = nstate[3] + k_acc_3[0] * dt; k_vel_4[1] = nstate[4] + k_acc_3[1] * dt; k_vel_4[2] = nstate[5] + k_acc_3[2] * dt;
            //Update solution
            nstate[0] = nstate[0] + dt*(k_vel_1[0] + 2 * (k_vel_2[0] + k_vel_3[0]) + k_vel_4[0]) / 6;
            nstate[1] = nstate[1] + dt*(k_vel_1[1] + 2 * (k_vel_2[1] + k_vel_3[1]) + k_vel_4[1]) / 6;
            nstate[2] = nstate[2] + dt*(k_vel_1[2] + 2 * (k_vel_2[2] + k_vel_3[2]) + k_vel_4[2]) / 6;
            nstate[3] = nstate[3] + dt*(k_acc_1[0] + 2 * (k_acc_2[0] + k_acc_3[0]) + k_acc_4[0]) / 6;
            nstate[4] = nstate[4] + dt*(k_acc_1[1] + 2 * (k_acc_2[1] + k_acc_3[1]) + k_acc_4[1]) / 6;
            nstate[5] = nstate[5] + dt*(k_acc_1[2] + 2 * (k_acc_2[2] + k_acc_3[2]) + k_acc_4[2]) / 6;
            nstate[6] = nstate[6] + dt;
            plane_distance = nstate[0] * node_orbitplane_normvec[0] + nstate[1] * node_orbitplane_normvec[1] + nstate[2] * node_orbitplane_normvec[2];
        }
    }
    //Deallocate body array
    for (j = 0; j < N_bodys; j++)
    {
        free(body_pre[j]);
        free(body_mid[j]);
        free(body_end[j]);
    }
    free(body_pre);
    free(body_mid);
    free(body_end);
}

/*
 * Sums the point-mass gravitational acceleration on a particle at pos from
 * the Sun (index 0, with the radiation-pressure-reduced GM Sun_GM_beta) and
 * the other N_bodys-1 perturbing bodies (body_GM[b]), given their positions
 * in *body_state.  Result is written to accel[0..2].
 */
void calc_accel(int N_bodys, double Sun_GM_beta, double pos[], double **body_state[], double *accel)
{
    double direct_body[3], distance_pow3, GMr3;
    int b;
    //Sun
    direct_body[0] = (*body_state)[0][0] - pos[0];
    direct_body[1] = (*body_state)[0][1] - pos[1];
    direct_body[2] = (*body_state)[0][2] - pos[2];
    distance_pow3 = pow(direct_body[0] * direct_body[0] + direct_body[1] * direct_body[1] + direct_body[2] * direct_body[2], 1.5);
    GMr3 = Sun_GM_beta / distance_pow3;
    accel[0] = GMr3 * direct_body[0];
    accel[1] = GMr3 * direct_body[1];
    accel[2] = GMr3 * direct_body[2];
    //Other bodys
    for (b = 1; b < N_bodys; b++)
    {
        direct_body[0] = (*body_state)[b][0] - pos[0];
        direct_body[1] = (*body_state)[b][1] - pos[1];
        direct_body[2] = (*body_state)[b][2] - pos[2];
        distance_pow3 = pow(direct_body[0] * direct_body[0] + direct_body[1] * direct_body[1] + direct_body[2] * direct_body[2], 1.5);
        GMr3 = body_GM[b] / distance_pow3;
        accel[0] += GMr3 * direct_body[0];
        accel[1] += GMr3 * direct_body[1];
        accel[2] += GMr3 * direct_body[2];
    }
}

/*
 * Filters out all particles farther than max_distance from the object.
 * Kept particles are re-written relative to object_state (positions and
 * velocities), packed densely into a freshly allocated 12-floats-per-particle
 * array; the global particles_count is updated to the kept count.
 * Frees target_states and returns the new array (NULL on OOM).
 * NOTE: the output order is non-deterministic under OpenMP, since threads
 * append under the WRITE_FILTERED critical section in completion order.
 */
float *filter_particles_out_of_range(float *target_states)
{
    printf("\nCheck distances to object... ");
    float *filtered_states;
    filtered_states = malloc(particles_count * 12 * sizeof(float));
    if (filtered_states == NULL){
        printf("...failed to filter particles.\n\nerror: could not allocate filtered_states array (OOM)");
        free(target_states);
        return NULL;
    }
    int i, filtered_particles_count = 0;
#pragma omp parallel
    {
        double state[6], rel_state[6], distance;
        int row, k;
#pragma omp for
        for (i = 0; i < particles_count; i++){
            row = i * 12;
            for (k = 0; k < 6; k++){
                state[k] = (double)target_states[row + k];
            }
            for (k = 0; k < 6; k++){
                //Calculate coordinates relative to object
                rel_state[k] = state[k] - object_state[k];
            }
            distance = sqrt(rel_state[0] * rel_state[0] + rel_state[1] * rel_state[1] + rel_state[2] * rel_state[2]);
            if (distance < max_distance){
#pragma omp critical(WRITE_FILTERED) //If particle is within specified range, save to filtered_states array
                {
                    for (k = 0; k < 6; k++){
                        filtered_states[filtered_particles_count * 12 + k] = (float)rel_state[k];
                    }
                    for (k = 6; k < 12; k++){
                        filtered_states[filtered_particles_count * 12 + k] = target_states[row + k];
                    }
                    filtered_particles_count++;
                }
            }
        }
    }
    free(target_states);
    particles_count = filtered_particles_count;
    printf("...done. %d particles within distance.", particles_count);
    return filtered_states;
}

/*
 * Saves the results to the output file given by output_path (set from the
 * application's input arguments).  Output layout depends on flags:
 *   pc2flag  -> binary .PC2 pointCache (positions only; untested path),
 *   nodeflag -> raw 12-float records as-is,
 *   else     -> 11 floats/particle with velocities (velflag) or 8 without.
 * Takes ownership of target_states (freed here on success paths).
 * Returns 0 on success, 1 on failure (file or allocation error).
 */
int save_target_states(float *target_states)
{
    printf("\nWriting output... ");
    FILE *outputfile;
    fopen_s(&outputfile, output_path, "wb");
    if (outputfile == NULL){
        /* NOTE(review): target_states is not freed on this early return */
        printf("...failed.\n\nerror: could not create output file");
        return 1;
    }
    if (pc2flag == 1){
        //Produce .PC2 format output (pointCache)
        //NOT TESTED!
        float *output_array;
        int i, k;
        char cacheSignature[12] = "POINTCACHE2";
        int fileVersion = 1;
        int numPoints = particles_count;
        float startFrame = 1;
        float sampleRate = 1;
        int numSamples = 1;
        output_array = malloc(particles_count * 3 * sizeof(float));
        if (output_array == NULL){
            printf("...failed.\n\nerror: could not allocate output_array (OOM)");
            fclose(outputfile);
            return 1;
        }
        for (i = 0; i < particles_count; i++){
            for (k = 0; k < 3; k++){
                output_array[i * 3 + k] = target_states[i * 12 + k];
            }
        }
        /* .PC2 header followed by the position samples */
        fwrite(cacheSignature, sizeof(char), 12, outputfile);
        fwrite(&fileVersion, sizeof(int), 1, outputfile);
        fwrite(&numPoints, sizeof(int), 1, outputfile);
        fwrite(&startFrame, sizeof(float), 1, outputfile);
        fwrite(&sampleRate, sizeof(float), 1, outputfile);
        fwrite(&numSamples, sizeof(int), 1, outputfile);
        fwrite(output_array, sizeof(float), particles_count * 3, outputfile);
        free(output_array);
    }
    else if (nodeflag == 1){
        //If nodeflag is set, then target_states is already properly formatted and ready for saving
        // 3 Position and 3 Velocity coordinates ; time ; particle number ; multiplication-factor ; mass ; beta ; time of origin
        fwrite(target_states, sizeof(float), particles_count * 12, outputfile);
    }
    else{
        //Else take out time and potentially velocity coordinates
        float *output_array;
        int i, k;
        if (velflag == 1){
            //Keep velocities in output file
            output_array = malloc(particles_count * 11 * sizeof(float));
            if (output_array == NULL){
                printf("...failed.\n\nerror: could not allocate output array (OOM)");
                fclose(outputfile);
                return 1;
            }
            for (i = 0; i < particles_count; i++){
                for (k = 0; k < 6; k++){
                    // 3 Position coordinates and 3 Velocity coordinates
                    output_array[i*11 + k] = target_states[i*12 + k];
                }
                for (k = 6; k < 11; k++){
                    // particle number ; multiplication-factor ; mass ; beta ; time of origin (skips the time slot)
                    output_array[i*11 + k] = target_states[i*12 + k + 1];
                }
            }
            fwrite(output_array, sizeof(float), particles_count * 11, outputfile); //Write to file
        }
        else{
            //Throw out velocities in output file
            output_array = malloc(particles_count * 8 * sizeof(float));
            if (output_array == NULL){
                printf("...failed.\n\nerror: could not allocate output array (OOM)");
                fclose(outputfile);
                return 1;
            }
            for (i = 0; i < particles_count; i++){
                for (k = 0; k < 3; k++){
                    // 3 Position coordinates
                    output_array[i*8 + k] = target_states[i*12 + k];
                }
                for (k = 3; k < 8; k++){
                    // particle number ; multiplication-factor ; mass ; beta ; time of origin (skips velocity+time slots)
                    output_array[i*8 + k] = target_states[i*12 + k + 4];
                }
            }
            fwrite(output_array, sizeof(float), particles_count * 8, outputfile); //Write to file
        }
        free(output_array);
    }
    fclose(outputfile);
    free(target_states);
    printf("...done.");
    return 0;
}

/*
 * Checks which work units (WUs) listed in the given summary file are missing
 * on disk (no <name>.ctwu file under cometwu_path).  Missing names are written
 * to output_path, one per line.  Updates the global all_wu_count.
 * NOTE(review): strcpy/strcat are called with three arguments (dest, size,
 * src) -- presumably mapped to strcpy_s/strcat_s via project macros; confirm.
 * NOTE(review): missing_wu_names and its strdup'ed entries are never freed.
 */
void WUcheck(char *WUsummary_name){
    printf("\nChecking work units... ");
    char **missing_wu_names, WUsummary_path[256];
    missing_wu_names = malloc(100000 * sizeof(char*)); // Max number of WUs is 100,000
    if (missing_wu_names == NULL){
        printf("...failed.");
        printf("\n\nerror: could not allocate all_wu_paths array (OOM)");
    }
    else{
        FILE *WUsummary_file;
        /* build <cometwu_path><OS_SEP><WUsummary_name> */
        strcpy(WUsummary_path, 256, cometwu_path);
        strcat(WUsummary_path, 256, OS_SEP);
        strcat(WUsummary_path, 256, WUsummary_name);
        fopen_s(&WUsummary_file, WUsummary_path, "r");
        if (WUsummary_file == NULL){
            printf("...failed.");
            printf("\n\nerror: could not open WU summary file; may be open in another program");
        }
        else{
            char temp[512], *next_token = NULL;
            int wu_missing_count = 0, first_line = 1;
            while ((fgets(temp, sizeof(temp), WUsummary_file)) != NULL){
                if (first_line == 1){
                    first_line = 0; //Skips first line in the WUsummary
                    continue;
                }
                all_wu_count++;
                /* WU name is the first tab-separated field of the line */
                char* cval = strtok_r(temp, "\t", &next_token);
                char* WU_name = cval;
                FILE *ftest;
                char full_wu_path[256];
                strcpy(full_wu_path, 256, cometwu_path);
                strcat(full_wu_path, 256, OS_SEP);
                strcat(full_wu_path, 256, WU_name);
                strcat(full_wu_path, 256, ".ctwu");
                fopen_s(&ftest, full_wu_path, "rb");
                if (ftest == NULL){
                    /* WU file absent -> remember its name (with trailing newline) */
                    strcat(WU_name, 256, "\n");
                    missing_wu_names[wu_missing_count] = strdup(WU_name);
                    wu_missing_count++;
                    continue;
                }
                fclose(ftest);
            }
            fclose(WUsummary_file);
            if (wu_missing_count == 0){
                printf("...done.\n The summary lists %d WUs, none of which are missing.\n", all_wu_count);
            }
            else{
                printf("...done.\n The summary lists %d WUs, %d of which are missing.", all_wu_count, wu_missing_count);
                FILE *outputfile;
                fopen_s(&outputfile, output_path, "w");
                if (outputfile == NULL){
                    printf("\n\nerror: could not create output file");
                }
                else{
                    int i;
                    for (i = 0; i < wu_missing_count; i++){
                        fputs(missing_wu_names[i], outputfile);
                    }
                    fclose(outputfile);
                    printf("\n Names of the missing WUs have been written to:\n %s\n", output_path);
                }
            }
        }
    }
}
Ooura_FFT.h
#ifndef _H_OOURA_FFT_
#define _H_OOURA_FFT_

#include <cmath>

/*
 * Header-only wrapper around Takuya Ooura's real-FFT routines (rdft), with
 * optional OpenMP parallelism across channels.
 *
 * Data layout contract (derived from the member definitions below): each
 * in/out buffer must hold frame_size + 2 doubles, since the FFT writes the
 * Nyquist bin into t[frame_size], t[frame_size + 1].  frame_size must be a
 * power of 2 (requirement of rdft).
 */
class Ooura_FFT{
private:
	int frame_size;   // FFT length (power of 2)
	int channels;     // number of independent channels / work areas
	double **a, **w;  // a: per-channel in-place data copy; w: cos/sin tables
	int **ip;         // per-channel bit-reversal work areas
public:
	inline Ooura_FFT(int _frame_size, int _channels);
	inline ~Ooura_FFT();
	inline void FFT(double **);
	/* NOTE(review): "tagret_channels" is a typo for "target_channels"
	 * (prototype parameter name only; the definition spells it correctly) */
	inline void FFT(double **, int tagret_channels);
	inline void iFFT(double **);
	inline void FFT(double *);
	inline void iFFT(double *);
	inline void SingleFFT(double *);
	inline void SingleiFFT(double *);
};

/*
Copyright:
	Copyright(C) 1996-2001 Takuya OOURA
	email: ooura@mmm.t.u-tokyo.ac.jp
	download: http://momonga.t.u-tokyo.ac.jp/~ooura/fft.html
	You may use, copy, modify this code for any purpose and without fee.
	You may distribute this ORIGINAL package.

Fast Fourier/Cosine/Sine Transform
	dimension   :one
	data length :power of 2
	decimation  :frequency
	radix       :4, 2
	data        :inplace
	table       :use
functions
	cdft: Complex Discrete Fourier Transform
	rdft: Real Discrete Fourier Transform
	ddct: Discrete Cosine Transform
	ddst: Discrete Sine Transform
	dfct: Cosine Transform of RDFT (Real Symmetric DFT)
	dfst: Sine Transform of RDFT (Real Anti-symmetric DFT)
function prototypes
	void cdft(int, int, double *, int *, double *);
	void rdft(int, int, double *, int *, double *);
	void ddct(int, int, double *, int *, double *);
	void ddst(int, int, double *, int *, double *);
	void dfct(int, double *, double *, int *, double *);
	void dfst(int, double *, double *, int *, double *);


-------- Complex DFT (Discrete Fourier Transform) --------
	[definition]
		<case1>
			X[k] = sum_j=0^n-1 x[j]*exp(2*pi*i*j*k/n), 0<=k<n
		<case2>
			X[k] = sum_j=0^n-1 x[j]*exp(-2*pi*i*j*k/n), 0<=k<n
		(notes: sum_j=0^n-1 is a summation from j=0 to n-1)
	[usage]
		<case1>
			ip[0] = 0; // first time only
			cdft(2*n, 1, a, ip, w);
		<case2>
			ip[0] = 0; // first time only
			cdft(2*n, -1, a, ip, w);
	[parameters]
		2*n          :data length (int)
		              n >= 1, n = power of 2
		a[0...2*n-1] :input/output data (double *)
		              input data
		                  a[2*j] = Re(x[j]),
		                  a[2*j+1] = Im(x[j]), 0<=j<n
		              output data
		                  a[2*k] = Re(X[k]),
		                  a[2*k+1] = Im(X[k]), 0<=k<n
		ip[0...*]    :work area for bit reversal (int *)
		              length of ip >= 2+sqrt(n)
		              strictly,
		              length of ip >= 2+(1<<(int)(log(n+0.5)/log(2))/2).
		              ip[0],ip[1] are pointers of the cos/sin table.
		w[0...n/2-1] :cos/sin table (double *)
		              w[],ip[] are initialized if ip[0] == 0.
	[remark]
		Inverse of
			cdft(2*n, -1, a, ip, w);
		is
			cdft(2*n, 1, a, ip, w);
			for (j = 0; j <= 2 * n - 1; j++) {
				a[j] *= 1.0 / n;
			}
		.


-------- Real DFT / Inverse of Real DFT --------
	[definition]
		<case1> RDFT
			R[k] = sum_j=0^n-1 a[j]*cos(2*pi*j*k/n), 0<=k<=n/2
			I[k] = sum_j=0^n-1 a[j]*sin(2*pi*j*k/n), 0<k<n/2
		<case2> IRDFT (excluding scale)
			a[k] = (R[0] + R[n/2]*cos(pi*k))/2 +
			       sum_j=1^n/2-1 R[j]*cos(2*pi*j*k/n) +
			       sum_j=1^n/2-1 I[j]*sin(2*pi*j*k/n), 0<=k<n
	[usage]
		<case1>
			ip[0] = 0; // first time only
			rdft(n, 1, a, ip, w);
		<case2>
			ip[0] = 0; // first time only
			rdft(n, -1, a, ip, w);
	[parameters]
		n            :data length (int)
		              n >= 2, n = power of 2
		a[0...n-1]   :input/output data (double *)
		              <case1>
		                  output data
		                      a[2*k] = R[k], 0<=k<n/2
		                      a[2*k+1] = I[k], 0<k<n/2
		                      a[1] = R[n/2]
		              <case2>
		                  input data
		                      a[2*j] = R[j], 0<=j<n/2
		                      a[2*j+1] = I[j], 0<j<n/2
		                      a[1] = R[n/2]
		ip[0...*]    :work area for bit reversal (int *)
		              length of ip >= 2+sqrt(n/2)
		              strictly,
		              length of ip >= 2+(1<<(int)(log(n/2+0.5)/log(2))/2).
		              ip[0],ip[1] are pointers of the cos/sin table.
		w[0...n/2-1] :cos/sin table (double *)
		              w[],ip[] are initialized if ip[0] == 0.
	[remark]
		Inverse of
			rdft(n, 1, a, ip, w);
		is
			rdft(n, -1, a, ip, w);
			for (j = 0; j <= n - 1; j++) {
				a[j] *= 2.0 / n;
			}
		.


-------- DCT (Discrete Cosine Transform) / Inverse of DCT --------
	[definition]
		<case1> IDCT (excluding scale)
			C[k] = sum_j=0^n-1 a[j]*cos(pi*j*(k+1/2)/n), 0<=k<n
		<case2> DCT
			C[k] = sum_j=0^n-1 a[j]*cos(pi*(j+1/2)*k/n), 0<=k<n
	[usage]
		<case1>
			ip[0] = 0; // first time only
			ddct(n, 1, a, ip, w);
		<case2>
			ip[0] = 0; // first time only
			ddct(n, -1, a, ip, w);
	[parameters]
		n              :data length (int)
		                n >= 2, n = power of 2
		a[0...n-1]     :input/output data (double *)
		                output data
		                    a[k] = C[k], 0<=k<n
		ip[0...*]      :work area for bit reversal (int *)
		                length of ip >= 2+sqrt(n/2)
		                strictly,
		                length of ip >= 2+(1<<(int)(log(n/2+0.5)/log(2))/2).
		                ip[0],ip[1] are pointers of the cos/sin table.
		w[0...n*5/4-1] :cos/sin table (double *)
		                w[],ip[] are initialized if ip[0] == 0.
	[remark]
		Inverse of
			ddct(n, -1, a, ip, w);
		is
			a[0] *= 0.5;
			ddct(n, 1, a, ip, w);
			for (j = 0; j <= n - 1; j++) {
				a[j] *= 2.0 / n;
			}
		.


-------- DST (Discrete Sine Transform) / Inverse of DST --------
	[definition]
		<case1> IDST (excluding scale)
			S[k] = sum_j=1^n A[j]*sin(pi*j*(k+1/2)/n), 0<=k<n
		<case2> DST
			S[k] = sum_j=0^n-1 a[j]*sin(pi*(j+1/2)*k/n), 0<k<=n
	[usage]
		<case1>
			ip[0] = 0; // first time only
			ddst(n, 1, a, ip, w);
		<case2>
			ip[0] = 0; // first time only
			ddst(n, -1, a, ip, w);
	[parameters]
		n              :data length (int)
		                n >= 2, n = power of 2
		a[0...n-1]     :input/output data (double *)
		                <case1>
		                    input data
		                        a[j] = A[j], 0<j<n
		                        a[0] = A[n]
		                    output data
		                        a[k] = S[k], 0<=k<n
		                <case2>
		                    output data
		                        a[k] = S[k], 0<k<n
		                        a[0] = S[n]
		ip[0...*]      :work area for bit reversal (int *)
		                length of ip >= 2+sqrt(n/2)
		                strictly,
		                length of ip >= 2+(1<<(int)(log(n/2+0.5)/log(2))/2).
		                ip[0],ip[1] are pointers of the cos/sin table.
		w[0...n*5/4-1] :cos/sin table (double *)
		                w[],ip[] are initialized if ip[0] == 0.
	[remark]
		Inverse of
			ddst(n, -1, a, ip, w);
		is
			a[0] *= 0.5;
			ddst(n, 1, a, ip, w);
			for (j = 0; j <= n - 1; j++) {
				a[j] *= 2.0 / n;
			}
		.


-------- Cosine Transform of RDFT (Real Symmetric DFT) --------
	[definition]
		C[k] = sum_j=0^n a[j]*cos(pi*j*k/n), 0<=k<=n
	[usage]
		ip[0] = 0; // first time only
		dfct(n, a, t, ip, w);
	[parameters]
		n              :data length - 1 (int)
		                n >= 2, n = power of 2
		a[0...n]       :input/output data (double *)
		                output data
		                    a[k] = C[k], 0<=k<=n
		t[0...n/2]     :work area (double *)
		ip[0...*]      :work area for bit reversal (int *)
		                length of ip >= 2+sqrt(n/4)
		                strictly,
		                length of ip >= 2+(1<<(int)(log(n/4+0.5)/log(2))/2).
		                ip[0],ip[1] are pointers of the cos/sin table.
		w[0...n*5/8-1] :cos/sin table (double *)
		                w[],ip[] are initialized if ip[0] == 0.
	[remark]
		Inverse of
			a[0] *= 0.5;
			a[n] *= 0.5;
			dfct(n, a, t, ip, w);
		is
			a[0] *= 0.5;
			a[n] *= 0.5;
			dfct(n, a, t, ip, w);
			for (j = 0; j <= n; j++) {
				a[j] *= 2.0 / n;
			}
		.


-------- Sine Transform of RDFT (Real Anti-symmetric DFT) --------
	[definition]
		S[k] = sum_j=1^n-1 a[j]*sin(pi*j*k/n), 0<k<n
	[usage]
		ip[0] = 0; // first time only
		dfst(n, a, t, ip, w);
	[parameters]
		n              :data length + 1 (int)
		                n >= 2, n = power of 2
		a[0...n-1]     :input/output data (double *)
		                output data
		                    a[k] = S[k], 0<k<n
		                (a[0] is used for work area)
		t[0...n/2-1]   :work area (double *)
		ip[0...*]      :work area for bit reversal (int *)
		                length of ip >= 2+sqrt(n/4)
		                strictly,
		                length of ip >= 2+(1<<(int)(log(n/4+0.5)/log(2))/2).
		                ip[0],ip[1] are pointers of the cos/sin table.
		w[0...n*5/8-1] :cos/sin table (double *)
		                w[],ip[] are initialized if ip[0] == 0.
	[remark]
		Inverse of
			dfst(n, a, t, ip, w);
		is
			dfst(n, a, t, ip, w);
			for (j = 1; j <= n - 1; j++) {
				a[j] *= 2.0 / n;
			}
		.


Appendix :
	The cos/sin table is recalculated when the larger table required.
	w[] and ip[] are compatible with all routines.
*/

/* forward declarations of the Ooura core routines defined below */
inline void cdft(int, int, double *, int *, double *);
inline void rdft(int, int, double *, int *, double *);
inline void ddct(int, int, double *, int *, double *);
inline void ddst(int, int, double *, int *, double *);
inline void dfct(int, double *, double *, int *, double *);
inline void dfst(int, double *, double *, int *, double *);
inline void makewt(int nw, int* ip, double* w);
inline void makect(int nc, int* ip, double* c);
inline void bitrv2(int n, int* ip, double* a);
inline void cftfsub(int n, double* a, double* w);
inline void cftbsub(int n, double* a, double* w);
inline void rftfsub(int n, double* a, int nc, double* c);
inline void rftbsub(int n, double* a, int nc, double* c);
inline void cft1st(int n, double* a, double* w);
inline void cftmdl(int n, int l, double* a, double* w);

/*
 * Allocates per-channel work areas: data copy a[], cos/sin table w[] and
 * bit-reversal area ip[].
 * NOTE(review): the documented requirement for rdft is
 * length of ip >= 2 + sqrt(n/2); sqrt(frame_size/2) + 1 ints are allocated
 * here, i.e. one int short of that bound -- verify against rdft's actual use
 * (possible off-by-one overflow of ip for some frame sizes).
 */
inline Ooura_FFT::Ooura_FFT(int _frame_size, int _channels){
	frame_size = _frame_size;
	channels = _channels;
	a = new double *[channels];
	for (int i = 0; i < channels; i++)
		a[i] = new double[frame_size];
	w = new double *[channels];
	for (int i = 0; i < channels; i++)
		w[i] = new double[frame_size];
	ip = new int *[channels];
	for (int i = 0; i < channels; i++)
		ip[i] = new int[(int)(sqrt(frame_size / 2)) + 1];
}

/* Releases all per-channel work areas. */
inline Ooura_FFT::~Ooura_FFT()
{
	for (int i = 0; i < channels; i++) {
		delete[] a[i];
		delete[] w[i];
		delete[] ip[i];
	}
	delete[] a;
	delete[] w;
	delete[] ip;
}

/*
 * Forward real FFT on all channels in parallel, in place.
 * Output layout per channel: data[2k]=Re, data[2k+1]=-Im (conjugated
 * half-spectrum), data[1]=0, data[frame_size]=Nyquist real part,
 * data[frame_size+1]=0 -> each buffer needs frame_size+2 doubles.
 * ip[j][0]=0 forces table (re)initialization on every call.
 */
inline void Ooura_FFT::FFT(double **data)
{
	int j;
#pragma omp parallel for
	for (j = 0; j < channels; j++)
	{
		double *t;
		t = data[j];
		ip[j][0] = 0;
		for (int i = 0; i < frame_size; i++)
			a[j][i] = t[i];
		rdft(frame_size, 1, a[j], ip[j], w[j]);
		for (int i = 0; i < frame_size; i += 2)
		{
			t[i] = a[j][i];
			t[i + 1] = -a[j][i + 1];   // flip sign: unpack to +Im convention
		}
		t[1] = 0;
		t[frame_size] = a[j][1];       // rdft stores the Nyquist bin in a[1]
		t[frame_size + 1] = 0;
	}
}

/* Same as FFT(double**) but limited to the first target_channels channels. */
void Ooura_FFT::FFT(double ** data, int target_channels){
	int j;
#pragma omp parallel for
	for (j = 0; j < target_channels; j++)
	{
		double *t;
		t = data[j];
		ip[j][0] = 0;
		for (int i = 0; i < frame_size; i++)
			a[j][i] = t[i];
		rdft(frame_size, 1, a[j], ip[j], w[j]);
		for (int i = 0; i < frame_size; i += 2)
		{
			t[i] = a[j][i];
			t[i + 1] = -a[j][i + 1];
		}
		t[1] = 0;
		t[frame_size] = a[j][1];
		t[frame_size + 1] = 0;
	}
}

/*
 * Forward real FFT over a single flat buffer holding `channels` frames of
 * frame_size+2 doubles each (channel j starts at data[j*(frame_size+2)]).
 */
inline void Ooura_FFT::FFT(double *data)
{
	int j;
#pragma omp parallel for
	for (j = 0; j < channels; j++)
	{
		double *t;
		t = &data[j*(frame_size+2)];
		ip[j][0] = 0;
		for (int i = 0; i < frame_size; i++)
			a[j][i] = t[i];
		rdft(frame_size, 1, a[j], ip[j], w[j]);
		for (int i = 0; i < frame_size; i += 2)
		{
			t[i] = a[j][i];
			t[i + 1] = -a[j][i + 1];
		}
		t[1] = 0;
		t[frame_size] = a[j][1];
		t[frame_size + 1] = 0;
	}
}

/*
 * Inverse real FFT on all channels in parallel, in place; undoes the packing
 * done by FFT() (re-conjugates, restores the Nyquist bin into a[1]) and
 * applies the 2/frame_size normalization per Ooura's rdft contract.
 */
inline void Ooura_FFT::iFFT(double **data)
{
	int j;
#pragma omp parallel for
	for (j = 0; j < channels; j++)
	{
		double *t;
		t = data[j];
		ip[j][0] = 0;
		for (int i = 0; i < frame_size; i += 2)
		{
			a[j][i] = t[i];
			a[j][i + 1] = -t[i + 1];
		}
		a[j][1] = t[frame_size];
		rdft(frame_size, -1, a[j], ip[j], w[j]);
		for (int i = 0; i < frame_size; i++)
		{
			a[j][i] *= 2.0;
			a[j][i] /= frame_size;
		}
		for (int i = 0; i < frame_size; i++)
		{
			t[i] = a[j][i];
		}
	}
}

/* Inverse real FFT over a single flat multi-channel buffer (see FFT(double*)). */
inline void Ooura_FFT::iFFT(double *data)
{
	int j;
#pragma omp parallel for
	for (j = 0; j < channels; j++)
	{
		double *t;
		t = &data[j*(frame_size+2)];
		ip[j][0] = 0;
		for (int i = 0; i < frame_size; i += 2)
		{
			a[j][i] = t[i];
			a[j][i + 1] = -t[i + 1];
		}
		a[j][1] = t[frame_size];
		rdft(frame_size, -1, a[j], ip[j], w[j]);
		for (int i = 0; i < frame_size; i++)
		{
			a[j][i] *= 2.0;
			a[j][i] /= frame_size;
		}
		for (int i = 0; i < frame_size; i++)
		{
			t[i] = a[j][i];
		}
	}
}

/* Single-channel forward FFT using channel-0 work areas (no OpenMP). */
inline void Ooura_FFT::SingleFFT(double *data)
{
	int i;
	ip[0][0] = 0;
	for (i = 0; i < frame_size; i++)
		a[0][i] = data[i];
	rdft(frame_size, 1, a[0], ip[0], w[0]);
	for (i = 0; i < frame_size; i += 2)
	{
		data[i] = a[0][i];
		data[i + 1] = -a[0][i + 1];
	}
	data[1] = 0;
	data[frame_size] = a[0][1];
	data[frame_size + 1] = 0;
}

/* Single-channel inverse FFT using channel-0 work areas (no OpenMP). */
inline void Ooura_FFT::SingleiFFT(double *data)
{
	int i;
	ip[0][0] = 0;
	for (i = 0; i < frame_size; i += 2)
	{
		a[0][i] = data[i];
		a[0][i + 1] = -data[i + 1];
	}
	a[0][1] = data[frame_size];
	rdft(frame_size, -1, a[0], ip[0], w[0]);
	for (i = 0; i < frame_size; i++)
	{
		a[0][i] *= 2.0;
		a[0][i] /= frame_size;
	}
	for (i = 0; i < frame_size; i++)
	{
		data[i] = a[0][i];
	}
}

/* ---- Ooura core routines below: reproduced verbatim from the original
 * package; exact statement order is essential, do not restyle. ---- */

/* Complex DFT; see the usage comment block above. */
inline void cdft(int n, int isgn, double *a, int *ip, double *w)
{
	void makewt(int nw, int *ip, double *w);
	void bitrv2(int n, int *ip, double *a);
	void bitrv2conj(int n, int *ip, double *a);
	void cftfsub(int n, double *a, double *w);
	void cftbsub(int n, double *a, double *w);

	if (n > (ip[0] << 2)) {
		makewt(n >> 2, ip, w);
	}
	if (n > 4) {
		if (isgn >= 0) {
			bitrv2(n, ip + 2, a);
			cftfsub(n, a, w);
		} else {
			bitrv2conj(n, ip + 2, a);
			cftbsub(n, a, w);
		}
	} else if (n == 4) {
		cftfsub(n, a, w);
	}
}

/* Real DFT / inverse real DFT; see the usage comment block above. */
inline void rdft(int n, int isgn, double *a, int *ip, double *w)
{
	void makewt(int nw, int *ip, double *w);
	void makect(int nc, int *ip, double *c);
	void bitrv2(int n, int *ip, double *a);
	void cftfsub(int n, double *a, double *w);
	void cftbsub(int n, double *a, double *w);
	void rftfsub(int n, double *a, int nc, double *c);
	void rftbsub(int n, double *a, int nc, double *c);
	int nw, nc;
	double xi;

	nw = ip[0];
	if (n > (nw << 2)) {
		nw = n >> 2;
		makewt(nw, ip, w);
	}
	nc = ip[1];
	if (n > (nc << 2)) {
		nc = n >> 2;
		makect(nc, ip, w + nw);
	}
	if (isgn >= 0) {
		if (n > 4) {
			bitrv2(n, ip + 2, a);
			cftfsub(n, a, w);
			rftfsub(n, a, nc, w + nw);
		} else if (n == 4) {
			cftfsub(n, a, w);
		}
		xi = a[0] - a[1];
		a[0] += a[1];
		a[1] = xi;
	} else {
		a[1] = 0.5 * (a[0] - a[1]);
		a[0] -= a[1];
		if (n > 4) {
			rftbsub(n, a, nc, w + nw);
			bitrv2(n, ip + 2, a);
			cftbsub(n, a, w);
		} else if (n == 4) {
			cftfsub(n, a, w);
		}
	}
}

/* DCT / inverse DCT; see the usage comment block above. */
inline void ddct(int n, int isgn, double *a, int *ip, double *w)
{
	void makewt(int nw, int *ip, double *w);
	void makect(int nc, int *ip, double *c);
	void bitrv2(int n, int *ip, double *a);
	void cftfsub(int n, double *a, double *w);
	void cftbsub(int n, double *a, double *w);
	void rftfsub(int n, double *a, int nc, double *c);
	void rftbsub(int n, double *a, int nc, double *c);
	void dctsub(int n, double *a, int nc, double *c);
	int j, nw, nc;
	double xr;

	nw = ip[0];
	if (n > (nw << 2)) {
		nw = n >> 2;
		makewt(nw, ip, w);
	}
	nc = ip[1];
	if (n > nc) {
		nc = n;
		makect(nc, ip, w + nw);
	}
	if (isgn < 0) {
		xr = a[n - 1];
		for (j = n - 2; j >= 2; j -= 2) {
			a[j + 1] = a[j] - a[j - 1];
			a[j] += a[j - 1];
		}
		a[1] = a[0] - xr;
		a[0] += xr;
		if (n > 4) {
			rftbsub(n, a, nc, w + nw);
			bitrv2(n, ip + 2, a);
			cftbsub(n, a, w);
		} else if (n == 4) {
			cftfsub(n, a, w);
		}
	}
	dctsub(n, a, nc, w + nw);
	if (isgn >= 0) {
		if (n > 4) {
			bitrv2(n, ip + 2, a);
			cftfsub(n, a, w);
			rftfsub(n, a, nc, w + nw);
		} else if (n == 4) {
			cftfsub(n, a, w);
		}
		xr = a[0] - a[1];
		a[0] += a[1];
		for (j = 2; j < n; j += 2) {
			a[j - 1] = a[j] - a[j + 1];
			a[j] += a[j + 1];
		}
		a[n - 1] = xr;
	}
}

/* DST / inverse DST; see the usage comment block above. */
inline void ddst(int n, int isgn, double *a, int *ip, double *w)
{
	void makewt(int nw, int *ip, double *w);
	void makect(int nc, int *ip, double *c);
	void bitrv2(int n, int *ip, double *a);
	void cftfsub(int n, double *a, double *w);
	void cftbsub(int n, double *a, double *w);
	void rftfsub(int n, double *a, int nc, double *c);
	void rftbsub(int n, double *a, int nc, double *c);
	void dstsub(int n, double *a, int nc, double *c);
	int j, nw, nc;
	double xr;

	nw = ip[0];
	if (n > (nw << 2)) {
		nw = n >> 2;
		makewt(nw, ip, w);
	}
	nc = ip[1];
	if (n > nc) {
		nc = n;
		makect(nc, ip, w + nw);
	}
	if (isgn < 0) {
		xr = a[n - 1];
		for (j = n - 2; j >= 2; j -= 2) {
			a[j + 1] = -a[j] - a[j - 1];
			a[j] -= a[j - 1];
		}
		a[1] = a[0] + xr;
		a[0] -= xr;
		if (n > 4) {
			rftbsub(n, a, nc, w + nw);
			bitrv2(n, ip + 2, a);
			cftbsub(n, a, w);
		} else if (n == 4) {
			cftfsub(n, a, w);
		}
	}
	dstsub(n, a, nc, w + nw);
	if (isgn >= 0) {
		if (n > 4) {
			bitrv2(n, ip + 2, a);
			cftfsub(n, a, w);
			rftfsub(n, a, nc, w + nw);
		} else if (n == 4) {
			cftfsub(n, a, w);
		}
		xr = a[0] - a[1];
		a[0] += a[1];
		for (j = 2; j < n; j += 2) {
			a[j - 1] = -a[j] - a[j + 1];
			a[j] -= a[j + 1];
		}
		a[n - 1] = -xr;
	}
}

/* Cosine transform of RDFT; see the usage comment block above. */
inline void dfct(int n, double *a, double *t, int *ip, double *w)
{
	void makewt(int nw, int *ip, double *w);
	void makect(int nc, int *ip, double *c);
	void bitrv2(int n, int *ip, double *a);
	void cftfsub(int n, double *a, double *w);
	void rftfsub(int n, double *a, int nc, double *c);
	void dctsub(int n, double *a, int nc, double *c);
	int j, k, l, m, mh, nw, nc;
	double xr, xi, yr, yi;

	nw = ip[0];
	if (n > (nw << 3)) {
		nw = n >> 3;
		makewt(nw, ip, w);
	}
	nc = ip[1];
	if (n > (nc << 1)) {
		nc = n >> 1;
		makect(nc, ip, w + nw);
	}
	m = n >> 1;
	yi = a[m];
	xi = a[0] + a[n];
	a[0] -= a[n];
	t[0] = xi - yi;
	t[m] = xi + yi;
	if (n > 2) {
		mh = m >> 1;
		for (j = 1; j < mh; j++) {
			k = m - j;
			xr = a[j] - a[n - j];
			xi = a[j] + a[n - j];
			yr = a[k] - a[n - k];
			yi = a[k] + a[n - k];
			a[j] = xr;
			a[k] = yr;
			t[j] = xi - yi;
			t[k] = xi + yi;
		}
		t[mh] = a[mh] + a[n - mh];
		a[mh] -= a[n - mh];
		dctsub(m, a, nc, w + nw);
		if (m > 4) {
			bitrv2(m, ip + 2, a);
			cftfsub(m, a, w);
			rftfsub(m, a, nc, w + nw);
		} else if (m == 4) {
			cftfsub(m, a, w);
		}
		a[n - 1] = a[0] - a[1];
		a[1] = a[0] + a[1];
		for (j = m - 2; j >= 2; j -= 2) {
			a[2 * j + 1] = a[j] + a[j + 1];
			a[2 * j - 1] = a[j] - a[j + 1];
		}
		l = 2;
		m = mh;
		while (m >= 2) {
			dctsub(m, t, nc, w + nw);
			if (m > 4) {
				bitrv2(m, ip + 2, t);
				cftfsub(m, t, w);
				rftfsub(m, t, nc, w + nw);
			} else if (m == 4) {
				cftfsub(m, t, w);
			}
			a[n - l] = t[0] - t[1];
			a[l] = t[0] + t[1];
			k = 0;
			for (j = 2; j < m; j += 2) {
				k += l << 2;
				a[k - l] = t[j] - t[j + 1];
				a[k + l] = t[j] + t[j + 1];
			}
			l <<= 1;
			mh = m >> 1;
			for (j = 0; j < mh; j++) {
				k = m - j;
				t[j] = t[m + k] - t[m + j];
				t[k] = t[m + k] + t[m + j];
			}
			t[mh] = t[m + mh];
			m = mh;
		}
		a[l] = t[0];
		a[n] = t[2] - t[1];
		a[0] = t[2] + t[1];
	} else {
		a[1] = a[0];
		a[2] = t[0];
		a[0] = t[1];
	}
}

/* Sine transform of RDFT; see the usage comment block above. */
inline void dfst(int n, double *a, double *t, int *ip, double *w)
{
	void makewt(int nw, int *ip, double *w);
	void makect(int nc, int *ip, double *c);
	void bitrv2(int n, int *ip, double *a);
	void cftfsub(int n, double *a, double *w);
	void rftfsub(int n, double *a, int nc, double *c);
	void dstsub(int n, double *a, int nc, double *c);
	int j, k, l, m, mh, nw, nc;
	double xr, xi, yr, yi;

	nw = ip[0];
	if (n > (nw << 3)) {
		nw = n >> 3;
		makewt(nw, ip, w);
	}
	nc = ip[1];
	if (n > (nc << 1)) {
		nc = n >> 1;
		makect(nc, ip, w + nw);
	}
	if (n > 2) {
		m = n >> 1;
		mh = m >> 1;
		for (j = 1; j < mh; j++) {
			k = m - j;
			xr = a[j] + a[n - j];
			xi = a[j] - a[n - j];
			yr = a[k] + a[n - k];
			yi = a[k] - a[n - k];
			a[j] = xr;
			a[k] = yr;
			t[j] = xi + yi;
			t[k] = xi - yi;
		}
		t[0] = a[mh] - a[n - mh];
		a[mh] += a[n - mh];
		a[0] = a[m];
		dstsub(m, a, nc, w + nw);
		if (m > 4) {
			bitrv2(m, ip + 2, a);
			cftfsub(m, a, w);
			rftfsub(m, a, nc, w + nw);
		} else if (m == 4) {
			cftfsub(m, a, w);
		}
		a[n - 1] = a[1] - a[0];
		a[1] = a[0] + a[1];
		for (j = m - 2; j >= 2; j -= 2) {
			a[2 * j + 1] = a[j] - a[j + 1];
			a[2 * j - 1] = -a[j] - a[j + 1];
		}
		l = 2;
		m = mh;
		while (m >= 2) {
			dstsub(m, t, nc, w + nw);
			if (m > 4) {
				bitrv2(m, ip + 2, t);
				cftfsub(m, t, w);
				rftfsub(m, t, nc, w + nw);
			} else if (m == 4) {
				cftfsub(m, t, w);
			}
			a[n - l] = t[1] - t[0];
			a[l] = t[0] + t[1];
			k = 0;
			for (j = 2; j < m; j += 2) {
				k += l << 2;
				a[k - l] = -t[j] - t[j + 1];
				a[k + l] = t[j] - t[j + 1];
			}
			l <<= 1;
			mh = m >> 1;
			for (j = 1; j < mh; j++) {
				k = m - j;
				t[j] = t[m + k] + t[m + j];
				t[k] = t[m + k] - t[m + j];
			}
			t[0] = t[m + mh];
			m = mh;
		}
		a[l] = t[0];
	}
	a[0] = 0;
}

/* -------- initializing routines -------- */

/* Builds the cos/sin twiddle table w[] for length nw; records nw in ip[0]. */
inline void makewt(int nw, int *ip, double *w)
{
	void bitrv2(int n, int *ip, double *a);
	int j, nwh;
	double delta, x, y;

	ip[0] = nw;
	ip[1] = 1;
	if (nw > 2) {
		nwh = nw >> 1;
		delta = atan(1.0) / nwh;
		w[0] = 1;
		w[1] = 0;
		w[nwh] = cos(delta * nwh);
		w[nwh + 1] = w[nwh];
		if (nwh > 2) {
			for (j = 2; j < nwh; j += 2) {
				x = cos(delta * j);
				y = sin(delta * j);
				w[j] = x;
				w[j + 1] = y;
				w[nw - j] = y;
				w[nw - j + 1] = x;
			}
			bitrv2(nw, ip + 2, w);
		}
	}
}

/* Builds the half-angle cos/sin table c[] for length nc; records nc in ip[1]. */
inline void makect(int nc, int *ip, double *c)
{
	int j, nch;
	double delta;

	ip[1] = nc;
	if (nc > 1) {
		nch = nc >> 1;
		delta = atan(1.0) / nch;
		c[0] = cos(delta * nch);
		c[nch] = 0.5 * c[0];
		for (j = 1; j < nch; j++) {
			c[j] = 0.5 * cos(delta * j);
			c[nc - j] = 0.5 * sin(delta * j);
		}
	}
}

/* -------- child routines -------- */
/* NOTE(review): the file is cut off here mid-declaration; the remaining
 * child routines (cftfsub etc.) lie beyond this chunk. */
inline void 
bitrv2(int n, int *ip, double *a) { int j, j1, k, k1, l, m, m2; double xr, xi, yr, yi; ip[0] = 0; l = n; m = 1; while ((m << 3) < l) { l >>= 1; for (j = 0; j < m; j++) { ip[m + j] = ip[j] + l; } m <<= 1; } m2 = 2 * m; if ((m << 3) == l) { for (k = 0; k < m; k++) { for (j = 0; j < k; j++) { j1 = 2 * j + ip[k]; k1 = 2 * k + ip[j]; xr = a[j1]; xi = a[j1 + 1]; yr = a[k1]; yi = a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; j1 += m2; k1 += 2 * m2; xr = a[j1]; xi = a[j1 + 1]; yr = a[k1]; yi = a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; j1 += m2; k1 -= m2; xr = a[j1]; xi = a[j1 + 1]; yr = a[k1]; yi = a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; j1 += m2; k1 += 2 * m2; xr = a[j1]; xi = a[j1 + 1]; yr = a[k1]; yi = a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; } j1 = 2 * k + m2 + ip[k]; k1 = j1 + m2; xr = a[j1]; xi = a[j1 + 1]; yr = a[k1]; yi = a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; } } else { for (k = 1; k < m; k++) { for (j = 0; j < k; j++) { j1 = 2 * j + ip[k]; k1 = 2 * k + ip[j]; xr = a[j1]; xi = a[j1 + 1]; yr = a[k1]; yi = a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; j1 += m2; k1 += m2; xr = a[j1]; xi = a[j1 + 1]; yr = a[k1]; yi = a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; } } } } inline void bitrv2conj(int n, int *ip, double *a) { int j, j1, k, k1, l, m, m2; double xr, xi, yr, yi; ip[0] = 0; l = n; m = 1; while ((m << 3) < l) { l >>= 1; for (j = 0; j < m; j++) { ip[m + j] = ip[j] + l; } m <<= 1; } m2 = 2 * m; if ((m << 3) == l) { for (k = 0; k < m; k++) { for (j = 0; j < k; j++) { j1 = 2 * j + ip[k]; k1 = 2 * k + ip[j]; xr = a[j1]; xi = -a[j1 + 1]; yr = a[k1]; yi = -a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; j1 += m2; k1 += 2 * m2; xr = a[j1]; xi = -a[j1 + 1]; yr = a[k1]; yi = -a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; j1 += m2; k1 -= m2; xr = a[j1]; xi = -a[j1 + 
1]; yr = a[k1]; yi = -a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; j1 += m2; k1 += 2 * m2; xr = a[j1]; xi = -a[j1 + 1]; yr = a[k1]; yi = -a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; } k1 = 2 * k + ip[k]; a[k1 + 1] = -a[k1 + 1]; j1 = k1 + m2; k1 = j1 + m2; xr = a[j1]; xi = -a[j1 + 1]; yr = a[k1]; yi = -a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; k1 += m2; a[k1 + 1] = -a[k1 + 1]; } } else { a[1] = -a[1]; a[m2 + 1] = -a[m2 + 1]; for (k = 1; k < m; k++) { for (j = 0; j < k; j++) { j1 = 2 * j + ip[k]; k1 = 2 * k + ip[j]; xr = a[j1]; xi = -a[j1 + 1]; yr = a[k1]; yi = -a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; j1 += m2; k1 += m2; xr = a[j1]; xi = -a[j1 + 1]; yr = a[k1]; yi = -a[k1 + 1]; a[j1] = yr; a[j1 + 1] = yi; a[k1] = xr; a[k1 + 1] = xi; } k1 = 2 * k + ip[k]; a[k1 + 1] = -a[k1 + 1]; a[k1 + m2 + 1] = -a[k1 + m2 + 1]; } } } inline void cftfsub(int n, double *a, double *w) { void cft1st(int n, double *a, double *w); void cftmdl(int n, int l, double *a, double *w); int j, j1, j2, j3, l; double x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i; l = 2; if (n > 8) { cft1st(n, a, w); l = 8; while ((l << 2) < n) { cftmdl(n, l, a, w); l <<= 2; } } if ((l << 2) == n) { for (j = 0; j < l; j += 2) { j1 = j + l; j2 = j1 + l; j3 = j2 + l; x0r = a[j] + a[j1]; x0i = a[j + 1] + a[j1 + 1]; x1r = a[j] - a[j1]; x1i = a[j + 1] - a[j1 + 1]; x2r = a[j2] + a[j3]; x2i = a[j2 + 1] + a[j3 + 1]; x3r = a[j2] - a[j3]; x3i = a[j2 + 1] - a[j3 + 1]; a[j] = x0r + x2r; a[j + 1] = x0i + x2i; a[j2] = x0r - x2r; a[j2 + 1] = x0i - x2i; a[j1] = x1r - x3i; a[j1 + 1] = x1i + x3r; a[j3] = x1r + x3i; a[j3 + 1] = x1i - x3r; } } else { for (j = 0; j < l; j += 2) { j1 = j + l; x0r = a[j] - a[j1]; x0i = a[j + 1] - a[j1 + 1]; a[j] += a[j1]; a[j + 1] += a[j1 + 1]; a[j1] = x0r; a[j1 + 1] = x0i; } } } inline void cftbsub(int n, double *a, double *w) { void cft1st(int n, double *a, double *w); void cftmdl(int n, int l, double *a, double 
*w); int j, j1, j2, j3, l; double x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i; l = 2; if (n > 8) { cft1st(n, a, w); l = 8; while ((l << 2) < n) { cftmdl(n, l, a, w); l <<= 2; } } if ((l << 2) == n) { for (j = 0; j < l; j += 2) { j1 = j + l; j2 = j1 + l; j3 = j2 + l; x0r = a[j] + a[j1]; x0i = -a[j + 1] - a[j1 + 1]; x1r = a[j] - a[j1]; x1i = -a[j + 1] + a[j1 + 1]; x2r = a[j2] + a[j3]; x2i = a[j2 + 1] + a[j3 + 1]; x3r = a[j2] - a[j3]; x3i = a[j2 + 1] - a[j3 + 1]; a[j] = x0r + x2r; a[j + 1] = x0i - x2i; a[j2] = x0r - x2r; a[j2 + 1] = x0i + x2i; a[j1] = x1r - x3i; a[j1 + 1] = x1i - x3r; a[j3] = x1r + x3i; a[j3 + 1] = x1i + x3r; } } else { for (j = 0; j < l; j += 2) { j1 = j + l; x0r = a[j] - a[j1]; x0i = -a[j + 1] + a[j1 + 1]; a[j] += a[j1]; a[j + 1] = -a[j + 1] - a[j1 + 1]; a[j1] = x0r; a[j1 + 1] = x0i; } } } inline void cft1st(int n, double *a, double *w) { int j, k1, k2; double wk1r, wk1i, wk2r, wk2i, wk3r, wk3i; double x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i; x0r = a[0] + a[2]; x0i = a[1] + a[3]; x1r = a[0] - a[2]; x1i = a[1] - a[3]; x2r = a[4] + a[6]; x2i = a[5] + a[7]; x3r = a[4] - a[6]; x3i = a[5] - a[7]; a[0] = x0r + x2r; a[1] = x0i + x2i; a[4] = x0r - x2r; a[5] = x0i - x2i; a[2] = x1r - x3i; a[3] = x1i + x3r; a[6] = x1r + x3i; a[7] = x1i - x3r; wk1r = w[2]; x0r = a[8] + a[10]; x0i = a[9] + a[11]; x1r = a[8] - a[10]; x1i = a[9] - a[11]; x2r = a[12] + a[14]; x2i = a[13] + a[15]; x3r = a[12] - a[14]; x3i = a[13] - a[15]; a[8] = x0r + x2r; a[9] = x0i + x2i; a[12] = x2i - x0i; a[13] = x0r - x2r; x0r = x1r - x3i; x0i = x1i + x3r; a[10] = wk1r * (x0r - x0i); a[11] = wk1r * (x0r + x0i); x0r = x3i + x1r; x0i = x3r - x1i; a[14] = wk1r * (x0i - x0r); a[15] = wk1r * (x0i + x0r); k1 = 0; for (j = 16; j < n; j += 16) { k1 += 2; k2 = 2 * k1; wk2r = w[k1]; wk2i = w[k1 + 1]; wk1r = w[k2]; wk1i = w[k2 + 1]; wk3r = wk1r - 2 * wk2i * wk1i; wk3i = 2 * wk2i * wk1r - wk1i; x0r = a[j] + a[j + 2]; x0i = a[j + 1] + a[j + 3]; x1r = a[j] - a[j + 2]; x1i = a[j + 1] - a[j + 3]; x2r = a[j + 4] + 
a[j + 6]; x2i = a[j + 5] + a[j + 7]; x3r = a[j + 4] - a[j + 6]; x3i = a[j + 5] - a[j + 7]; a[j] = x0r + x2r; a[j + 1] = x0i + x2i; x0r -= x2r; x0i -= x2i; a[j + 4] = wk2r * x0r - wk2i * x0i; a[j + 5] = wk2r * x0i + wk2i * x0r; x0r = x1r - x3i; x0i = x1i + x3r; a[j + 2] = wk1r * x0r - wk1i * x0i; a[j + 3] = wk1r * x0i + wk1i * x0r; x0r = x1r + x3i; x0i = x1i - x3r; a[j + 6] = wk3r * x0r - wk3i * x0i; a[j + 7] = wk3r * x0i + wk3i * x0r; wk1r = w[k2 + 2]; wk1i = w[k2 + 3]; wk3r = wk1r - 2 * wk2r * wk1i; wk3i = 2 * wk2r * wk1r - wk1i; x0r = a[j + 8] + a[j + 10]; x0i = a[j + 9] + a[j + 11]; x1r = a[j + 8] - a[j + 10]; x1i = a[j + 9] - a[j + 11]; x2r = a[j + 12] + a[j + 14]; x2i = a[j + 13] + a[j + 15]; x3r = a[j + 12] - a[j + 14]; x3i = a[j + 13] - a[j + 15]; a[j + 8] = x0r + x2r; a[j + 9] = x0i + x2i; x0r -= x2r; x0i -= x2i; a[j + 12] = -wk2i * x0r - wk2r * x0i; a[j + 13] = -wk2i * x0i + wk2r * x0r; x0r = x1r - x3i; x0i = x1i + x3r; a[j + 10] = wk1r * x0r - wk1i * x0i; a[j + 11] = wk1r * x0i + wk1i * x0r; x0r = x1r + x3i; x0i = x1i - x3r; a[j + 14] = wk3r * x0r - wk3i * x0i; a[j + 15] = wk3r * x0i + wk3i * x0r; } } inline void cftmdl(int n, int l, double *a, double *w) { int j, j1, j2, j3, k, k1, k2, m, m2; double wk1r, wk1i, wk2r, wk2i, wk3r, wk3i; double x0r, x0i, x1r, x1i, x2r, x2i, x3r, x3i; m = l << 2; for (j = 0; j < l; j += 2) { j1 = j + l; j2 = j1 + l; j3 = j2 + l; x0r = a[j] + a[j1]; x0i = a[j + 1] + a[j1 + 1]; x1r = a[j] - a[j1]; x1i = a[j + 1] - a[j1 + 1]; x2r = a[j2] + a[j3]; x2i = a[j2 + 1] + a[j3 + 1]; x3r = a[j2] - a[j3]; x3i = a[j2 + 1] - a[j3 + 1]; a[j] = x0r + x2r; a[j + 1] = x0i + x2i; a[j2] = x0r - x2r; a[j2 + 1] = x0i - x2i; a[j1] = x1r - x3i; a[j1 + 1] = x1i + x3r; a[j3] = x1r + x3i; a[j3 + 1] = x1i - x3r; } wk1r = w[2]; for (j = m; j < l + m; j += 2) { j1 = j + l; j2 = j1 + l; j3 = j2 + l; x0r = a[j] + a[j1]; x0i = a[j + 1] + a[j1 + 1]; x1r = a[j] - a[j1]; x1i = a[j + 1] - a[j1 + 1]; x2r = a[j2] + a[j3]; x2i = a[j2 + 1] + a[j3 + 1]; x3r = a[j2] - 
a[j3]; x3i = a[j2 + 1] - a[j3 + 1]; a[j] = x0r + x2r; a[j + 1] = x0i + x2i; a[j2] = x2i - x0i; a[j2 + 1] = x0r - x2r; x0r = x1r - x3i; x0i = x1i + x3r; a[j1] = wk1r * (x0r - x0i); a[j1 + 1] = wk1r * (x0r + x0i); x0r = x3i + x1r; x0i = x3r - x1i; a[j3] = wk1r * (x0i - x0r); a[j3 + 1] = wk1r * (x0i + x0r); } k1 = 0; m2 = 2 * m; for (k = m2; k < n; k += m2) { k1 += 2; k2 = 2 * k1; wk2r = w[k1]; wk2i = w[k1 + 1]; wk1r = w[k2]; wk1i = w[k2 + 1]; wk3r = wk1r - 2 * wk2i * wk1i; wk3i = 2 * wk2i * wk1r - wk1i; for (j = k; j < l + k; j += 2) { j1 = j + l; j2 = j1 + l; j3 = j2 + l; x0r = a[j] + a[j1]; x0i = a[j + 1] + a[j1 + 1]; x1r = a[j] - a[j1]; x1i = a[j + 1] - a[j1 + 1]; x2r = a[j2] + a[j3]; x2i = a[j2 + 1] + a[j3 + 1]; x3r = a[j2] - a[j3]; x3i = a[j2 + 1] - a[j3 + 1]; a[j] = x0r + x2r; a[j + 1] = x0i + x2i; x0r -= x2r; x0i -= x2i; a[j2] = wk2r * x0r - wk2i * x0i; a[j2 + 1] = wk2r * x0i + wk2i * x0r; x0r = x1r - x3i; x0i = x1i + x3r; a[j1] = wk1r * x0r - wk1i * x0i; a[j1 + 1] = wk1r * x0i + wk1i * x0r; x0r = x1r + x3i; x0i = x1i - x3r; a[j3] = wk3r * x0r - wk3i * x0i; a[j3 + 1] = wk3r * x0i + wk3i * x0r; } wk1r = w[k2 + 2]; wk1i = w[k2 + 3]; wk3r = wk1r - 2 * wk2r * wk1i; wk3i = 2 * wk2r * wk1r - wk1i; for (j = k + m; j < l + (k + m); j += 2) { j1 = j + l; j2 = j1 + l; j3 = j2 + l; x0r = a[j] + a[j1]; x0i = a[j + 1] + a[j1 + 1]; x1r = a[j] - a[j1]; x1i = a[j + 1] - a[j1 + 1]; x2r = a[j2] + a[j3]; x2i = a[j2 + 1] + a[j3 + 1]; x3r = a[j2] - a[j3]; x3i = a[j2 + 1] - a[j3 + 1]; a[j] = x0r + x2r; a[j + 1] = x0i + x2i; x0r -= x2r; x0i -= x2i; a[j2] = -wk2i * x0r - wk2r * x0i; a[j2 + 1] = -wk2i * x0i + wk2r * x0r; x0r = x1r - x3i; x0i = x1i + x3r; a[j1] = wk1r * x0r - wk1i * x0i; a[j1 + 1] = wk1r * x0i + wk1i * x0r; x0r = x1r + x3i; x0i = x1i - x3r; a[j3] = wk3r * x0r - wk3i * x0i; a[j3 + 1] = wk3r * x0i + wk3i * x0r; } } } inline void rftfsub(int n, double *a, int nc, double *c) { int j, k, kk, ks, m; double wkr, wki, xr, xi, yr, yi; m = n >> 1; ks = 2 * nc / m; kk = 0; for (j 
= 2; j < m; j += 2) { k = n - j; kk += ks; wkr = 0.5 - c[nc - kk]; wki = c[kk]; xr = a[j] - a[k]; xi = a[j + 1] + a[k + 1]; yr = wkr * xr - wki * xi; yi = wkr * xi + wki * xr; a[j] -= yr; a[j + 1] -= yi; a[k] += yr; a[k + 1] -= yi; } } inline void rftbsub(int n, double *a, int nc, double *c) { int j, k, kk, ks, m; double wkr, wki, xr, xi, yr, yi; a[1] = -a[1]; m = n >> 1; ks = 2 * nc / m; kk = 0; for (j = 2; j < m; j += 2) { k = n - j; kk += ks; wkr = 0.5 - c[nc - kk]; wki = c[kk]; xr = a[j] - a[k]; xi = a[j + 1] + a[k + 1]; yr = wkr * xr + wki * xi; yi = wkr * xi - wki * xr; a[j] -= yr; a[j + 1] = yi - a[j + 1]; a[k] += yr; a[k + 1] = yi - a[k + 1]; } a[m + 1] = -a[m + 1]; } inline void dctsub(int n, double *a, int nc, double *c) { int j, k, kk, ks, m; double wkr, wki, xr; m = n >> 1; ks = nc / n; kk = 0; for (j = 1; j < m; j++) { k = n - j; kk += ks; wkr = c[kk] - c[nc - kk]; wki = c[kk] + c[nc - kk]; xr = wki * a[j] - wkr * a[k]; a[j] = wkr * a[j] + wki * a[k]; a[k] = xr; } a[m] *= c[0]; } inline void dstsub(int n, double *a, int nc, double *c) { int j, k, kk, ks, m; double wkr, wki, xr; m = n >> 1; ks = nc / n; kk = 0; for (j = 1; j < m; j++) { k = n - j; kk += ks; wkr = c[kk] - c[nc - kk]; wki = c[kk] + c[nc - kk]; xr = wki * a[k] - wkr * a[j]; a[k] = wkr * a[k] + wki * a[j]; a[j] = xr; } a[m] *= c[0]; } #endif
inputBug330.c
/* test preprocessing info before and after a statement */
#include <stdio.h>
#ifdef _OPENMP
#include "omp.h"
#endif

/*
 * Regression test: preprocessing info attached before and after a statement
 * inside an OpenMP parallel region.
 *
 * Fix: `nthreads` was uninitialized and incremented concurrently by every
 * thread in the team -- reading an uninitialized variable plus an unprotected
 * read-modify-write are both undefined behavior.  It is now initialized to 0
 * and the increment is made atomic.  The directive/comment layout around the
 * statement (the thing this test exercises) is preserved.
 */
int main()
{
  int nthreads = 0;  /* was uninitialized */
#pragma omp parallel
  {
#pragma omp atomic
    nthreads++;      /* one race-free contribution per thread */
#if defined(_OPENMP)
#pragma omp master
    {
      printf("I am the master thread.\n");
    }
#endif
    // nthreads++; // things are quite different if this stmt exists!!
  }
  return 0;
}
graphprop.c
#include<stdio.h>
#include<string.h>
#include "graph.h"
#include "graphprop.h"
#include "graphutil.h"
/* NOTE(review): malloc/free/abs are used below but <stdlib.h> is not
   included here -- presumably pulled in via graph.h; confirm. */

/* Global result slots: filled by the property routines below. */
int64_t T = 0 ;          /* raw triangle count accumulated across threads */
double avgincident;
double scaledT;          /* triangle count scaled by n(n-1)(n-2)/2 */
double aed;              /* average edge distance */
double sccindex;
double clusterCoeff;     /* average local clustering coefficient */

/*
 * avgEdgeDistance: mean |u - v| over all directed edges (u,v) of G, scaled
 * by the node count.  The per-node sums are computed in parallel; the final
 * reduction runs serially.  Returns (and also stores in the global `aed`).
 */
double avgEdgeDistance(graph *G) {
    long * edgeDistances = (long*) malloc (sizeof(long) * G->numNodes);
    /* NOTE(review): malloc result is not checked before use. */
    // long * edgeListSize = (long*) malloc (sizeof(long) * G->numNodes);
    /* printf("The number of nodes is %d \n", G->numNodes); */
    /* printf("The number of edges is %d \n", G->numEdges); */
    node_t v;
    //double contributions;
#pragma omp parallel
    {
        /* schedule selected at build time by the PARFOR_*/
        /* TASKLOOP_DEFINED configuration macros */
#if defined(PARFOR_GUIDED)
#pragma omp for schedule(guided, PAR_CHUNKSIZE)
#elif defined(PARFOR_DYNAMIC)
#pragma omp for schedule(dynamic, PAR_CHUNKSIZE)
#elif defined(TASKLOOP_DEFINED)
#pragma omp taskloop num_tasks(NUM_TASKS)
#else
#pragma omp for schedule(static)
#endif
        for (v = 0; v < G->numNodes; v ++) {
            edge_t u_idx;
            // edgeListSize[v] = G->begin[v+1] - G->begin[v];
            edgeDistances[v] = 0;
            /* CSR walk over v's outgoing edges */
            for (u_idx = G->begin[v]; u_idx < G->begin[v+1]; u_idx ++) {
                node_t u = G->node_idx [u_idx];
                // printf("The u is %d the v is %d and dist is %d \n", u,v, abs(u-v));
                /* NOTE(review): abs() takes int -- if node_t is wider than
                   int this truncates; consider labs(). Confirm node_t. */
                edgeDistances[v] += abs(u-v);
            }
        }
    }
    aed = 0.0;
    // node_t v;
    for(v = 0; v < G->numNodes; v ++) {
        aed += ((double)edgeDistances[v])/G->numEdges;
    }
    // Scaling
    aed = aed/ G->numNodes;
    free(edgeDistances);
    return aed;
}

/*
 * avgClusterCoeff: average local clustering coefficient.  For every node v,
 * each ordered neighbour pair (u,w) that is itself connected (checked in
 * both directions via isNeighbour) contributes 1; the sum is normalized by
 * deg(v)*(deg(v)-1).  O(deg^2 * cost(isNeighbour)) per node.
 */
double avgClusterCoeff(graph *G) {
    double* localClustering = (double*) malloc (sizeof(double) * G->numNodes);
    /* NOTE(review): malloc result is not checked before use. */
#pragma omp parallel
    {
        node_t v;
#if defined(PARFOR_GUIDED)
#pragma omp for schedule(guided, PAR_CHUNKSIZE)
#elif defined(PARFOR_DYNAMIC)
#pragma omp for schedule(dynamic, PAR_CHUNKSIZE)
#elif defined(TASKLOOP_DEFINED)
#pragma omp taskloop num_tasks(NUM_TASKS)
#else
#pragma omp for schedule(static)
#endif
        for (v = 0; v < G->numNodes; v ++) {
            edge_t u_idx;
            localClustering[v] = 0;
            for (u_idx = G->begin[v]; u_idx < G->begin[v+1]; u_idx ++) {
                node_t u = G->node_idx [u_idx];
                edge_t w_idx;
                // printf("The node is %d %d \n", v,u);
                for (w_idx = u_idx+1; w_idx < G->begin[v+1]; w_idx ++) {
                    node_t w = G->node_idx [w_idx];
                    //printf("The check for neighbour is between %d and %d \n ",u,w);
                    if (isNeighbour(G,w,u)) {
                        localClustering[v] += 1;
                    }
                    if (isNeighbour(G,u,w)) {
                        localClustering[v] += 1;
                    }
                }
            }
            // printf("The value of local clustering is %f \n", localClustering[v]);
            int neighbours = (int) (G->begin[v+1] - G->begin[v]);
            if(neighbours > 1) {
                localClustering[v] = localClustering[v]/(neighbours * (neighbours -1));
            }
            // printf("The value of local clustering is %f \n", localClustering[v]);
        }
    }
    /* serial reduction over per-node coefficients */
    clusterCoeff = 0;
    node_t v;
    for(v =0;v<G->numNodes;v++) {
        clusterCoeff += localClustering[v];
    }
    clusterCoeff = clusterCoeff/G->numNodes;
    free(localClustering);
    return clusterCoeff;
}

/* diameter: placeholder, always 0. */
node_t diameter(graph *G) {
    return 0; //TODO if required
}

/* sparsity: |E| / (|V| * (|V|-1)) -- edge density of a directed graph. */
double sparsity(graph *G) {
    double sparsityMeasure = (((double)G->numEdges) / G->numNodes);
    /* printf("The sparsity is %f \n", sparsityMeasure); */
    sparsityMeasure = sparsityMeasure / (G->numNodes-1);
    /* printf("The sparsity is %.8f \n", sparsityMeasure); */
    /* NOTE(review): this print looks like leftover debug output -- it fires
       whenever the measure underflows/equals zero; confirm intent. */
    if(sparsityMeasure == 0) printf("Hello");
    return sparsityMeasure;
}

/*
 * sccIndex: stub -- allocates work arrays and builds the reverse-edge view
 * via createReverseEdge(G), but performs no SCC computation and returns 0.
 */
double sccIndex(graph *G) {
    double* scclist = (double*) malloc (sizeof(double) * G->numNodes);
    int* visited = (int*) malloc (sizeof(int) * G->numNodes);
    int* stack = (int*) malloc(sizeof(int) * G->numNodes);
    memset(visited, 0, G->numNodes*sizeof(int));
    createReverseEdge(G);
    free(stack);
    free(scclist);
    free(visited);
    return 0;
}

/*
 * triangle_counting: counts triangles v < u < w with edges (v,u), (v,w),
 * (w,u); per-thread partial counts are merged into global T with an atomic.
 * Returns T scaled by 2 / (n*(n-1)*(n-2)) (also stored in global scaledT).
 * NOTE(review): when TASKLOOP_DEFINED is set, a taskloop directly inside
 * `omp parallel` (no `single`) is created once per thread -- confirm the
 * build configuration guards against duplicated work.
 */
double triangle_counting(graph *G) {
    // inittracking();
#pragma omp parallel
    {
        int64_t T_private = 0;   /* per-thread triangle count */
        node_t v;
#if defined(PARFOR_GUIDED)
#pragma omp for schedule(guided, PAR_CHUNKSIZE)
#elif defined(PARFOR_DYNAMIC)
#pragma omp for schedule(dynamic, PAR_CHUNKSIZE)
#elif defined(TASKLOOP_DEFINED)
#pragma omp taskloop num_tasks(NUM_TASKS)
#else
#pragma omp for schedule(static)
#endif
        for (v = 0; v < G->numNodes; v ++) {
            edge_t u_idx;
            for (u_idx = G->begin[v]; u_idx < G->begin[v+1]; u_idx ++) {
                node_t u = G->node_idx [u_idx];
                //printf("The edge is from %d to %d \n", v, u);
                if (u > v) {     /* enforce v < u < w to count each triangle once */
                    edge_t w_idx;
                    for (w_idx = G->begin[v]; w_idx < G->begin[v+1]; w_idx ++) {
                        node_t w = G->node_idx [w_idx];
                        if (w > u) {
                            if (isNeighbour(G,w,u)) {
                                T_private = T_private + 1 ;
                            }
                        }
                    }
                }
            }
        }
#pragma omp atomic
        T += T_private;
    }
    // pausetracking();
    scaledT = ( (double)2 * T) / ((double)G->numNodes * (G->numNodes-1) * (G->numNodes-2));
    return scaledT ;
}
task-barrier.c
/* * task-barrier.c -- Archer testcase */ //===----------------------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // // See tools/archer/LICENSE.txt for details. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // RUN: %libarcher-compile-and-run | FileCheck %s // REQUIRES: tsan #include <omp.h> #include <stdio.h> #include <unistd.h> #include "ompt/ompt-signal.h" int main(int argc, char *argv[]) { int var = 0, a = 0; #pragma omp parallel num_threads(2) shared(var, a) { #pragma omp master { #pragma omp task shared(var) { OMPT_SIGNAL(a); var++; } // Give other thread time to steal the task. OMPT_WAIT(a, 1); } #pragma omp barrier #pragma omp master { var++; } } fprintf(stderr, "DONE\n"); int error = (var != 2); return error; } // CHECK-NOT: ThreadSanitizer: data race // CHECK-NOT: ThreadSanitizer: reported // CHECK: DONE
resize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % RRRR EEEEE SSSSS IIIII ZZZZZ EEEEE % % R R E SS I ZZ E % % RRRR EEE SSS I ZZZ EEE % % R R E SS I ZZ E % % R R EEEEE SSSSS IIIII ZZZZZ EEEEE % % % % % % MagickCore Image Resize Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/draw.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/magick.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resize.h" #include "MagickCore/resize-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" #if defined(MAGICKCORE_LQR_DELEGATE) #include <lqr.h> #endif /* Typedef declarations. */ struct _ResizeFilter { double (*filter)(const double,const ResizeFilter *), (*window)(const double,const ResizeFilter *), support, /* filter region of support - the filter support limit */ window_support, /* window support, usally equal to support (expert only) */ scale, /* dimension scaling to fit window support (usally 1.0) */ blur, /* x-scale (blur-sharpen) */ coefficient[7]; /* cubic coefficents for BC-cubic filters */ ResizeWeightingFunctionType filterWeightingType, windowWeightingType; size_t signature; }; /* Forward declaractions. 
*/ static double I0(double x), BesselOrderOne(double), Sinc(const double, const ResizeFilter *), SincFast(const double, const ResizeFilter *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + F i l t e r F u n c t i o n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % These are the various filter and windowing functions that are provided. % % They are internal to this module only. See AcquireResizeFilterInfo() for % details of the access to these functions, via the GetResizeFilterSupport() % and GetResizeFilterWeight() API interface. % % The individual filter functions have this format... % % static MagickRealtype *FilterName(const double x,const double support) % % A description of each parameter follows: % % o x: the distance from the sampling point generally in the range of 0 to % support. The GetResizeFilterWeight() ensures this a positive value. % % o resize_filter: current filter information. This allows function to % access support, and possibly other pre-calculated information defining % the functions. % */ static double Blackman(const double x, const ResizeFilter *magick_unused(resize_filter)) { /* Blackman: 2nd order cosine windowing function: 0.42 + 0.5 cos(pi x) + 0.08 cos(2pi x) Refactored by Chantal Racette and Nicolas Robidoux to one trig call and five flops. */ const double cosine = cos((double) (MagickPI*x)); magick_unreferenced(resize_filter); return(0.34+cosine*(0.5+cosine*0.16)); } static double Bohman(const double x, const ResizeFilter *magick_unused(resize_filter)) { /* Bohman: 2rd Order cosine windowing function: (1-x) cos(pi x) + sin(pi x) / pi. Refactored by Nicolas Robidoux to one trig call, one sqrt call, and 7 flops, taking advantage of the fact that the support of Bohman is 1.0 (so that we know that sin(pi x) >= 0). 
*/ const double cosine = cos((double) (MagickPI*x)); const double sine=sqrt(1.0-cosine*cosine); magick_unreferenced(resize_filter); return((1.0-x)*cosine+(1.0/MagickPI)*sine); } static double Box(const double magick_unused(x), const ResizeFilter *magick_unused(resize_filter)) { magick_unreferenced(x); magick_unreferenced(resize_filter); /* A Box filter is a equal weighting function (all weights equal). DO NOT LIMIT results by support or resize point sampling will work as it requests points beyond its normal 0.0 support size. */ return(1.0); } static double Cosine(const double x, const ResizeFilter *magick_unused(resize_filter)) { magick_unreferenced(resize_filter); /* Cosine window function: cos((pi/2)*x). */ return(cos((double) (MagickPI2*x))); } static double CubicBC(const double x,const ResizeFilter *resize_filter) { /* Cubic Filters using B,C determined values: Mitchell-Netravali B = 1/3 C = 1/3 "Balanced" cubic spline filter Catmull-Rom B = 0 C = 1/2 Interpolatory and exact on linears Spline B = 1 C = 0 B-Spline Gaussian approximation Hermite B = 0 C = 0 B-Spline interpolator See paper by Mitchell and Netravali, Reconstruction Filters in Computer Graphics Computer Graphics, Volume 22, Number 4, August 1988 http://www.cs.utexas.edu/users/fussell/courses/cs384g/lectures/mitchell/ Mitchell.pdf. Coefficents are determined from B,C values: P0 = ( 6 - 2*B )/6 = coeff[0] P1 = 0 P2 = (-18 +12*B + 6*C )/6 = coeff[1] P3 = ( 12 - 9*B - 6*C )/6 = coeff[2] Q0 = ( 8*B +24*C )/6 = coeff[3] Q1 = ( -12*B -48*C )/6 = coeff[4] Q2 = ( 6*B +30*C )/6 = coeff[5] Q3 = ( - 1*B - 6*C )/6 = coeff[6] which are used to define the filter: P0 + P1*x + P2*x^2 + P3*x^3 0 <= x < 1 Q0 + Q1*x + Q2*x^2 + Q3*x^3 1 <= x < 2 which ensures function is continuous in value and derivative (slope). 
*/ if (x < 1.0) return(resize_filter->coefficient[0]+x*(x* (resize_filter->coefficient[1]+x*resize_filter->coefficient[2]))); if (x < 2.0) return(resize_filter->coefficient[3]+x*(resize_filter->coefficient[4]+x* (resize_filter->coefficient[5]+x*resize_filter->coefficient[6]))); return(0.0); } static double CubicSpline(const double x,const ResizeFilter *resize_filter) { if (resize_filter->support <= 2.0) { /* 2-lobe Spline filter. */ if (x < 1.0) return(((x-9.0/5.0)*x-1.0/5.0)*x+1.0); if (x < 2.0) return(((-1.0/3.0*(x-1.0)+4.0/5.0)*(x-1.0)-7.0/15.0)*(x-1.0)); return(0.0); } if (resize_filter->support <= 3.0) { /* 3-lobe Spline filter. */ if (x < 1.0) return(((13.0/11.0*x-453.0/209.0)*x-3.0/209.0)*x+1.0); if (x < 2.0) return(((-6.0/11.0*(x-1.0)+270.0/209.0)*(x-1.0)-156.0/209.0)*(x-1.0)); if (x < 3.0) return(((1.0/11.0*(x-2.0)-45.0/209.0)*(x-2.0)+26.0/209.0)*(x-2.0)); return(0.0); } /* 4-lobe Spline filter. */ if (x < 1.0) return(((49.0/41.0*x-6387.0/2911.0)*x-3.0/2911.0)*x+1.0); if (x < 2.0) return(((-24.0/41.0*(x-1.0)+4032.0/2911.0)*(x-1.0)-2328.0/2911.0)*(x-1.0)); if (x < 3.0) return(((6.0/41.0*(x-2.0)-1008.0/2911.0)*(x-2.0)+582.0/2911.0)*(x-2.0)); if (x < 4.0) return(((-1.0/41.0*(x-3.0)+168.0/2911.0)*(x-3.0)-97.0/2911.0)*(x-3.0)); return(0.0); } static double Gaussian(const double x,const ResizeFilter *resize_filter) { /* Gaussian with a sigma = 1/2 (or as user specified) Gaussian Formula (1D) ... exp( -(x^2)/((2.0*sigma^2) ) / (sqrt(2*PI)*sigma^2)) Gaussian Formula (2D) ... exp( -(x^2+y^2)/(2.0*sigma^2) ) / (PI*sigma^2) ) or for radius exp( -(r^2)/(2.0*sigma^2) ) / (PI*sigma^2) ) Note that it is only a change from 1-d to radial form is in the normalization multiplier which is not needed or used when Gaussian is used as a filter. The constants are pre-calculated... coeff[0]=sigma; coeff[1]=1.0/(2.0*sigma^2); coeff[2]=1.0/(sqrt(2*PI)*sigma^2); exp( -coeff[1]*(x^2)) ) * coeff[2]; However the multiplier coeff[1] is need, the others are informative only. 
This separates the gaussian 'sigma' value from the 'blur/support' settings allowing for its use in special 'small sigma' gaussians, without the filter 'missing' pixels because the support becomes too small. */ return(exp((double)(-resize_filter->coefficient[1]*x*x))); } static double Hann(const double x, const ResizeFilter *magick_unused(resize_filter)) { /* Cosine window function: 0.5+0.5*cos(pi*x). */ const double cosine = cos((double) (MagickPI*x)); magick_unreferenced(resize_filter); return(0.5+0.5*cosine); } static double Hamming(const double x, const ResizeFilter *magick_unused(resize_filter)) { /* Offset cosine window function: .54 + .46 cos(pi x). */ const double cosine = cos((double) (MagickPI*x)); magick_unreferenced(resize_filter); return(0.54+0.46*cosine); } static double Jinc(const double x, const ResizeFilter *magick_unused(resize_filter)) { magick_unreferenced(resize_filter); /* See Pratt "Digital Image Processing" p.97 for Jinc/Bessel functions. http://mathworld.wolfram.com/JincFunction.html and page 11 of http://www.ph.ed.ac.uk/%7ewjh/teaching/mo/slides/lens/lens.pdf The original "zoom" program by Paul Heckbert called this "Bessel". But really it is more accurately named "Jinc". */ if (x == 0.0) return(0.5*MagickPI); return(BesselOrderOne(MagickPI*x)/x); } static double Kaiser(const double x,const ResizeFilter *resize_filter) { /* Kaiser Windowing Function (bessel windowing) I0( beta * sqrt( 1-x^2) ) / IO(0) Beta (coeff[0]) is a free value from 5 to 8 (defaults to 6.5). However it is typically defined in terms of Alpha*PI The normalization factor (coeff[1]) is not actually needed, but without it the filters has a large value at x=0 making it difficult to compare the function with other windowing functions. 
*/ return(resize_filter->coefficient[1]*I0(resize_filter->coefficient[0]* sqrt((double) (1.0-x*x)))); } static double Lagrange(const double x,const ResizeFilter *resize_filter) { double value; ssize_t i; ssize_t n, order; /* Lagrange piecewise polynomial fit of sinc: N is the 'order' of the lagrange function and depends on the overall support window size of the filter. That is: for a support of 2, it gives a lagrange-4 (piecewise cubic function). "n" identifies the piece of the piecewise polynomial. See Survey: Interpolation Methods, IEEE Transactions on Medical Imaging, Vol 18, No 11, November 1999, p1049-1075, -- Equation 27 on p1064. */ if (x > resize_filter->support) return(0.0); order=(ssize_t) (2.0*resize_filter->window_support); /* number of pieces */ n=(ssize_t) (resize_filter->window_support+x); value=1.0f; for (i=0; i < order; i++) if (i != n) value*=(n-i-x)/(n-i); return(value); } static double Quadratic(const double x, const ResizeFilter *magick_unused(resize_filter)) { magick_unreferenced(resize_filter); /* 2rd order (quadratic) B-Spline approximation of Gaussian. */ if (x < 0.5) return(0.75-x*x); if (x < 1.5) return(0.5*(x-1.5)*(x-1.5)); return(0.0); } static double Sinc(const double x, const ResizeFilter *magick_unused(resize_filter)) { magick_unreferenced(resize_filter); /* Scaled sinc(x) function using a trig call: sinc(x) == sin(pi x)/(pi x). */ if (x != 0.0) { const double alpha=(double) (MagickPI*x); return(sin((double) alpha)/alpha); } return((double) 1.0); } static double SincFast(const double x, const ResizeFilter *magick_unused(resize_filter)) { magick_unreferenced(resize_filter); /* Approximations of the sinc function sin(pi x)/(pi x) over the interval [-4,4] constructed by Nicolas Robidoux and Chantal Racette with funding from the Natural Sciences and Engineering Research Council of Canada. 
    Although the approximations are polynomials (for low order of
    approximation) and quotients of polynomials (for higher order of
    approximation) and consequently are similar in form to Taylor polynomials /
    Pade approximants, the approximations are computed with a completely
    different technique.

    Summary: These approximations are "the best" in terms of bang (accuracy)
    for the buck (flops).  More specifically: Among the polynomial quotients
    that can be computed using a fixed number of flops (with a given "+ - * /
    budget"), the chosen polynomial quotient is the one closest to the
    approximated function with respect to maximum absolute relative error over
    the given interval.

    The Remez algorithm, as implemented in the boost library's minimax package,
    is the key to the construction: http://www.boost.org/doc/libs/1_36_0/libs/
    math/doc/sf_and_dist/html/math_toolkit/backgrounders/remez.html

    If outside of the interval of approximation, use the standard trig formula.
  */
  if (x > 4.0)
    {
      const double alpha=(double) (MagickPI*x);
      return(sin((double) alpha)/alpha);
    }
  {
    /*
      The approximations only depend on x^2 (sinc is an even function).
      The precision of the approximation chosen at compile time matches the
      configured quantum depth: deeper quanta need a smaller relative error.
    */
    const double xx = x*x;
#if MAGICKCORE_QUANTUM_DEPTH <= 8
    /*
      Maximum absolute relative error 6.3e-6 < 1/2^17.
    */
    const double c0 = 0.173610016489197553621906385078711564924e-2L;
    const double c1 = -0.384186115075660162081071290162149315834e-3L;
    const double c2 = 0.393684603287860108352720146121813443561e-4L;
    const double c3 = -0.248947210682259168029030370205389323899e-5L;
    const double c4 = 0.107791837839662283066379987646635416692e-6L;
    const double c5 = -0.324874073895735800961260474028013982211e-8L;
    const double c6 = 0.628155216606695311524920882748052490116e-10L;
    const double c7 = -0.586110644039348333520104379959307242711e-12L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    /* (xx-1)(xx-4)(xx-9)(xx-16) pins the zeros of sinc at x=1..4 exactly */
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#elif MAGICKCORE_QUANTUM_DEPTH <= 16
    /*
      Max. abs. rel. error 2.2e-8 < 1/2^25.
    */
    const double c0 = 0.173611107357320220183368594093166520811e-2L;
    const double c1 = -0.384240921114946632192116762889211361285e-3L;
    const double c2 = 0.394201182359318128221229891724947048771e-4L;
    const double c3 = -0.250963301609117217660068889165550534856e-5L;
    const double c4 = 0.111902032818095784414237782071368805120e-6L;
    const double c5 = -0.372895101408779549368465614321137048875e-8L;
    const double c6 = 0.957694196677572570319816780188718518330e-10L;
    const double c7 = -0.187208577776590710853865174371617338991e-11L;
    const double c8 = 0.253524321426864752676094495396308636823e-13L;
    const double c9 = -0.177084805010701112639035485248501049364e-15L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*(c7+xx*(c8+xx*c9))))))));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)*p);
#else
    /*
      Max. abs. rel. error 1.2e-12 < 1/2^39.
    */
    const double c0 = 0.173611111110910715186413700076827593074e-2L;
    const double c1 = -0.289105544717893415815859968653611245425e-3L;
    const double c2 = 0.206952161241815727624413291940849294025e-4L;
    const double c3 = -0.834446180169727178193268528095341741698e-6L;
    const double c4 = 0.207010104171026718629622453275917944941e-7L;
    const double c5 = -0.319724784938507108101517564300855542655e-9L;
    const double c6 = 0.288101675249103266147006509214934493930e-11L;
    const double c7 = -0.118218971804934245819960233886876537953e-13L;
    const double p =
      c0+xx*(c1+xx*(c2+xx*(c3+xx*(c4+xx*(c5+xx*(c6+xx*c7))))));
    const double d0 = 1.0L;
    const double d1 = 0.547981619622284827495856984100563583948e-1L;
    const double d2 = 0.134226268835357312626304688047086921806e-2L;
    const double d3 = 0.178994697503371051002463656833597608689e-4L;
    const double d4 = 0.114633394140438168641246022557689759090e-6L;
    const double q = d0+xx*(d1+xx*(d2+xx*(d3+xx*d4)));
    return((xx-1.0)*(xx-4.0)*(xx-9.0)*(xx-16.0)/q*p);
#endif
  }
}

static double Triangle(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    1st order (linear)
    B-Spline, bilinear interpolation, Tent 1D filter, or
    a Bartlett 2D Cone filter.  Also used as a Bartlett
    Windowing function for Sinc().
  */
  if (x < 1.0)
    return(1.0-x);
  return(0.0);
}

static double Welch(const double x,
  const ResizeFilter *magick_unused(resize_filter))
{
  magick_unreferenced(resize_filter);
  /*
    Welch parabolic windowing filter.
  */
  if (x < 1.0)
    return(1.0-x*x);
  return(0.0);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   A c q u i r e   R e s i z e   F i l t e r                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireResizeFilter() allocates the ResizeFilter structure.  Choose from
%  these filters:
%
%  FIR (Finite impulse Response) Filters
%      Box         Triangle   Quadratic
%      Spline      Hermite    Catrom
%      Mitchell
%
%  IIR (Infinite impulse Response) Filters
%      Gaussian    Sinc       Jinc (Bessel)
%
%  Windowed Sinc/Jinc Filters
%      Blackman    Bohman     Lanczos
%      Hann        Hamming    Cosine
%      Kaiser      Welch      Parzen
%      Bartlett
%
%  Special Purpose Filters
%      Cubic  SincFast  LanczosSharp  Lanczos2  Lanczos2Sharp
%      Robidoux RobidouxSharp
%
%  The user's "-filter" selection is used to lookup the default 'expert'
%  settings for that filter from an internal table.  However any provided
%  'expert' settings (see below) may override this selection.
%
%  FIR filters are used as is, and are limited to that filter's support
%  window (unless over-ridden).  'Gaussian' while classed as an IIR filter,
%  is also simply clipped by its support size (currently 1.5 or approximately
%  3*sigma as recommended by many references)
%
%  The special 'cylindrical' filter flag will promote the default 4-lobed
%  Windowed Sinc filter to a 3-lobed Windowed Jinc equivalent, which is better
%  suited to this style of image resampling.  This typically happens when
%  using such a filter for image distortions.
% % SPECIFIC FILTERS: % % Directly requesting 'Sinc', 'Jinc' function as a filter will force the use % of function without any windowing, or promotion for cylindrical usage. This % is not recommended, except by image processing experts, especially as part % of expert option filter function selection. % % Two forms of the 'Sinc' function are available: Sinc and SincFast. Sinc is % computed using the traditional sin(pi*x)/(pi*x); it is selected if the user % specifically specifies the use of a Sinc filter. SincFast uses highly % accurate (and fast) polynomial (low Q) and rational (high Q) approximations, % and will be used by default in most cases. % % The Lanczos filter is a special 3-lobed Sinc-windowed Sinc filter (promoted % to Jinc-windowed Jinc for cylindrical (Elliptical Weighted Average) use). % The Sinc version is the most popular windowed filter. % % LanczosSharp is a slightly sharpened (blur=0.9812505644269356 < 1) form of % the Lanczos filter, specifically designed for EWA distortion (as a % Jinc-Jinc); it can also be used as a slightly sharper orthogonal Lanczos % (Sinc-Sinc) filter. The chosen blur value comes as close as possible to % satisfying the following condition without changing the character of the % corresponding EWA filter: % % 'No-Op' Vertical and Horizontal Line Preservation Condition: Images with % only vertical or horizontal features are preserved when performing 'no-op" % with EWA distortion. % % The Lanczos2 and Lanczos2Sharp filters are 2-lobe versions of the Lanczos % filters. The 'sharp' version uses a blur factor of 0.9549963639785485, % again chosen because the resulting EWA filter comes as close as possible to % satisfying the above condition. % % Robidoux is another filter tuned for EWA. It is the Keys cubic filter % defined by B=(228 - 108 sqrt(2))/199. Robidoux satisfies the "'No-Op' % Vertical and Horizontal Line Preservation Condition" exactly, and it % moderately blurs high frequency 'pixel-hash' patterns under no-op. 
It turns % out to be close to both Mitchell and Lanczos2Sharp. For example, its first % crossing is at (36 sqrt(2) + 123)/(72 sqrt(2) + 47), almost the same as the % first crossing of Mitchell and Lanczos2Sharp. % % RobidouxSharp is a slightly sharper version of Robidoux, some believe it % is too sharp. It is designed to minimize the maximum possible change in % a pixel value which is at one of the extremes (e.g., 0 or 255) under no-op % conditions. Amazingly Mitchell falls roughly between Robidoux and % RobidouxSharp, though this seems to have been pure coincidence. % % 'EXPERT' OPTIONS: % % These artifact "defines" are not recommended for production use without % expert knowledge of resampling, filtering, and the effects they have on the % resulting resampled (resized or distorted) image. % % They can be used to override any and all filter default, and it is % recommended you make good use of "filter:verbose" to make sure that the % overall effect of your selection (before and after) is as expected. % % "filter:verbose" controls whether to output the exact results of the % filter selections made, as well as plotting data for graphing the % resulting filter over the filters support range. % % "filter:filter" select the main function associated with this filter % name, as the weighting function of the filter. This can be used to % set a windowing function as a weighting function, for special % purposes, such as graphing. % % If a "filter:window" operation has not been provided, a 'Box' % windowing function will be set to denote that no windowing function is % being used. % % "filter:window" Select this windowing function for the filter. While any % filter could be used as a windowing function, using the 'first lobe' of % that filter over the whole support window, using a non-windowing % function is not advisible. If no weighting filter function is specified % a 'SincFast' filter is used. % % "filter:lobes" Number of lobes to use for the Sinc/Jinc filter. 
This is a
%      simpler method of setting filter support size that will correctly
%      handle the Sinc/Jinc switch for an operator's filtering requirements.
%      Only integers should be given.
%
%  "filter:support" Set the support size for filtering to the size given.
%      This is not recommended for Sinc/Jinc windowed filters (lobes should
%      be used instead).  This will override any 'filter:lobes' option.
%
%  "filter:win-support" Scale windowing function to this size instead.  This
%      causes the windowing (or self-windowing Lagrange filter) to act as if
%      the support window is much larger than what is actually supplied to
%      the calling operator.  The filter however is still clipped to the
%      real support size given, by the support range supplied to the caller.
%      If unset this will equal the normal filter support size.
%
%  "filter:blur" Scale the filter and support window by this amount.  A value
%      of > 1 will generally result in a more blurred image with more ringing
%      effects, while a value < 1 will sharpen the resulting image with more
%      aliasing effects.
%
%  "filter:sigma" The sigma value to use for the Gaussian filter only.
%      Defaults to '1/2'.  Using a different sigma effectively provides a
%      method of using the filter as a 'blur' convolution.  Particularly when
%      using it for Distort.
%
%  "filter:b"
%  "filter:c" Override the preset B,C values for a Cubic filter.
%      If only one of these is given it is assumed to be a 'Keys' type of
%      filter such that B+2C=1, where Keys 'alpha' value = C.
%
%  Examples:
%
%  Set a true un-windowed Sinc filter with 8 lobes (very slow):
%
%     -define filter:filter=Sinc
%     -define filter:lobes=8
%
%  Set an 8 lobe Lanczos (Sinc or Jinc) filter:
%
%     -filter Lanczos
%     -define filter:lobes=8
%
%  The format of the AcquireResizeFilter method is:
%
%      ResizeFilter *AcquireResizeFilter(const Image *image,
%        const FilterType filter_type,const MagickBooleanType cylindrical,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o filter: the filter type, defining a preset filter, window and support.
%      The artifact settings listed above will override those selections.
%
%    o blur: blur the filter by this amount, use 1.0 if unknown.  Image
%      artifact "filter:blur" will override this API call usage, including any
%      internal change (such as for cylindrical usage).
%
%    o radial: use a 1D orthogonal filter (Sinc) or 2D cylindrical (radial)
%      filter (Jinc).
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate ResizeFilter *AcquireResizeFilter(const Image *image,
  const FilterType filter,const MagickBooleanType cylindrical,
  ExceptionInfo *exception)
{
  const char
    *artifact;

  FilterType
    filter_type,
    window_type;

  double
    B,
    C,
    value;

  ResizeFilter
    *resize_filter;

  /*
    Table Mapping given Filter, into Weighting and Windowing functions.
    A 'Box' windowing function means it is a simple non-windowed filter.
    A 'SincFast' filter function could be upgraded to a 'Jinc' filter if a
    "cylindrical" is requested, unless a 'Sinc' or 'SincFast' filter was
    specifically requested by the user.

    WARNING: The order of this table must match the order of the FilterType
    enumeration specified in "resample.h", or the filter names will not match
    the filter being setup.

    You can check filter setups with the "filter:verbose" expert setting.
*/ static struct { FilterType filter, window; } const mapping[SentinelFilter] = { { UndefinedFilter, BoxFilter }, /* Undefined (default to Box) */ { PointFilter, BoxFilter }, /* SPECIAL: Nearest neighbour */ { BoxFilter, BoxFilter }, /* Box averaging filter */ { TriangleFilter, BoxFilter }, /* Linear interpolation filter */ { HermiteFilter, BoxFilter }, /* Hermite interpolation filter */ { SincFastFilter, HannFilter }, /* Hann -- cosine-sinc */ { SincFastFilter, HammingFilter }, /* Hamming -- '' variation */ { SincFastFilter, BlackmanFilter }, /* Blackman -- 2*cosine-sinc */ { GaussianFilter, BoxFilter }, /* Gaussian blur filter */ { QuadraticFilter, BoxFilter }, /* Quadratic Gaussian approx */ { CubicFilter, BoxFilter }, /* General Cubic Filter, Spline */ { CatromFilter, BoxFilter }, /* Cubic-Keys interpolator */ { MitchellFilter, BoxFilter }, /* 'Ideal' Cubic-Keys filter */ { JincFilter, BoxFilter }, /* Raw 3-lobed Jinc function */ { SincFilter, BoxFilter }, /* Raw 4-lobed Sinc function */ { SincFastFilter, BoxFilter }, /* Raw fast sinc ("Pade"-type) */ { SincFastFilter, KaiserFilter }, /* Kaiser -- square root-sinc */ { LanczosFilter, WelchFilter }, /* Welch -- parabolic (3 lobe) */ { SincFastFilter, CubicFilter }, /* Parzen -- cubic-sinc */ { SincFastFilter, BohmanFilter }, /* Bohman -- 2*cosine-sinc */ { SincFastFilter, TriangleFilter }, /* Bartlett -- triangle-sinc */ { LagrangeFilter, BoxFilter }, /* Lagrange self-windowing */ { LanczosFilter, LanczosFilter }, /* Lanczos Sinc-Sinc filters */ { LanczosSharpFilter, LanczosSharpFilter }, /* | these require */ { Lanczos2Filter, Lanczos2Filter }, /* | special handling */ { Lanczos2SharpFilter, Lanczos2SharpFilter }, { RobidouxFilter, BoxFilter }, /* Cubic Keys tuned for EWA */ { RobidouxSharpFilter, BoxFilter }, /* Sharper Cubic Keys for EWA */ { LanczosFilter, CosineFilter }, /* Cosine window (3 lobes) */ { SplineFilter, BoxFilter }, /* Spline Cubic Filter */ { LanczosRadiusFilter, LanczosFilter }, /* Lanczos 
with integer radius */ { CubicSplineFilter, BoxFilter }, /* CubicSpline (2/3/4 lobes) */ }; /* Table mapping the filter/window from the above table to an actual function. The default support size for that filter as a weighting function, the range to scale with to use that function as a sinc windowing function, (typ 1.0). Note that the filter_type -> function is 1 to 1 except for Sinc(), SincFast(), and CubicBC() functions, which may have multiple filter to function associations. See "filter:verbose" handling below for the function -> filter mapping. */ static struct { double (*function)(const double,const ResizeFilter*), support, /* Default lobes/support size of the weighting filter. */ scale, /* Support when function used as a windowing function Typically equal to the location of the first zero crossing. */ B,C; /* BC-spline coefficients, ignored if not a CubicBC filter. */ ResizeWeightingFunctionType weightingFunctionType; } const filters[SentinelFilter] = { /* .--- support window (if used as a Weighting Function) | .--- first crossing (if used as a Windowing Function) | | .--- B value for Cubic Function | | | .---- C value for Cubic Function | | | | */ { Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Undefined (default to Box) */ { Box, 0.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Point (special handling) */ { Box, 0.5, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Box */ { Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Triangle */ { CubicBC, 1.0, 1.0, 0.0, 0.0, CubicBCWeightingFunction }, /* Hermite (cubic B=C=0) */ { Hann, 1.0, 1.0, 0.0, 0.0, HannWeightingFunction }, /* Hann, cosine window */ { Hamming, 1.0, 1.0, 0.0, 0.0, HammingWeightingFunction }, /* Hamming, '' variation */ { Blackman, 1.0, 1.0, 0.0, 0.0, BlackmanWeightingFunction }, /* Blackman, 2*cosine window */ { Gaussian, 2.0, 1.5, 0.0, 0.0, GaussianWeightingFunction }, /* Gaussian */ { Quadratic, 1.5, 1.5, 0.0, 0.0, QuadraticWeightingFunction },/* Quadratic gaussian */ { CubicBC, 
2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* General Cubic Filter */ { CubicBC, 2.0, 1.0, 0.0, 0.5, CubicBCWeightingFunction }, /* Catmull-Rom (B=0,C=1/2) */ { CubicBC, 2.0, 8.0/7.0, 1./3., 1./3., CubicBCWeightingFunction }, /* Mitchell (B=C=1/3) */ { Jinc, 3.0, 1.2196698912665045, 0.0, 0.0, JincWeightingFunction }, /* Raw 3-lobed Jinc */ { Sinc, 4.0, 1.0, 0.0, 0.0, SincWeightingFunction }, /* Raw 4-lobed Sinc */ { SincFast, 4.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Raw fast sinc ("Pade"-type) */ { Kaiser, 1.0, 1.0, 0.0, 0.0, KaiserWeightingFunction }, /* Kaiser (square root window) */ { Welch, 1.0, 1.0, 0.0, 0.0, WelchWeightingFunction }, /* Welch (parabolic window) */ { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Parzen (B-Spline window) */ { Bohman, 1.0, 1.0, 0.0, 0.0, BohmanWeightingFunction }, /* Bohman, 2*Cosine window */ { Triangle, 1.0, 1.0, 0.0, 0.0, TriangleWeightingFunction }, /* Bartlett (triangle window) */ { Lagrange, 2.0, 1.0, 0.0, 0.0, LagrangeWeightingFunction }, /* Lagrange sinc approximation */ { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 3-lobed Sinc-Sinc */ { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Sharpened */ { SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, 2-lobed */ { SincFast, 2.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos2, sharpened */ /* Robidoux: Keys cubic close to Lanczos2D sharpened */ { CubicBC, 2.0, 1.1685777620836932, 0.37821575509399867, 0.31089212245300067, CubicBCWeightingFunction }, /* RobidouxSharp: Sharper version of Robidoux */ { CubicBC, 2.0, 1.105822933719019, 0.2620145123990142, 0.3689927438004929, CubicBCWeightingFunction }, { Cosine, 1.0, 1.0, 0.0, 0.0, CosineWeightingFunction }, /* Low level cosine window */ { CubicBC, 2.0, 2.0, 1.0, 0.0, CubicBCWeightingFunction }, /* Cubic B-Spline (B=1,C=0) */ { SincFast, 3.0, 1.0, 0.0, 0.0, SincFastWeightingFunction }, /* Lanczos, Interger Radius */ { 
CubicSpline,2.0, 0.5, 0.0, 0.0, BoxWeightingFunction }, /* Spline Lobes 2-lobed */ }; /* The known zero crossings of the Jinc() or more accurately the Jinc(x*PI) function being used as a filter. It is used by the "filter:lobes" expert setting and for 'lobes' for Jinc functions in the previous table. This way users do not have to deal with the highly irrational lobe sizes of the Jinc filter. Values taken from http://cose.math.bas.bg/webMathematica/webComputing/BesselZeros.jsp using Jv-function with v=1, then dividing by PI. */ static double jinc_zeros[16] = { 1.2196698912665045, 2.2331305943815286, 3.2383154841662362, 4.2410628637960699, 5.2427643768701817, 6.2439216898644877, 7.2447598687199570, 8.2453949139520427, 9.2458926849494673, 10.246293348754916, 11.246622794877883, 12.246898461138105, 13.247132522181061, 14.247333735806849, 15.247508563037300, 16.247661874700962 }; /* Allocate resize filter. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(UndefinedFilter < filter && filter < SentinelFilter); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); (void) exception; resize_filter=(ResizeFilter *) AcquireCriticalMemory(sizeof(*resize_filter)); (void) memset(resize_filter,0,sizeof(*resize_filter)); /* Defaults for the requested filter. 
  */
  filter_type=mapping[filter].filter;
  window_type=mapping[filter].window;
  resize_filter->blur=1.0;  /* no scaling of filter/support by default */
  /* Promote 1D Windowed Sinc Filters to 2D Windowed Jinc filters */
  if ((cylindrical != MagickFalse) && (filter_type == SincFastFilter) &&
      (filter != SincFastFilter))
    filter_type=JincFilter;  /* 1D Windowed Sinc => 2D Windowed Jinc filters */
  /*
    Expert filter setting override.

    NOTE(review): IsStringTrue() accepts only "true"/"on"/"yes"/"1", so an
    artifact holding a filter NAME (e.g. "Lanczos") would fail this gate and
    be ignored -- verify a plain NULL check was not intended here.
  */
  artifact=GetImageArtifact(image,"filter:filter");
  if (IsStringTrue(artifact) != MagickFalse)
    {
      ssize_t
        option;

      option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
      if ((UndefinedFilter < option) && (option < SentinelFilter))
        {
          /* Raw filter request - no window function. */
          filter_type=(FilterType) option;
          window_type=BoxFilter;
        }
      /* Filter override with a specific window function. */
      artifact=GetImageArtifact(image,"filter:window");
      if (artifact != (const char *) NULL)
        {
          option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
          if ((UndefinedFilter < option) && (option < SentinelFilter))
            window_type=(FilterType) option;
        }
    }
  else
    {
      /* Window specified, but no filter function?  Assume Sinc/Jinc. */
      artifact=GetImageArtifact(image,"filter:window");
      if (artifact != (const char *) NULL)
        {
          ssize_t
            option;

          option=ParseCommandOption(MagickFilterOptions,MagickFalse,artifact);
          if ((UndefinedFilter < option) && (option < SentinelFilter))
            {
              /* Sinc for orthogonal use, Jinc for cylindrical (EWA) use */
              filter_type= cylindrical != MagickFalse ? JincFilter :
                SincFastFilter;
              window_type=(FilterType) option;
            }
        }
    }
  /*
    Assign the real functions to use for the filters selected.
  */
  resize_filter->filter=filters[filter_type].function;
  resize_filter->support=filters[filter_type].support;
  resize_filter->filterWeightingType=filters[filter_type].weightingFunctionType;
  resize_filter->window=filters[window_type].function;
  resize_filter->windowWeightingType=filters[window_type].weightingFunctionType;
  resize_filter->scale=filters[window_type].scale;
  resize_filter->signature=MagickCoreSignature;
  /*
    Filter Modifications for orthogonal/cylindrical usage.
  */
  if (cylindrical != MagickFalse)
    switch (filter_type)
    {
      case BoxFilter:
        /* Support for Cylindrical Box should be sqrt(2)/2 */
        resize_filter->support=(double) MagickSQ1_2;
        break;
      case LanczosFilter:
      case LanczosSharpFilter:
      case Lanczos2Filter:
      case Lanczos2SharpFilter:
      case LanczosRadiusFilter:
        /* Lanczos variants become Jinc-windowed Jinc for EWA use */
        resize_filter->filter=filters[JincFilter].function;
        resize_filter->window=filters[JincFilter].function;
        resize_filter->scale=filters[JincFilter].scale;
        /* number of lobes (support window size) remains unchanged */
        break;
      default:
        break;
    }
  /* Global Sharpening (regardless of orthogonal/cylindrical) */
  switch (filter_type)
  {
    case LanczosSharpFilter:
      resize_filter->blur *= 0.9812505644269356;
      break;
    case Lanczos2SharpFilter:
      resize_filter->blur *= 0.9549963639785485;
      break;
    /* case LanczosRadius: blur adjust is done after lobes */
    default:
      break;
  }
  /*
    Expert Option Modifications.
  */
  /* User Gaussian Sigma Override - no support change */
  if ((resize_filter->filter == Gaussian) ||
      (resize_filter->window == Gaussian) )
    {
      value=0.5;  /* Gaussian sigma default, half pixel */
      artifact=GetImageArtifact(image,"filter:sigma");
      if (artifact != (const char *) NULL)
        value=StringToDouble(artifact,(char **) NULL);
      /* Define coefficients for Gaussian */
      resize_filter->coefficient[0]=value;  /* note sigma too */
      resize_filter->coefficient[1]=PerceptibleReciprocal(2.0*value*value);
        /* sigma scaling */
      resize_filter->coefficient[2]=PerceptibleReciprocal(Magick2PI*value*value);
        /* normalization - not actually needed or used!
  */
      if ( value > 0.5 )
        resize_filter->support *= 2*value;  /* increase support linearly */
    }

  /* User Kaiser Alpha Override - no support change */
  if ((resize_filter->filter == Kaiser) ||
      (resize_filter->window == Kaiser) )
    {
      value=6.5;  /* default beta value for Kaiser bessel windowing function */
      artifact=GetImageArtifact(image,"filter:alpha");  /* FUTURE: deprecate */
      if (artifact != (const char *) NULL)
        value=StringToDouble(artifact,(char **) NULL);
      artifact=GetImageArtifact(image,"filter:kaiser-beta");
      if (artifact != (const char *) NULL)
        value=StringToDouble(artifact,(char **) NULL);
      artifact=GetImageArtifact(image,"filter:kaiser-alpha");
      if (artifact != (const char *) NULL)
        value=StringToDouble(artifact,(char **) NULL)*MagickPI;
      /* Define coefficients for Kaiser Windowing Function */
      resize_filter->coefficient[0]=value;  /* alpha */
      resize_filter->coefficient[1]=PerceptibleReciprocal(I0(value));
        /* normalization */
    }

  /* Support Overrides */
  artifact=GetImageArtifact(image,"filter:lobes");
  if (artifact != (const char *) NULL)
    {
      ssize_t
        lobes;

      lobes=(ssize_t) StringToLong(artifact);
      if (lobes < 1)
        lobes=1;  /* clamp: at least one lobe */
      resize_filter->support=(double) lobes;
    }
  if (resize_filter->filter == Jinc)
    {
      /*
        Convert a Jinc function lobes value to a real support value.
      */
      if (resize_filter->support > 16)
        resize_filter->support=jinc_zeros[15];  /* largest entry in table */
      else
        resize_filter->support=jinc_zeros[((long) resize_filter->support)-1];
      /*
        Blur this filter so support is an integer value (lobes dependent).
      */
      if (filter_type == LanczosRadiusFilter)
        resize_filter->blur*=floor(resize_filter->support)/
          resize_filter->support;
    }
  /*
    Expert blur override.
  */
  artifact=GetImageArtifact(image,"filter:blur");
  if (artifact != (const char *) NULL)
    resize_filter->blur*=StringToDouble(artifact,(char **) NULL);
  if (resize_filter->blur < MagickEpsilon)
    resize_filter->blur=(double) MagickEpsilon;  /* avoid divide-by-zero */
  /*
    Expert override of the support setting.
  */
  artifact=GetImageArtifact(image,"filter:support");
  if (artifact != (const char *) NULL)
    resize_filter->support=fabs(StringToDouble(artifact,(char **) NULL));
  /*
    Scale windowing function separately to the support 'clipping' window that
    calling operator is planning to actually use.  (Expert override)
  */
  resize_filter->window_support=resize_filter->support; /* default */
  artifact=GetImageArtifact(image,"filter:win-support");
  if (artifact != (const char *) NULL)
    resize_filter->window_support=fabs(StringToDouble(artifact,(char **) NULL));
  /*
    Adjust window function scaling to match windowing support for weighting
    function.  This avoids a division on every filter call.
  */
  resize_filter->scale*=PerceptibleReciprocal(resize_filter->window_support);
  /*
    Set Cubic Spline B,C values, calculate Cubic coefficients.
  */
  B=0.0;
  C=0.0;
  if ((resize_filter->filter == CubicBC) ||
      (resize_filter->window == CubicBC) )
    {
      B=filters[filter_type].B;
      C=filters[filter_type].C;
      if (filters[window_type].function == CubicBC)
        {
          /* the window (not the filter) is the CubicBC in use */
          B=filters[window_type].B;
          C=filters[window_type].C;
        }
      artifact=GetImageArtifact(image,"filter:b");
      if (artifact != (const char *) NULL)
        {
          B=StringToDouble(artifact,(char **) NULL);
          C=(1.0-B)/2.0;  /* Calculate C to get a Keys cubic filter. */
          artifact=GetImageArtifact(image,"filter:c");  /* user C override */
          if (artifact != (const char *) NULL)
            C=StringToDouble(artifact,(char **) NULL);
        }
      else
        {
          artifact=GetImageArtifact(image,"filter:c");
          if (artifact != (const char *) NULL)
            {
              C=StringToDouble(artifact,(char **) NULL);
              B=1.0-2.0*C;  /* Calculate B to get a Keys cubic filter. */
            }
        }
      {
        const double
          twoB = B+B;

        /*
          Convert B,C values into Cubic Coefficients.  See CubicBC().
  */
        resize_filter->coefficient[0]=1.0-(1.0/3.0)*B;
        resize_filter->coefficient[1]=-3.0+twoB+C;
        resize_filter->coefficient[2]=2.0-1.5*B-C;
        resize_filter->coefficient[3]=(4.0/3.0)*B+4.0*C;
        resize_filter->coefficient[4]=-8.0*C-twoB;
        resize_filter->coefficient[5]=B+5.0*C;
        resize_filter->coefficient[6]=(-1.0/6.0)*B-C;
      }
    }
  /*
    Expert Option Request for verbose details of the resulting filter.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp master
  {
#endif
  if (IsStringTrue(GetImageArtifact(image,"filter:verbose")) != MagickFalse)
    {
      double
        support,
        x;

      /*
        Set the weighting function properly when the weighting function may
        not exactly match the filter of the same name.  EG: a Point filter
        really uses a Box weighting function with a different support than is
        typically used.
      */
      if (resize_filter->filter == Box)       filter_type=BoxFilter;
      if (resize_filter->filter == Sinc)      filter_type=SincFilter;
      if (resize_filter->filter == SincFast)  filter_type=SincFastFilter;
      if (resize_filter->filter == Jinc)      filter_type=JincFilter;
      if (resize_filter->filter == CubicBC)   filter_type=CubicFilter;
      if (resize_filter->window == Box)       window_type=BoxFilter;
      if (resize_filter->window == Sinc)      window_type=SincFilter;
      if (resize_filter->window == SincFast)  window_type=SincFastFilter;
      if (resize_filter->window == Jinc)      window_type=JincFilter;
      if (resize_filter->window == CubicBC)   window_type=CubicFilter;
      /*
        Report Filter Details.
      */
      support=GetResizeFilterSupport(resize_filter);  /* practical_support */
      (void) FormatLocaleFile(stdout,
        "# Resampling Filter (for graphing)\n#\n");
      (void) FormatLocaleFile(stdout,"# filter = %s\n",
        CommandOptionToMnemonic(MagickFilterOptions,filter_type));
      (void) FormatLocaleFile(stdout,"# window = %s\n",
        CommandOptionToMnemonic(MagickFilterOptions,window_type));
      (void) FormatLocaleFile(stdout,"# support = %.*g\n",
        GetMagickPrecision(),(double) resize_filter->support);
      (void) FormatLocaleFile(stdout,"# window-support = %.*g\n",
        GetMagickPrecision(),(double) resize_filter->window_support);
      (void) FormatLocaleFile(stdout,"# scale-blur = %.*g\n",
        GetMagickPrecision(),(double) resize_filter->blur);
      if ((filter_type == GaussianFilter) || (window_type == GaussianFilter))
        (void) FormatLocaleFile(stdout,"# gaussian-sigma = %.*g\n",
          GetMagickPrecision(),(double) resize_filter->coefficient[0]);
      if ( filter_type == KaiserFilter || window_type == KaiserFilter )
        (void) FormatLocaleFile(stdout,"# kaiser-beta = %.*g\n",
          GetMagickPrecision(),(double) resize_filter->coefficient[0]);
      (void) FormatLocaleFile(stdout,"# practical-support = %.*g\n",
        GetMagickPrecision(), (double) support);
      if ((filter_type == CubicFilter) || (window_type == CubicFilter))
        (void) FormatLocaleFile(stdout,"# B,C = %.*g,%.*g\n",
          GetMagickPrecision(),(double) B,GetMagickPrecision(),(double) C);
      (void) FormatLocaleFile(stdout,"\n");
      /*
        Output values of resulting filter graph -- for graphing filter result.
      */
      for (x=0.0; x <= support; x+=0.01f)
        (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",x,
          GetMagickPrecision(),(double)
          GetResizeFilterWeight(resize_filter,x));
      /*
        A final value so gnuplot can graph the 'stop' properly.
      */
      (void) FormatLocaleFile(stdout,"%5.2lf\t%.*g\n",support,
        GetMagickPrecision(),0.0);
    }
  /* Output the above once only for each image - remove setting */
  /* (the const cast is deliberate: the artifact is consumed here) */
  (void) DeleteImageArtifact((Image *) image,"filter:verbose");
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  }
#endif
  return(resize_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A d a p t i v e R e s i z e I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveResizeImage() adaptively resizes the image with pixel resampling.
%
%  This is a shortcut function for a fast interpolative resize using mesh
%  interpolation.  It works well for small resizes of less than +/- 50%
%  of the original image size.  For larger resizing on images a full
%  filtered and slower resize function should be used instead.
%
%  The format of the AdaptiveResizeImage method is:
%
%      Image *AdaptiveResizeImage(const Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the resized image.
%
%    o rows: the number of rows in the resized image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveResizeImage(const Image *image,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  Image
    *resize_image;

  resize_image=InterpolativeResizeImage(image,columns,rows,MeshInterpolatePixel,
    exception);
  return(resize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   B e s s e l O r d e r O n e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BesselOrderOne() computes the Bessel function of x of the first kind of
%  order 1 (J1).  This is used to create the Jinc() filter function below.
%
%    Reduce x to |x| since j1(x)= -j1(-x), and for x in (0,8]
%
%       j1(x) = x*j1(x);
%
%    For x in (8,inf)
%
%       j1(x) = sqrt(2/(pi*x))*(p1(x)*cos(x1)-q1(x)*sin(x1))
%
%    where x1 = x-3*pi/4. Compute sin(x1) and cos(x1) as follow:
%
%       cos(x1) =  cos(x)cos(3pi/4)+sin(x)sin(3pi/4)
%               =  1/sqrt(2) * (sin(x) - cos(x))
%       sin(x1) =  sin(x)cos(3pi/4)-cos(x)sin(3pi/4)
%               = -1/sqrt(2) * (sin(x) + cos(x))
%
%  The format of the BesselOrderOne method is:
%
%      double BesselOrderOne(double x)
%
%  A description of each parameter follows:
%
%    o x: double value.
%
*/

#undef I0
static double I0(double x)
{
  double
    sum,
    t,
    y;

  ssize_t
    i;

  /*
    Zeroth order Bessel function of the first kind: power-series expansion
    I0(x) = sum_{k>=0} (x^2/4)^k / (k!)^2, truncated once a term drops
    below MagickEpsilon.
  */
  sum=1.0;
  y=x*x/4.0;
  t=y;
  for (i=2; t > MagickEpsilon; i++)
  {
    sum+=t;
    t*=y/((double) i*i);
  }
  return(sum);
}

#undef J1
static double J1(double x)
{
  double
    p,
    q;

  ssize_t
    i;

  /*
    Rational (minimax) approximation of J1(x)/x for |x| < 8; evaluated
    as two degree-8 polynomials in x^2 via Horner's rule.  Do not alter
    the coefficients -- they are a matched numerator/denominator pair.
  */
  static const double
    Pone[] =
    {
       0.581199354001606143928050809e+21,
      -0.6672106568924916298020941484e+20,
       0.2316433580634002297931815435e+19,
      -0.3588817569910106050743641413e+17,
       0.2908795263834775409737601689e+15,
      -0.1322983480332126453125473247e+13,
       0.3413234182301700539091292655e+10,
      -0.4695753530642995859767162166e+7,
       0.270112271089232341485679099e+4
    },
    Qone[] =
    {
      0.11623987080032122878585294e+22,
      0.1185770712190320999837113348e+20,
      0.6092061398917521746105196863e+17,
      0.2081661221307607351240184229e+15,
      0.5243710262167649715406728642e+12,
      0.1013863514358673989967045588e+10,
      0.1501793594998585505921097578e+7,
      0.1606931573481487801970916749e+4,
      0.1e+1
    };

  p=Pone[8];
  q=Qone[8];
  for (i=7; i >= 0; i--)
  {
    p=p*x*x+Pone[i];
    q=q*x*x+Qone[i];
  }
  return(p/q);
}

#undef P1
static double P1(double x)
{
  double
    p,
    q;

  ssize_t
    i;

  /*
    Asymptotic modulus polynomial p1 for x in (8,inf); evaluated in the
    variable (8/x)^2 via Horner's rule.
  */
  static const double
    Pone[] =
    {
      0.352246649133679798341724373e+5,
      0.62758845247161281269005675e+5,
      0.313539631109159574238669888e+5,
      0.49854832060594338434500455e+4,
      0.2111529182853962382105718e+3,
      0.12571716929145341558495e+1
    },
    Qone[] =
    {
      0.352246649133679798068390431e+5,
      0.626943469593560511888833731e+5,
      0.312404063819041039923015703e+5,
      0.4930396490181088979386097e+4,
      0.2030775189134759322293574e+3,
      0.1e+1
    };

  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*(8.0/x)*(8.0/x)+Pone[i];
    q=q*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(p/q);
}

#undef Q1
static double Q1(double x)
{
  double
    p,
    q;

  ssize_t
    i;

  /*
    Asymptotic phase polynomial q1 for x in (8,inf); same (8/x)^2 Horner
    scheme as P1 above.
  */
  static const double
    Pone[] =
    {
      0.3511751914303552822533318e+3,
      0.7210391804904475039280863e+3,
      0.4259873011654442389886993e+3,
      0.831898957673850827325226e+2,
      0.45681716295512267064405e+1,
      0.3532840052740123642735e-1
    },
    Qone[] =
    {
      0.74917374171809127714519505e+4,
      0.154141773392650970499848051e+5,
      0.91522317015169922705904727e+4,
      0.18111867005523513506724158e+4,
      0.1038187585462133728776636e+3,
      0.1e+1
    };

  p=Pone[5];
  q=Qone[5];
  for (i=4; i >= 0; i--)
  {
    p=p*(8.0/x)*(8.0/x)+Pone[i];
    q=q*(8.0/x)*(8.0/x)+Qone[i];
  }
  return(p/q);
}

static double BesselOrderOne(double x)
{
  double
    p,
    q;

  /* J1 is odd: remember the sign in p, work with |x|. */
  if (x == 0.0)
    return(0.0);
  p=x;
  if (x < 0.0)
    x=(-x);
  /* Small argument: rational approximation of J1(x)/x, scaled by x. */
  if (x < 8.0)
    return(p*J1(x));
  /* Large argument: asymptotic form sqrt(2/(pi*x))*(P1*cos(x1)-Q1*sin(x1))
     with x1 = x-3*pi/4 expanded per the header comment above. */
  q=sqrt((double) (2.0/(MagickPI*x)))*(P1(x)*(1.0/sqrt(2.0)*(sin(x)-
    cos(x)))-8.0/x*Q1(x)*(-1.0/sqrt(2.0)*(sin(x)+cos(x))));
  if (p < 0.0)
    q=(-q);
  return(q);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e s t r o y R e s i z e F i l t e r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyResizeFilter() destroy the resize filter.
%
%  The format of the DestroyResizeFilter method is:
%
%      ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
%
%  A description of each parameter follows:
%
%    o resize_filter: the resize filter.
%
*/
MagickPrivate ResizeFilter *DestroyResizeFilter(ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  /* Poison the signature so a use-after-free trips the asserts above. */
  resize_filter->signature=(~MagickCoreSignature);
  resize_filter=(ResizeFilter *) RelinquishMagickMemory(resize_filter);
  return(resize_filter);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t R e s i z e F i l t e r S u p p o r t                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterSupport() return the current support window size for this
%  filter.  Note that this may have been enlarged by filter:blur factor.
%
%  The format of the GetResizeFilterSupport method is:
%
%      double GetResizeFilterSupport(const ResizeFilter *resize_filter)
%
%  A description of each parameter follows:
%
%    o filter: Image filter to use.
%
*/

/* Accessor: raw pointer to the filter's coefficient array. */
MagickPrivate double *GetResizeFilterCoefficient(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return((double *) resize_filter->coefficient);
}

/* Accessor: the filter's blur (support-scaling) factor. */
MagickPrivate double GetResizeFilterBlur(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->blur);
}

/* Accessor: scale applied to x before the windowing function. */
MagickPrivate double GetResizeFilterScale(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->scale);
}

/* Accessor: support of the windowing function (unscaled). */
MagickPrivate double GetResizeFilterWindowSupport(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->window_support);
}

/* Accessor: enum identifying the weighting (filter) function. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->filterWeightingType);
}

/* Accessor: enum identifying the windowing function. */
MagickPrivate ResizeWeightingFunctionType GetResizeFilterWindowWeightingType(
  const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->windowWeightingType);
}

/* Practical support: the filter's support enlarged by the blur factor. */
MagickPrivate double GetResizeFilterSupport(const ResizeFilter *resize_filter)
{
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  return(resize_filter->support*resize_filter->blur);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t R e s i z e F i l t e r W e i g h t                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetResizeFilterWeight evaluates the specified resize filter at the point x
%  which usally lies between zero and the filters current 'support' and
%  returns the weight of the filter function at that point.
%
%  The format of the GetResizeFilterWeight method is:
%
%      double GetResizeFilterWeight(const ResizeFilter *resize_filter,
%        const double x)
%
%  A description of each parameter follows:
%
%    o filter: the filter type.
%
%    o x: the point.
%
*/
MagickPrivate double GetResizeFilterWeight(const ResizeFilter *resize_filter,
  const double x)
{
  double
    scale,
    weight,
    x_blur;

  /*
    Windowing function - scale the weighting filter by this amount.
  */
  assert(resize_filter != (ResizeFilter *) NULL);
  assert(resize_filter->signature == MagickCoreSignature);
  /* X offset with blur scaling; PerceptibleReciprocal avoids dividing by 0. */
  x_blur=fabs((double) x)*PerceptibleReciprocal(resize_filter->blur);
  if ((resize_filter->window_support < MagickEpsilon) ||
      (resize_filter->window == Box))
    scale=1.0;  /* Point or Box Filter -- avoid division by zero */
  else
    {
      scale=resize_filter->scale;
      scale=resize_filter->window(x_blur*scale,resize_filter);
    }
  /* Final weight = window(x) * filter(x), both via function pointers. */
  weight=scale*resize_filter->filter(x_blur,resize_filter);
  return(weight);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n t e r p o l a t i v e R e s i z e I m a g e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InterpolativeResizeImage() resizes an image using the specified
%  interpolation method.
%
%  The format of the InterpolativeResizeImage method is:
%
%      Image *InterpolativeResizeImage(const Image *image,const size_t columns,
%        const size_t rows,const PixelInterpolateMethod method,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the resized image.
%
%    o rows: the number of rows in the resized image.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *InterpolativeResizeImage(const Image *image,
  const size_t columns,const size_t rows,const PixelInterpolateMethod method,
  ExceptionInfo *exception)
{
#define InterpolativeResizeImageTag  "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  Image
    *resize_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PointInfo
    scale;

  ssize_t
    y;

  /*
    Interpolatively resize image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  /* Same geometry: a clone suffices, no resampling needed. */
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(resize_image,DirectClass,exception) == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
  /* Source pixels per destination pixel, per axis. */
  scale.x=(double) image->columns/resize_image->columns;
  scale.y=(double) image->rows/resize_image->rows;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    PointInfo
      offset;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if (q == (Quantum *) NULL)
      continue;
    /* Pixel-center mapping: destination center y+0.5 back to source space. */
    offset.y=((double) y+0.5)*scale.y-0.5;
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        offset.x=((double) x+0.5)*scale.x-0.5;
        /* NOTE(review): InterpolatePixelChannels appears to fill all channels
           of q yet is invoked inside the per-channel loop -- confirm whether
           the repeated calls are intentional. */
        status=InterpolatePixelChannels(image,image_view,resize_image,method,
          offset.x,offset.y,q,exception);
        if (status == MagickFalse)
          break;
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,InterpolativeResizeImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    resize_image=DestroyImage(resize_image);
  return(resize_image);
}

#if defined(MAGICKCORE_LQR_DELEGATE)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   L i q u i d R e s c a l e I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LiquidRescaleImage() rescales image with seam carving.
%
%  The format of the LiquidRescaleImage method is:
%
%      Image *LiquidRescaleImage(const Image *image,const size_t columns,
%        const size_t rows,const double delta_x,const double rigidity,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the rescaled image.
%
%    o rows: the number of rows in the rescaled image.
%
%    o delta_x: maximum seam transversal step (0 means straight seams).
%
%    o rigidity: introduce a bias for non-straight seams (typically 0).
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *LiquidRescaleImage(const Image *image,const size_t columns,
  const size_t rows,const double delta_x,const double rigidity,
  ExceptionInfo *exception)
{
#define LiquidRescaleImageTag  "Rescale/Image"

  CacheView
    *image_view,
    *rescale_view;

  gfloat
    *packet,
    *pixels;

  Image
    *rescale_image;

  int
    x_offset,
    y_offset;

  LqrCarver
    *carver;

  LqrRetVal
    lqr_status;

  MagickBooleanType
    status;

  MemoryInfo
    *pixel_info;

  gfloat
    *q;

  ssize_t
    y;

  /*
    Liquid rescale image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  if ((columns == image->columns) && (rows == image->rows))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /* Seam carving needs at least a 3x3 canvas; fall back to normal resize. */
  if ((columns <= 2) || (rows <= 2))
    return(ResizeImage(image,columns,rows,image->filter,exception));
  /*
    Copy the image into a flat 32-bit float buffer for liblqr.
  */
  pixel_info=AcquireVirtualMemory(image->columns,image->rows*MaxPixelChannels*
    sizeof(*pixels));
  if (pixel_info == (MemoryInfo *) NULL)
    return((Image *) NULL);
  pixels=(gfloat *) GetVirtualMemoryBlob(pixel_info);
  status=MagickTrue;
  q=pixels;
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const Quantum
      *magick_restrict p;

    ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      ssize_t
        i;

      /* Normalize quantum values to [0,1] floats for the carver. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
        *q++=QuantumScale*p[i];
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  carver=lqr_carver_new_ext(pixels,(int) image->columns,(int) image->rows,
    (int) GetPixelChannels(image),LQR_COLDEPTH_32F);
  if (carver == (LqrCarver *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
    }
  lqr_carver_set_preserve_input_image(carver);
  /* NOTE(review): the init status is overwritten by the resize status below;
     an init failure is therefore never detected -- confirm intent. */
  lqr_status=lqr_carver_init(carver,(int) delta_x,rigidity);
  lqr_status=lqr_carver_resize(carver,(int) columns,(int) rows);
  (void) lqr_status;
  /* Carver chooses final geometry; clone at the carved dimensions. */
  rescale_image=CloneImage(image,lqr_carver_get_width(carver),
    lqr_carver_get_height(carver),MagickTrue,exception);
  if (rescale_image == (Image *) NULL)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      return((Image *) NULL);
    }
  if (SetImageStorageClass(rescale_image,DirectClass,exception) == MagickFalse)
    {
      pixel_info=RelinquishVirtualMemory(pixel_info);
      rescale_image=DestroyImage(rescale_image);
      return((Image *) NULL);
    }
  /*
    Scan carved pixels back into the rescaled image, one pixel at a time.
  */
  rescale_view=AcquireAuthenticCacheView(rescale_image,exception);
  (void) lqr_carver_scan_reset(carver);
  while (lqr_carver_scan_ext(carver,&x_offset,&y_offset,(void **) &packet) != 0)
  {
    Quantum
      *magick_restrict p;

    ssize_t
      i;

    p=QueueCacheViewAuthenticPixels(rescale_view,x_offset,y_offset,1,1,
      exception);
    if (p == (Quantum *) NULL)
      break;
    for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    {
      PixelChannel
        channel;

      PixelTrait
        rescale_traits,
        traits;

      channel=GetPixelChannelChannel(image,i);
      traits=GetPixelChannelTraits(image,channel);
      rescale_traits=GetPixelChannelTraits(rescale_image,channel);
      if ((traits == UndefinedPixelTrait) ||
          (rescale_traits == UndefinedPixelTrait))
        continue;
      SetPixelChannel(rescale_image,channel,ClampToQuantum(QuantumRange*
        packet[i]),p);
    }
    if (SyncCacheViewAuthenticPixels(rescale_view,exception) == MagickFalse)
      break;
  }
  rescale_view=DestroyCacheView(rescale_view);
  pixel_info=RelinquishVirtualMemory(pixel_info);
  lqr_carver_destroy(carver);
  return(rescale_image);
}
#else
/* Stub used when ImageMagick is built without the liblqr delegate. */
MagickExport Image *LiquidRescaleImage(const Image *image,
  const size_t magick_unused(columns),const size_t magick_unused(rows),
  const double magick_unused(delta_x),const double magick_unused(rigidity),
  ExceptionInfo *exception)
{
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  (void) ThrowMagickException(exception,GetMagickModule(),MissingDelegateError,
    "DelegateLibrarySupportNotBuiltIn","'%s' (LQR)",image->filename);
  return((Image *) NULL);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g n i f y I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagnifyImage() doubles the size of the image with a pixel art scaling
%  algorithm.
%
%  The format of the MagnifyImage method is:
%
%      Image *MagnifyImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Copy one pixel's channels within the scratch buffers used by the pixel-art
   scalers; offsets are in whole pixels, not channel values. */
static inline void CopyPixels(const Quantum *source,const ssize_t source_offset,
  Quantum *destination,const ssize_t destination_offset,const size_t channels)
{
  ssize_t
    i;

  for (i=0; i < (ssize_t) channels; i++)
    destination[channels*destination_offset+i]=source[source_offset*channels+i];
}

/* Average source_size pixels (offsets listed in source_offset) into one
   destination pixel, channel by channel. */
static inline void MixPixels(const Quantum *source,const ssize_t *source_offset,
  const size_t source_size,Quantum *destination,
  const ssize_t destination_offset,const size_t channels)
{
  ssize_t
    sum;

  ssize_t
    i;

  for (i=0; i < (ssize_t) channels; i++)
  {
    ssize_t
      j;

    sum=0;
    for (j=0; j < (ssize_t) source_size; j++)
      sum+=source[source_offset[j]*channels+i];
    /* NOTE(review): ssize_t sum divided by size_t source_size -- the mixed
       signed/unsigned division is fine for the non-negative sums here. */
    destination[channels*destination_offset+i]=(Quantum) (sum/source_size);
  }
}

/* Convenience wrapper: blend exactly two source pixels 50/50. */
static inline void Mix2Pixels(const Quantum *source,
  const ssize_t source_offset1,const ssize_t source_offset2,
  Quantum *destination,const ssize_t destination_offset,const size_t channels)
{
  const ssize_t
    offsets[2] = { source_offset1, source_offset2 };
  MixPixels(source,offsets,2,destination,destination_offset,channels);
}

/* Return 1 if the two pixels are identical across every channel, else 0. */
static inline int PixelsEqual(const Quantum *source1,ssize_t offset1,
  const Quantum *source2,ssize_t offset2,const size_t channels)
{
  ssize_t
    i;

  offset1*=channels;
  offset2*=channels;
  for (i=0; i < (ssize_t) channels; i++)
    if (source1[offset1+i] != source2[offset2+i])
      return(0);
  return(1);
}

/* Eagle 2x: pixels is a 3x3 window (indices 0..8, center 4); result is the
   2x2 output block.  Default to the center, then copy a corner neighbor when
   its two adjacent edge pixels match. */
static inline void Eagle2X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  ssize_t
    i;

  (void) source;
  for (i=0; i < 4; i++)
    CopyPixels(pixels,4,result,i,channels);
  if (PixelsEqual(pixels,0,pixels,1,channels) &&
      PixelsEqual(pixels,1,pixels,3,channels))
    CopyPixels(pixels,0,result,0,channels);
  if (PixelsEqual(pixels,1,pixels,2,channels) &&
      PixelsEqual(pixels,2,pixels,5,channels))
    CopyPixels(pixels,2,result,1,channels);
  if (PixelsEqual(pixels,3,pixels,6,channels) &&
      PixelsEqual(pixels,6,pixels,7,channels))
    CopyPixels(pixels,6,result,2,channels);
  if (PixelsEqual(pixels,5,pixels,8,channels) &&
      PixelsEqual(pixels,8,pixels,7,channels))
    CopyPixels(pixels,8,result,3,channels);
}

/* hq2x blending kernel: 'rule' (from Hq2XTable) selects how the center pixel
   e is mixed with its neighbors a,b,d,f,h to form one output pixel.  The
   offset lists encode the standard hq2x blend weights as repeat counts. */
static void Hq2XHelper(const unsigned int rule,const Quantum *source,
  Quantum *destination,const ssize_t destination_offset,const size_t channels,
  const ssize_t e,const ssize_t a,const ssize_t b,const ssize_t d,
  const ssize_t f,const ssize_t h)
{
#define caseA(N,A,B,C,D) \
  case N: \
  { \
    const ssize_t \
      offsets[4] = { A, B, C, D }; \
 \
    MixPixels(source,offsets,4,destination,destination_offset,channels);\
    break; \
  }
#define caseB(N,A,B,C,D,E,F,G,H) \
  case N: \
  { \
    const ssize_t \
      offsets[8] = { A, B, C, D, E, F, G, H }; \
 \
    MixPixels(source,offsets,8,destination,destination_offset,channels);\
    break; \
  }

  switch (rule)
  {
    case 0:
    {
      CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    caseA(1,e,e,e,a)
    caseA(2,e,e,e,d)
    caseA(3,e,e,e,b)
    caseA(4,e,e,d,b)
    caseA(5,e,e,a,b)
    caseA(6,e,e,a,d)
    caseB(7,e,e,e,e,e,b,b,d)
    caseB(8,e,e,e,e,e,d,d,b)
    caseB(9,e,e,e,e,e,e,d,b)
    caseB(10,e,e,d,d,d,b,b,b)
    case 11:
    {
      const ssize_t
        offsets[16] = { e, e, e, e, e, e, e, e, e, e, e, e, e, e, d, b };

      MixPixels(source,offsets,16,destination,destination_offset,channels);
      break;
    }
    case 12:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[4] = { e, e, d, b };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 13:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, d, d, d, b, b, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 14:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[16] = { e, e, e, e, e, e, e, e, e, e, e, e, e, e, d, b };

          MixPixels(source,offsets,16,destination,destination_offset,channels);
        }
      else
        CopyPixels(source,e,destination,destination_offset,channels);
      break;
    }
    case 15:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[4] = { e, e, d, b };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 16:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, e, d, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 17:
    {
      if (PixelsEqual(source,b,source,d,channels))
        {
          const ssize_t
            offsets[8] = { e, e, d, d, d, b, b, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, a };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    case 18:
    {
      if (PixelsEqual(source,b,source,f,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, b, b, d };
          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, d };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
    default:
    {
      if (PixelsEqual(source,d,source,h,channels))
        {
          const ssize_t
            offsets[8] = { e, e, e, e, e, d, d, b };

          MixPixels(source,offsets,8,destination,destination_offset,channels);
        }
      else
        {
          const ssize_t
            offsets[4] = { e, e, e, b };

          MixPixels(source,offsets,4,destination,destination_offset,channels);
        }
      break;
    }
  }
#undef caseA
#undef caseB
}

/* Fold an 8-element 0/1 neighbor-difference pattern into the 0..255 index
   used to look up the hq2x rule table (pattern[0] is the high bit). */
static inline unsigned int Hq2XPatternToNumber(const int *pattern)
{
  ssize_t
    i;

  unsigned int
    result,
    order;

  result=0;
  order=1;
  for (i=7; i >= 0; i--)
  {
    result+=order*pattern[i];
    order*=2;
  }
  return(result);
}

/* hq2x: pixels is a 3x3 window (center 4); result is the 2x2 output block.
   Each output corner is computed from the same rule table by rotating the
   neighbor-difference pattern 90 degrees per corner. */
static inline void Hq2X(const Image *source,const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  /* 256-entry rule table indexed by the neighbor-difference bit pattern. */
  static const unsigned int
    Hq2XTable[] =
    {
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 15, 12, 5,  3, 17, 13,
      4, 4, 6, 18, 4, 4, 6, 18, 5,  3, 12, 12, 5,  3,  1, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 17, 13, 5,  3, 16, 14,
      4, 4, 6, 18, 4, 4, 6, 18, 5,  3, 16, 12, 5,  3,  1, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5, 19, 12, 12, 5, 19, 16, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3, 16, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5, 19,  1, 12, 5, 19,  1, 14,
      4, 4, 6,  2, 4, 4, 6, 18, 5,  3, 16, 12, 5, 19,  1, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 15, 12, 5,  3, 17, 13,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3, 16, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 17, 13, 5,  3, 16, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 13, 5,  3,  1, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3, 16, 13,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3,  1, 12,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3, 16, 12, 5,  3,  1, 14,
      4, 4, 6,  2, 4, 4, 6,  2, 5,  3,  1, 12, 5,  3,  1, 14
    };

  const int
    pattern1[] =
    {
      !PixelsEqual(pixels,4,pixels,8,channels),
      !PixelsEqual(pixels,4,pixels,7,channels),
      !PixelsEqual(pixels,4,pixels,6,channels),
      !PixelsEqual(pixels,4,pixels,5,channels),
      !PixelsEqual(pixels,4,pixels,3,channels),
      !PixelsEqual(pixels,4,pixels,2,channels),
      !PixelsEqual(pixels,4,pixels,1,channels),
      !PixelsEqual(pixels,4,pixels,0,channels)
    };

/* Rotate the 3x3 neighbor pattern by 90 degrees. */
#define Rotated(p) p[2], p[4], p[7], p[1], p[6], p[0], p[3], p[5]
  const int
    pattern2[] = { Rotated(pattern1) };

  const int
    pattern3[] = { Rotated(pattern2) };

  const int
    pattern4[] = { Rotated(pattern3) };
#undef Rotated
  (void) source;
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern1)],pixels,result,0,
    channels,4,0,1,3,5,7);
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern2)],pixels,result,1,
    channels,4,2,5,1,7,3);
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern3)],pixels,result,3,
    channels,4,8,7,5,3,1);
  Hq2XHelper(Hq2XTable[Hq2XPatternToNumber(pattern4)],pixels,result,2,
    channels,4,6,3,7,1,5);
}

/* Fish 2x: pixels is a 3x3 window; result is the 2x2 output block.  Only
   the bottom-right output pixel (index 3) gets the full edge analysis; the
   other three are copies keyed on relative intensity. */
static void Fish2X(const Image *source,const Quantum *pixels,Quantum *result,
  const size_t channels)
{
#define Corner(A,B,C,D) \
{ \
  if (intensities[B] > intensities[A]) \
    { \
      const ssize_t \
        offsets[3] = { B, C, D }; \
 \
      MixPixels(pixels,offsets,3,result,3,channels); \
    } \
  else \
    { \
      const ssize_t \
        offsets[3] = { A, B, C }; \
 \
      MixPixels(pixels,offsets,3,result,3,channels); \
    } \
}

#define Line(A,B,C,D) \
{ \
  if (intensities[C] > intensities[A]) \
    Mix2Pixels(pixels,C,D,result,3,channels); \
  else \
    Mix2Pixels(pixels,A,B,result,3,channels); \
}

  const ssize_t
    pixels_offsets[4] = { 0, 1, 3, 4 };

  MagickFloatType
    intensities[9];

  int
    ae,
    bd,
    ab,
    ad,
    be,
    de;

  ssize_t
    i;

  for (i=0; i < 9; i++)
    intensities[i]=GetPixelIntensity(source,pixels + i*channels);
  CopyPixels(pixels,0,result,0,channels);
  CopyPixels(pixels,(ssize_t) (intensities[0] > intensities[1] ? 0 : 1),result,
    1,channels);
  CopyPixels(pixels,(ssize_t) (intensities[0] > intensities[3] ?
    0 : 3),result, 2,channels);
  /* Pairwise equality flags among the 2x2 top-left quad (0,1,3,4). */
  ae=PixelsEqual(pixels,0,pixels,4,channels);
  bd=PixelsEqual(pixels,1,pixels,3,channels);
  ab=PixelsEqual(pixels,0,pixels,1,channels);
  de=PixelsEqual(pixels,3,pixels,4,channels);
  ad=PixelsEqual(pixels,0,pixels,3,channels);
  be=PixelsEqual(pixels,1,pixels,4,channels);
  if (ae && bd && ab)
    {
      CopyPixels(pixels,0,result,3,channels);
      return;
    }
  if (ad && de && !ab)
    {
      Corner(1,0,4,3)
      return;
    }
  if (be && de && !ab)
    {
      Corner(0,1,3,4)
      return;
    }
  if (ad && ab && !be)
    {
      Corner(4,3,1,0)
      return;
    }
  if (ab && be && !ad)
    {
      Corner(3,0,4,1)
      return;
    }
  if (ae && (!bd || intensities[1] > intensities[0]))
    {
      Mix2Pixels(pixels,0,4,result,3,channels);
      return;
    }
  if (bd && (!ae || intensities[0] > intensities[1]))
    {
      Mix2Pixels(pixels,1,3,result,3,channels);
      return;
    }
  if (ab)
    {
      Line(0,1,3,4)
      return;
    }
  if (de)
    {
      Line(3,4,0,1)
      return;
    }
  if (ad)
    {
      Line(0,3,1,4)
      return;
    }
  if (be)
    {
      Line(1,4,0,3)
      return;
    }
  /* No structure detected: average the whole quad. */
  MixPixels(pixels,pixels_offsets,4,result,3,channels);
#undef Corner
#undef Line
}

/* xBR 2x: pixels is a 5x5 window (indices 0..24, center 12); result is the
   2x2 output block.  Each corner compares two weighted edge sums to decide
   between a diagonal blend and a plain copy of the center. */
static void Xbr2X(const Image *magick_unused(source),const Quantum *pixels,
  Quantum *result,const size_t channels)
{
/* Weight is 0 when the two pixels match, 1 when they differ. */
#define WeightVar(M,N) const int w_##M##_##N = \
  PixelsEqual(pixels,M,pixels,N,channels) ? 0 : 1;

  WeightVar(12,11)
  WeightVar(12,7)
  WeightVar(12,13)
  WeightVar(12,17)
  WeightVar(12,16)
  WeightVar(12,8)
  WeightVar(6,10)
  WeightVar(6,2)
  WeightVar(11,7)
  WeightVar(11,17)
  WeightVar(11,5)
  WeightVar(7,13)
  WeightVar(7,1)
  WeightVar(12,6)
  WeightVar(12,18)
  WeightVar(8,14)
  WeightVar(8,2)
  WeightVar(13,17)
  WeightVar(13,9)
  WeightVar(7,3)
  WeightVar(16,10)
  WeightVar(16,22)
  WeightVar(17,21)
  WeightVar(11,15)
  WeightVar(18,14)
  WeightVar(18,22)
  WeightVar(17,23)
  WeightVar(17,19)
#undef WeightVar
  magick_unreferenced(source);
  if (
    w_12_16 + w_12_8 + w_6_10 + w_6_2 + (4 * w_11_7) <
    w_11_17 + w_11_5 + w_7_13 + w_7_1 + (4 * w_12_6)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_11 <= w_12_7 ? 11 : 7),12,result,0,
      channels);
  else
    CopyPixels(pixels,12,result,0,channels);
  if (
    w_12_18 + w_12_6 + w_8_14 + w_8_2 + (4 * w_7_13) <
    w_13_17 + w_13_9 + w_11_7 + w_7_3 + (4 * w_12_8)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_7 <= w_12_13 ? 7 : 13),12,result,1,
      channels);
  else
    CopyPixels(pixels,12,result,1,channels);
  if (
    w_12_6 + w_12_18 + w_16_10 + w_16_22 + (4 * w_11_17) <
    w_11_7 + w_11_15 + w_13_17 + w_17_21 + (4 * w_12_16)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_11 <= w_12_17 ? 11 : 17),12,result,2,
      channels);
  else
    CopyPixels(pixels,12,result,2,channels);
  if (
    w_12_8 + w_12_16 + w_18_14 + w_18_22 + (4 * w_13_17) <
    w_11_17 + w_17_23 + w_17_19 + w_7_13 + (4 * w_12_18)
  )
    Mix2Pixels(pixels,(ssize_t) (w_12_13 <= w_12_17 ? 13 : 17),12,result,3,
      channels);
  else
    CopyPixels(pixels,12,result,3,channels);
}

/* Scale2X (AdvMAME2x): pixels is a 3x3 window; result is the 2x2 output
   block.  Copies edge neighbors into matching corners unless the opposing
   edges match (which would create artifacts). */
static void Scale2X(const Image *magick_unused(source),const Quantum *pixels,
  Quantum *result,const size_t channels)
{
  magick_unreferenced(source);
  if (PixelsEqual(pixels,1,pixels,7,channels) ||
      PixelsEqual(pixels,3,pixels,5,channels))
    {
      ssize_t
        i;

      for (i=0; i < 4; i++)
        CopyPixels(pixels,4,result,i,channels);
      return;
    }
  if (PixelsEqual(pixels,1,pixels,3,channels))
    CopyPixels(pixels,3,result,0,channels);
  else
    CopyPixels(pixels,4,result,0,channels);
  if (PixelsEqual(pixels,1,pixels,5,channels))
    CopyPixels(pixels,5,result,1,channels);
  else
    CopyPixels(pixels,4,result,1,channels);
  if (PixelsEqual(pixels,3,pixels,7,channels))
    CopyPixels(pixels,3,result,2,channels);
  else
    CopyPixels(pixels,4,result,2,channels);
  if (PixelsEqual(pixels,5,pixels,7,channels))
    CopyPixels(pixels,5,result,3,channels);
  else
    CopyPixels(pixels,4,result,3,channels);
}

/* EPX/Eric's Pixel Expansion variant (epbx): pixels is a 3x3 window; result
   is the 2x2 output block. */
static void Epbx2X(const Image *magick_unused(source),const Quantum *pixels,
  Quantum *result,const size_t channels)
{
#define HelperCond(a,b,c,d,e,f,g) ( \
  PixelsEqual(pixels,a,pixels,b,channels) && ( \
    PixelsEqual(pixels,c,pixels,d,channels) || \
    PixelsEqual(pixels,c,pixels,e,channels) || \
    PixelsEqual(pixels,a,pixels,f,channels) || \
    PixelsEqual(pixels,b,pixels,g,channels) \
  ) \
)

  ssize_t
    i;

  magick_unreferenced(source);
  /* Start from a plain 2x2 copy of the center pixel. */
  for (i=0; i < 4; i++)
    CopyPixels(pixels,4,result,i,channels);
  if (
    !PixelsEqual(pixels,3,pixels,5,channels) &&
    !PixelsEqual(pixels,1,pixels,7,channels) &&
    (
      PixelsEqual(pixels,4,pixels,3,channels) ||
      PixelsEqual(pixels,4,pixels,7,channels) ||
      PixelsEqual(pixels,4,pixels,5,channels) ||
      PixelsEqual(pixels,4,pixels,1,channels) ||
      (
        (
          !PixelsEqual(pixels,0,pixels,8,channels) ||
          PixelsEqual(pixels,4,pixels,6,channels) ||
          PixelsEqual(pixels,3,pixels,2,channels)
        ) &&
        (
          !PixelsEqual(pixels,6,pixels,2,channels) ||
          PixelsEqual(pixels,4,pixels,0,channels) ||
          PixelsEqual(pixels,4,pixels,8,channels)
        )
      )
    )
  )
    {
      if (HelperCond(1,3,4,0,8,2,6))
        Mix2Pixels(pixels,1,3,result,0,channels);
      if (HelperCond(5,1,4,2,6,8,0))
        Mix2Pixels(pixels,5,1,result,1,channels);
      if (HelperCond(3,7,4,6,2,0,8))
        Mix2Pixels(pixels,3,7,result,2,channels);
      if (HelperCond(7,5,4,8,0,6,2))
        Mix2Pixels(pixels,7,5,result,3,channels);
    }
#undef HelperCond
}

/* Eagle 3x: pixels is a 3x3 window; result is the 3x3 output block.
   Corners detected as in Eagle2X; output edges blend adjacent corners. */
static inline void Eagle3X(const Image *magick_unused(source),
  const Quantum *pixels,Quantum *result,const size_t channels)
{
  ssize_t
    corner_tl,
    corner_tr,
    corner_bl,
    corner_br;

  magick_unreferenced(source);
  corner_tl=PixelsEqual(pixels,0,pixels,1,channels) &&
    PixelsEqual(pixels,0,pixels,3,channels);
  corner_tr=PixelsEqual(pixels,1,pixels,2,channels) &&
    PixelsEqual(pixels,2,pixels,5,channels);
  corner_bl=PixelsEqual(pixels,3,pixels,6,channels) &&
    PixelsEqual(pixels,6,pixels,7,channels);
  corner_br=PixelsEqual(pixels,5,pixels,7,channels) &&
    PixelsEqual(pixels,7,pixels,8,channels);
  CopyPixels(pixels,(ssize_t) (corner_tl ? 0 : 4),result,0,channels);
  if (corner_tl && corner_tr)
    Mix2Pixels(pixels,0,2,result,1,channels);
  else
    CopyPixels(pixels,4,result,1,channels);
  CopyPixels(pixels,(ssize_t) (corner_tr ? 1 : 4),result,2,channels);
  if (corner_tl && corner_bl)
    Mix2Pixels(pixels,0,6,result,3,channels);
  else
    CopyPixels(pixels,4,result,3,channels);
  CopyPixels(pixels,4,result,4,channels);
  if (corner_tr && corner_br)
    Mix2Pixels(pixels,2,8,result,5,channels);
  else
    CopyPixels(pixels,4,result,5,channels);
  CopyPixels(pixels,(ssize_t) (corner_bl ? 3 : 4),result,6,channels);
  if (corner_bl && corner_br)
    Mix2Pixels(pixels,6,8,result,7,channels);
  else
    CopyPixels(pixels,4,result,7,channels);
  CopyPixels(pixels,(ssize_t) (corner_br ? 5 : 4),result,8,channels);
}

/* Eagle 3xB: like Eagle3X but edges always keep the center pixel (no
   corner-to-corner blending). */
static inline void Eagle3XB(const Image *magick_unused(source),
  const Quantum *pixels,Quantum *result,const size_t channels)
{
  ssize_t
    corner_tl,
    corner_tr,
    corner_bl,
    corner_br;

  magick_unreferenced(source);
  corner_tl=PixelsEqual(pixels,0,pixels,1,channels) &&
    PixelsEqual(pixels,0,pixels,3,channels);
  corner_tr=PixelsEqual(pixels,1,pixels,2,channels) &&
    PixelsEqual(pixels,2,pixels,5,channels);
  corner_bl=PixelsEqual(pixels,3,pixels,6,channels) &&
    PixelsEqual(pixels,6,pixels,7,channels);
  corner_br=PixelsEqual(pixels,5,pixels,7,channels) &&
    PixelsEqual(pixels,7,pixels,8,channels);
  CopyPixels(pixels,(ssize_t) (corner_tl ? 0 : 4),result,0,channels);
  CopyPixels(pixels,4,result,1,channels);
  CopyPixels(pixels,(ssize_t) (corner_tr ? 1 : 4),result,2,channels);
  CopyPixels(pixels,4,result,3,channels);
  CopyPixels(pixels,4,result,4,channels);
  CopyPixels(pixels,4,result,5,channels);
  CopyPixels(pixels,(ssize_t) (corner_bl ? 3 : 4),result,6,channels);
  CopyPixels(pixels,4,result,7,channels);
  CopyPixels(pixels,(ssize_t) (corner_br ?
5 : 4),result,8,channels); } static inline void Scale3X(const Image *magick_unused(source), const Quantum *pixels,Quantum *result,const size_t channels) { magick_unreferenced(source); if (!PixelsEqual(pixels,1,pixels,7,channels) && !PixelsEqual(pixels,3,pixels,5,channels)) { if (PixelsEqual(pixels,3,pixels,1,channels)) CopyPixels(pixels,3,result,0,channels); else CopyPixels(pixels,4,result,0,channels); if ( ( PixelsEqual(pixels,3,pixels,1,channels) && !PixelsEqual(pixels,4,pixels,2,channels) ) || ( PixelsEqual(pixels,5,pixels,1,channels) && !PixelsEqual(pixels,4,pixels,0,channels) ) ) CopyPixels(pixels,1,result,1,channels); else CopyPixels(pixels,4,result,1,channels); if (PixelsEqual(pixels,5,pixels,1,channels)) CopyPixels(pixels,5,result,2,channels); else CopyPixels(pixels,4,result,2,channels); if ( ( PixelsEqual(pixels,3,pixels,1,channels) && !PixelsEqual(pixels,4,pixels,6,channels) ) || ( PixelsEqual(pixels,3,pixels,7,channels) && !PixelsEqual(pixels,4,pixels,0,channels) ) ) CopyPixels(pixels,3,result,3,channels); else CopyPixels(pixels,4,result,3,channels); CopyPixels(pixels,4,result,4,channels); if ( ( PixelsEqual(pixels,5,pixels,1,channels) && !PixelsEqual(pixels,4,pixels,8,channels) ) || ( PixelsEqual(pixels,5,pixels,7,channels) && !PixelsEqual(pixels,4,pixels,2,channels) ) ) CopyPixels(pixels,5,result,5,channels); else CopyPixels(pixels,4,result,5,channels); if (PixelsEqual(pixels,3,pixels,7,channels)) CopyPixels(pixels,3,result,6,channels); else CopyPixels(pixels,4,result,6,channels); if ( ( PixelsEqual(pixels,3,pixels,7,channels) && !PixelsEqual(pixels,4,pixels,8,channels) ) || ( PixelsEqual(pixels,5,pixels,7,channels) && !PixelsEqual(pixels,4,pixels,6,channels) ) ) CopyPixels(pixels,7,result,7,channels); else CopyPixels(pixels,4,result,7,channels); if (PixelsEqual(pixels,5,pixels,7,channels)) CopyPixels(pixels,5,result,8,channels); else CopyPixels(pixels,4,result,8,channels); } else { ssize_t i; for (i=0; i < 9; i++) 
CopyPixels(pixels,4,result,i,channels); } } MagickExport Image *MagnifyImage(const Image *image,ExceptionInfo *exception) { #define MagnifyImageTag "Magnify/Image" CacheView *image_view, *magnify_view; const char *option; Image *source_image, *magnify_image; MagickBooleanType status; MagickOffsetType progress; OffsetInfo offset; RectangleInfo rectangle; ssize_t y; unsigned char magnification, width; void (*scaling_method)(const Image *,const Quantum *,Quantum *,size_t); /* Initialize magnified image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); option=GetImageOption(image->image_info,"magnify:method"); if (option == (char *) NULL) option="scale2x"; scaling_method=Scale2X; magnification=1; width=1; switch (*option) { case 'e': { if (LocaleCompare(option,"eagle2x") == 0) { scaling_method=Eagle2X; magnification=2; width=3; break; } if (LocaleCompare(option,"eagle3x") == 0) { scaling_method=Eagle3X; magnification=3; width=3; break; } if (LocaleCompare(option,"eagle3xb") == 0) { scaling_method=Eagle3XB; magnification=3; width=3; break; } if (LocaleCompare(option,"epbx2x") == 0) { scaling_method=Epbx2X; magnification=2; width=3; break; } break; } case 'f': { if (LocaleCompare(option,"fish2x") == 0) { scaling_method=Fish2X; magnification=2; width=3; break; } break; } case 'h': { if (LocaleCompare(option,"hq2x") == 0) { scaling_method=Hq2X; magnification=2; width=3; break; } break; } case 's': { if (LocaleCompare(option,"scale2x") == 0) { scaling_method=Scale2X; magnification=2; width=3; break; } if (LocaleCompare(option,"scale3x") == 0) { scaling_method=Scale3X; magnification=3; width=3; break; } break; } case 'x': { if (LocaleCompare(option,"xbr2x") == 0) { scaling_method=Xbr2X; magnification=2; width=5; } break; 
} default: break; } /* Make a working copy of the source image and convert it to RGB colorspace. */ source_image=CloneImage(image,image->columns,image->rows,MagickTrue, exception); if (source_image == (Image *) NULL) return((Image *) NULL); offset.x=0; offset.y=0; rectangle.x=0; rectangle.y=0; rectangle.width=image->columns; rectangle.height=image->rows; (void) CopyImagePixels(source_image,image,&rectangle,&offset,exception); (void) SetImageColorspace(source_image,RGBColorspace,exception); magnify_image=CloneImage(source_image,magnification*source_image->columns, magnification*source_image->rows,MagickTrue,exception); if (magnify_image == (Image *) NULL) { source_image=DestroyImage(source_image); return((Image *) NULL); } /* Magnify the image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(source_image,exception); magnify_view=AcquireAuthenticCacheView(magnify_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(source_image,magnify_image,source_image->rows,1) #endif for (y=0; y < (ssize_t) source_image->rows; y++) { Quantum r[128]; /* to hold result pixels */ Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(magnify_view,0,magnification*y, magnify_image->columns,magnification,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } /* Magnify this row of pixels. */ for (x=0; x < (ssize_t) source_image->columns; x++) { const Quantum *magick_restrict p; size_t channels; ssize_t i; ssize_t j; p=GetCacheViewVirtualPixels(image_view,x-width/2,y-width/2,width,width, exception); channels=GetPixelChannels(source_image); scaling_method(source_image,p,r,channels); /* Copy the result pixels into the final image. 
*/ for (j=0; j < (ssize_t) magnification; j++) for (i=0; i < (ssize_t) (channels*magnification); i++) q[j*channels*magnify_image->columns+i]=r[j*magnification*channels+i]; q+=magnification*GetPixelChannels(magnify_image); } if (SyncCacheViewAuthenticPixels(magnify_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,MagnifyImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } magnify_view=DestroyCacheView(magnify_view); image_view=DestroyCacheView(image_view); source_image=DestroyImage(source_image); if (status == MagickFalse) magnify_image=DestroyImage(magnify_image); return(magnify_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M i n i f y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MinifyImage() is a convenience method that scales an image proportionally to % half its size. % % The format of the MinifyImage method is: % % Image *MinifyImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *MinifyImage(const Image *image,ExceptionInfo *exception)
{
  Image
    *minify_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Half-size resize with the spline filter; errors surface via exception. */
  minify_image=ResizeImage(image,image->columns/2,image->rows/2,SplineFilter,
    exception);
  return(minify_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s a m p l e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResampleImage() resize image in terms of its pixel size, so that when
%  displayed at the given resolution it will be the same size in terms of
%  real world units as the original image at the original resolution.
%
%  The format of the ResampleImage method is:
%
%      Image *ResampleImage(Image *image,const double x_resolution,
%        const double y_resolution,const FilterType filter,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image to be resized to fit the given resolution.
%
%    o x_resolution: the new image x resolution.
%
%    o y_resolution: the new image y resolution.
%
%    o filter: Image filter to use.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ResampleImage(const Image *image,const double x_resolution,
  const double y_resolution,const FilterType filter,ExceptionInfo *exception)
{
#define ResampleImageTag  "Resample/Image"

  Image
    *resample_image;

  size_t
    height,
    width;

  /*
    Initialize sampled image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Convert target resolution to pixel dimensions, rounding to nearest;
    a zero stored resolution falls back to DefaultResolution.
  */
  width=(size_t) (x_resolution*image->columns/(image->resolution.x == 0.0 ?
    DefaultResolution : image->resolution.x)+0.5);
  height=(size_t) (y_resolution*image->rows/(image->resolution.y == 0.0 ?
    DefaultResolution : image->resolution.y)+0.5);
  resample_image=ResizeImage(image,width,height,filter,exception);
  if (resample_image != (Image *) NULL)
    {
      /* Record the new physical resolution on the result. */
      resample_image->resolution.x=x_resolution;
      resample_image->resolution.y=y_resolution;
    }
  return(resample_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s i z e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResizeImage() scales an image to the desired dimensions, using the given
%  filter (see AcquireFilterInfo()).
%
%  If an undefined filter is given the filter defaults to Mitchell for a
%  colormapped image, a image with a matte channel, or if the image is
%  enlarged.  Otherwise the filter defaults to a Lanczos.
%
%  ResizeImage() was inspired by Paul Heckbert's "zoom" program.
%
%  The format of the ResizeImage method is:
%
%      Image *ResizeImage(Image *image,const size_t columns,const size_t rows,
%        const FilterType filter,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the scaled image.
%
%    o rows: the number of rows in the scaled image.
%
%    o filter: Image filter to use.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* One filter tap: a source pixel index and its filter weight. */
typedef struct _ContributionInfo
{
  double
    weight;

  ssize_t
    pixel;
} ContributionInfo;

/*
  Release a per-thread set of contribution buffers allocated by
  AcquireContributionThreadSet(); always returns NULL.
*/
static ContributionInfo **DestroyContributionThreadSet(
  ContributionInfo **contribution)
{
  ssize_t
    i;

  assert(contribution != (ContributionInfo **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (contribution[i] != (ContributionInfo *) NULL)
      contribution[i]=(ContributionInfo *) RelinquishAlignedMemory(
        contribution[i]);
  contribution=(ContributionInfo **) RelinquishMagickMemory(contribution);
  return(contribution);
}

/*
  Allocate one aligned contribution buffer of 'count' entries for each
  worker thread.  Returns NULL on allocation failure; any buffers already
  allocated are released first.
*/
static ContributionInfo **AcquireContributionThreadSet(const size_t count)
{
  ssize_t
    i;

  ContributionInfo
    **contribution;

  size_t
    number_threads;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  contribution=(ContributionInfo **) AcquireQuantumMemory(number_threads,
    sizeof(*contribution));
  if (contribution == (ContributionInfo **) NULL)
    return((ContributionInfo **) NULL);
  (void) memset(contribution,0,number_threads*sizeof(*contribution));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    contribution[i]=(ContributionInfo *) MagickAssumeAligned(
      AcquireAlignedMemory(count,sizeof(**contribution)));
    if (contribution[i] == (ContributionInfo *) NULL)
      return(DestroyContributionThreadSet(contribution));
  }
  return(contribution);
}

/*
  HorizontalFilter(): resize 'image' into 'resize_image' along the x axis
  only, using the supplied resize filter.  'span' and '*progress' feed the
  shared progress monitor across the two filter passes.  Returns MagickTrue
  on success, MagickFalse on failure.
*/
static MagickBooleanType HorizontalFilter(
  const ResizeFilter *magick_restrict resize_filter,
  const Image *magick_restrict image,Image *magick_restrict resize_image,
  const double x_factor,const MagickSizeType span,
  MagickOffsetType *magick_restrict progress,ExceptionInfo *exception)
{
#define ResizeImageTag  "Resize/Image"

  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  MagickBooleanType
    status;

  double
    scale,
    support;

  ssize_t
    x;

  /*
    Apply filter to resize horizontally from image to resize image.
  */
  scale=MagickMax(1.0/x_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ?
    DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point
        sampling.
      */
      support=(double) 0.5;
      scale=1.0;
    }
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->columns,1)
#endif
  for (x=0; x < (ssize_t) resize_image->columns; x++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      bisect,
      density;

    const Quantum
      *magick_restrict p;

    ContributionInfo
      *magick_restrict contribution;

    Quantum
      *magick_restrict q;

    ssize_t
      y;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /* Centre of the source window that maps onto destination column x. */
    bisect=(double) (x+0.5)/x_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->columns);
    density=0.0;
    contribution=contributions[id];
    /* Compute filter weights over the source window [start,stop). */
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((double) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if (n == 0)
      continue;
    if ((density != 0.0) && (density != 1.0))
      {
        ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    p=GetCacheViewVirtualPixels(image_view,contribution[0].pixel,0,(size_t)
      (contribution[n-1].pixel-contribution[0].pixel+1),image->rows,exception);
    q=QueueCacheViewAuthenticPixels(resize_view,x,0,1,resize_image->rows,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (y=0; y < (ssize_t) resize_image->rows; y++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        ssize_t
          j;

        ssize_t
          k;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        if (((resize_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)))
          {
            /* Copy-through channel: take the nearest source pixel. */
            j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
              stop-1.0)+0.5);
            k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
              (contribution[j-start].pixel-contribution[0].pixel);
            SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
              q);
            continue;
          }
        pixel=0.0;
        if ((resize_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (j=0; j < n; j++)
            {
              k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
                (contribution[j].pixel-contribution[0].pixel);
              alpha=contribution[j].weight;
              pixel+=alpha*p[k*GetPixelChannels(image)+i];
            }
            SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /*
          Alpha blending.
        */
        gamma=0.0;
        for (j=0; j < n; j++)
        {
          k=y*(contribution[n-1].pixel-contribution[0].pixel+1)+
            (contribution[j].pixel-contribution[0].pixel);
          /* Weight each tap by its alpha; gamma renormalizes at the end. */
          alpha=contribution[j].weight*QuantumScale*
            GetPixelAlpha(image,p+k*GetPixelChannels(image));
          pixel+=alpha*p[k*GetPixelChannels(image)+i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        (*progress)++;
        proceed=SetImageProgress(image,ResizeImageTag,*progress,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}

/*
  VerticalFilter(): resize 'image' into 'resize_image' along the y axis
  only; mirror image of HorizontalFilter() above.  Returns MagickTrue on
  success, MagickFalse on failure.
*/
static MagickBooleanType VerticalFilter(
  const ResizeFilter *magick_restrict resize_filter,
  const Image *magick_restrict image,Image *magick_restrict resize_image,
  const double y_factor,const MagickSizeType span,
  MagickOffsetType *magick_restrict progress,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *resize_view;

  ClassType
    storage_class;

  ContributionInfo
    **magick_restrict contributions;

  double
    scale,
    support;

  MagickBooleanType
    status;

  ssize_t
    y;

  /*
    Apply filter to resize vertically from image to resize image.
  */
  scale=MagickMax(1.0/y_factor+MagickEpsilon,1.0);
  support=scale*GetResizeFilterSupport(resize_filter);
  storage_class=support > 0.5 ? DirectClass : image->storage_class;
  if (SetImageStorageClass(resize_image,storage_class,exception) == MagickFalse)
    return(MagickFalse);
  if (support < 0.5)
    {
      /*
        Support too small even for nearest neighbour: Reduce to point
        sampling.
      */
      support=(double) 0.5;
      scale=1.0;
    }
  contributions=AcquireContributionThreadSet((size_t) (2.0*support+3.0));
  if (contributions == (ContributionInfo **) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(MagickFalse);
    }
  status=MagickTrue;
  scale=PerceptibleReciprocal(scale);
  image_view=AcquireVirtualCacheView(image,exception);
  resize_view=AcquireAuthenticCacheView(resize_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,resize_image,resize_image->rows,1)
#endif
  for (y=0; y < (ssize_t) resize_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    double
      bisect,
      density;

    const Quantum
      *magick_restrict p;

    ContributionInfo
      *magick_restrict contribution;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    ssize_t
      n,
      start,
      stop;

    if (status == MagickFalse)
      continue;
    /* Centre of the source window that maps onto destination row y. */
    bisect=(double) (y+0.5)/y_factor+MagickEpsilon;
    start=(ssize_t) MagickMax(bisect-support+0.5,0.0);
    stop=(ssize_t) MagickMin(bisect+support+0.5,(double) image->rows);
    density=0.0;
    contribution=contributions[id];
    /* Compute filter weights over the source window [start,stop). */
    for (n=0; n < (stop-start); n++)
    {
      contribution[n].pixel=start+n;
      contribution[n].weight=GetResizeFilterWeight(resize_filter,scale*
        ((double) (start+n)-bisect+0.5));
      density+=contribution[n].weight;
    }
    if (n == 0)
      continue;
    if ((density != 0.0) && (density != 1.0))
      {
        ssize_t
          i;

        /*
          Normalize.
        */
        density=PerceptibleReciprocal(density);
        for (i=0; i < n; i++)
          contribution[i].weight*=density;
      }
    p=GetCacheViewVirtualPixels(image_view,0,contribution[0].pixel,
      image->columns,(size_t) (contribution[n-1].pixel-contribution[0].pixel+1),
      exception);
    q=QueueCacheViewAuthenticPixels(resize_view,0,y,resize_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) resize_image->columns; x++)
    {
      ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          alpha,
          gamma,
          pixel;

        PixelChannel
          channel;

        PixelTrait
          resize_traits,
          traits;

        ssize_t
          j;

        ssize_t
          k;

        channel=GetPixelChannelChannel(image,i);
        traits=GetPixelChannelTraits(image,channel);
        resize_traits=GetPixelChannelTraits(resize_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (resize_traits == UndefinedPixelTrait))
          continue;
        if (((resize_traits & CopyPixelTrait) != 0) ||
            (GetPixelWriteMask(resize_image,q) <= (QuantumRange/2)))
          {
            /* Copy-through channel: take the nearest source pixel. */
            j=(ssize_t) (MagickMin(MagickMax(bisect,(double) start),(double)
              stop-1.0)+0.5);
            k=(ssize_t) ((contribution[j-start].pixel-contribution[0].pixel)*
              image->columns+x);
            SetPixelChannel(resize_image,channel,p[k*GetPixelChannels(image)+i],
              q);
            continue;
          }
        pixel=0.0;
        if ((resize_traits & BlendPixelTrait) == 0)
          {
            /*
              No alpha blending.
            */
            for (j=0; j < n; j++)
            {
              k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
                image->columns+x);
              alpha=contribution[j].weight;
              pixel+=alpha*p[k*GetPixelChannels(image)+i];
            }
            SetPixelChannel(resize_image,channel,ClampToQuantum(pixel),q);
            continue;
          }
        /* Alpha blending: weight taps by alpha, renormalize with gamma. */
        gamma=0.0;
        for (j=0; j < n; j++)
        {
          k=(ssize_t) ((contribution[j].pixel-contribution[0].pixel)*
            image->columns+x);
          alpha=contribution[j].weight*QuantumScale*GetPixelAlpha(image,p+k*
            GetPixelChannels(image));
          pixel+=alpha*p[k*GetPixelChannels(image)+i];
          gamma+=alpha;
        }
        gamma=PerceptibleReciprocal(gamma);
        SetPixelChannel(resize_image,channel,ClampToQuantum(gamma*pixel),q);
      }
      q+=GetPixelChannels(resize_image);
    }
    if (SyncCacheViewAuthenticPixels(resize_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        (*progress)++;
        proceed=SetImageProgress(image,ResizeImageTag,*progress,span);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  resize_view=DestroyCacheView(resize_view);
  image_view=DestroyCacheView(image_view);
  contributions=DestroyContributionThreadSet(contributions);
  return(status);
}

MagickExport Image *ResizeImage(const Image *image,const size_t columns,
  const size_t rows,const FilterType filter,ExceptionInfo *exception)
{
  double
    x_factor,
    y_factor;

  FilterType
    filter_type;

  Image
    *filter_image,
    *resize_image;

  MagickOffsetType
    offset;

  MagickSizeType
    span;

  MagickStatusType
    status;

  ResizeFilter
    *resize_filter;

  /*
    Acquire resize image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if ((columns == 0) || (rows == 0))
    ThrowImageException(ImageError,"NegativeOrZeroImageSize");
  /* Same geometry and no explicit filter: a plain clone suffices. */
  if ((columns == image->columns) && (rows == image->rows) &&
      (filter == UndefinedFilter))
    return(CloneImage(image,0,0,MagickTrue,exception));
  /*
    Acquire resize filter.
  */
  x_factor=(double) columns/(double) image->columns;
  y_factor=(double) rows/(double) image->rows;
  filter_type=LanczosFilter;
  if (filter != UndefinedFilter)
    filter_type=filter;
  else
    if ((x_factor == 1.0) && (y_factor == 1.0))
      filter_type=PointFilter;
    else
      if ((image->storage_class == PseudoClass) ||
          (image->alpha_trait != UndefinedPixelTrait) ||
          ((x_factor*y_factor) > 1.0))
        filter_type=MitchellFilter;
  resize_filter=AcquireResizeFilter(image,filter_type,MagickFalse,exception);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Try the accelerated (OpenCL) path first when it is compiled in. */
  resize_image=AccelerateResizeImage(image,columns,rows,resize_filter,
    exception);
  if (resize_image != (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(resize_image);
    }
#endif
  resize_image=CloneImage(image,columns,rows,MagickTrue,exception);
  if (resize_image == (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(resize_image);
    }
  /*
    Two-pass resize through an intermediate image: when x_factor > y_factor
    the horizontal pass runs first, otherwise the vertical pass does.
  */
  if (x_factor > y_factor)
    filter_image=CloneImage(image,columns,image->rows,MagickTrue,exception);
  else
    filter_image=CloneImage(image,image->columns,rows,MagickTrue,exception);
  if (filter_image == (Image *) NULL)
    {
      resize_filter=DestroyResizeFilter(resize_filter);
      return(DestroyImage(resize_image));
    }
  /*
    Resize image.
  %
  */
  offset=0;
  if (x_factor > y_factor)
    {
      /* Horizontal pass into filter_image, then vertical into the result. */
      span=(MagickSizeType) (filter_image->columns+rows);
      status=HorizontalFilter(resize_filter,image,filter_image,x_factor,span,
        &offset,exception);
      status&=VerticalFilter(resize_filter,filter_image,resize_image,y_factor,
        span,&offset,exception);
    }
  else
    {
      /* Vertical pass into filter_image, then horizontal into the result. */
      span=(MagickSizeType) (filter_image->rows+columns);
      status=VerticalFilter(resize_filter,image,filter_image,y_factor,span,
        &offset,exception);
      status&=HorizontalFilter(resize_filter,filter_image,resize_image,x_factor,
        span,&offset,exception);
    }
  /*
    Free resources.
  */
  filter_image=DestroyImage(filter_image);
  resize_filter=DestroyResizeFilter(resize_filter);
  if (status == MagickFalse)
    {
      resize_image=DestroyImage(resize_image);
      return((Image *) NULL);
    }
  resize_image->type=image->type;
  return(resize_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S a m p l e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SampleImage() scales an image to the desired dimensions with pixel
%  sampling.  Unlike other scaling methods, this method does not introduce
%  any additional color into the scaled image.
%
%  The format of the SampleImage method is:
%
%      Image *SampleImage(const Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the sampled image.
%
%    o rows: the number of rows in the sampled image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SampleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleImageTag  "Sample/Image"

  CacheView
    *image_view,
    *sample_view;

  Image
    *sample_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    x1;

  ssize_t
    *x_offset,
    y;

  PointInfo
    sample_offset;

  /*
    Initialize sampled image attributes.
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); sample_image=CloneImage(image,columns,rows,MagickTrue,exception); if (sample_image == (Image *) NULL) return((Image *) NULL); /* Set the sampling offset, default is in the mid-point of sample regions. */ sample_offset.x=sample_offset.y=0.5-MagickEpsilon; { const char *value; value=GetImageArtifact(image,"sample:offset"); if (value != (char *) NULL) { GeometryInfo geometry_info; MagickStatusType flags; (void) ParseGeometry(value,&geometry_info); flags=ParseGeometry(value,&geometry_info); sample_offset.x=sample_offset.y=geometry_info.rho/100.0-MagickEpsilon; if ((flags & SigmaValue) != 0) sample_offset.y=geometry_info.sigma/100.0-MagickEpsilon; } } /* Allocate scan line buffer and column offset buffers. */ x_offset=(ssize_t *) AcquireQuantumMemory((size_t) sample_image->columns, sizeof(*x_offset)); if (x_offset == (ssize_t *) NULL) { sample_image=DestroyImage(sample_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } for (x1=0; x1 < (ssize_t) sample_image->columns; x1++) x_offset[x1]=(ssize_t) ((((double) x1+sample_offset.x)*image->columns)/ sample_image->columns); /* Sample each row. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); sample_view=AcquireAuthenticCacheView(sample_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,sample_image,sample_image->rows,1) #endif for (y=0; y < (ssize_t) sample_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; ssize_t y_offset; if (status == MagickFalse) continue; y_offset=(ssize_t) ((((double) y+sample_offset.y)*image->rows)/ sample_image->rows); p=GetCacheViewVirtualPixels(image_view,0,y_offset,image->columns,1, exception); q=QueueCacheViewAuthenticPixels(sample_view,0,y,sample_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } /* Sample each column. */ for (x=0; x < (ssize_t) sample_image->columns; x++) { ssize_t i; if (GetPixelWriteMask(sample_image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(sample_image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(sample_image); i++) { PixelChannel channel; PixelTrait image_traits, traits; channel=GetPixelChannelChannel(sample_image,i); traits=GetPixelChannelTraits(sample_image,channel); image_traits=GetPixelChannelTraits(image,channel); if ((traits == UndefinedPixelTrait) || (image_traits == UndefinedPixelTrait)) continue; SetPixelChannel(sample_image,channel,p[x_offset[x]*GetPixelChannels( image)+i],q); } q+=GetPixelChannels(sample_image); } if (SyncCacheViewAuthenticPixels(sample_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,SampleImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); sample_view=DestroyCacheView(sample_view); x_offset=(ssize_t *) RelinquishMagickMemory(x_offset); sample_image->type=image->type; if 
  (status == MagickFalse)
    sample_image=DestroyImage(sample_image);
  return(sample_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S c a l e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleImage() changes the size of an image to the given dimensions.
%
%  The format of the ScaleImage method is:
%
%      Image *ScaleImage(const Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: the number of columns in the scaled image.
%
%    o rows: the number of rows in the scaled image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ScaleImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define ScaleImageTag  "Scale/Image"

  CacheView
    *image_view,
    *scale_view;

  double
    alpha,
    pixel[CompositePixelChannel],
    *scale_scanline,
    *scanline,
    *x_vector,
    *y_vector;

  Image
    *scale_image;

  MagickBooleanType
    next_column,
    next_row,
    proceed,
    status;

  PixelTrait
    scale_traits;

  PointInfo
    scale,
    span;

  ssize_t
    i;

  ssize_t
    n,
    number_rows,
    y;

  /*
    Initialize scaled image attributes.
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) ThrowImageException(ImageError,"NegativeOrZeroImageSize"); if ((columns == image->columns) && (rows == image->rows)) return(CloneImage(image,0,0,MagickTrue,exception)); scale_image=CloneImage(image,columns,rows,MagickTrue,exception); if (scale_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(scale_image,DirectClass,exception) == MagickFalse) { scale_image=DestroyImage(scale_image); return((Image *) NULL); } /* Allocate memory. */ x_vector=(double *) AcquireQuantumMemory((size_t) image->columns, MaxPixelChannels*sizeof(*x_vector)); scanline=x_vector; if (image->rows != scale_image->rows) scanline=(double *) AcquireQuantumMemory((size_t) image->columns, MaxPixelChannels*sizeof(*scanline)); scale_scanline=(double *) AcquireQuantumMemory((size_t) scale_image->columns, MaxPixelChannels*sizeof(*scale_scanline)); y_vector=(double *) AcquireQuantumMemory((size_t) image->columns, MaxPixelChannels*sizeof(*y_vector)); if ((scanline == (double *) NULL) || (scale_scanline == (double *) NULL) || (x_vector == (double *) NULL) || (y_vector == (double *) NULL)) { if ((image->rows != scale_image->rows) && (scanline != (double *) NULL)) scanline=(double *) RelinquishMagickMemory(scanline); if (scale_scanline != (double *) NULL) scale_scanline=(double *) RelinquishMagickMemory(scale_scanline); if (x_vector != (double *) NULL) x_vector=(double *) RelinquishMagickMemory(x_vector); if (y_vector != (double *) NULL) y_vector=(double *) RelinquishMagickMemory(y_vector); scale_image=DestroyImage(scale_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Scale image. 
*/ number_rows=0; next_row=MagickTrue; span.y=1.0; scale.y=(double) scale_image->rows/(double) image->rows; (void) memset(y_vector,0,(size_t) MaxPixelChannels*image->columns* sizeof(*y_vector)); n=0; status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); scale_view=AcquireAuthenticCacheView(scale_image,exception); for (y=0; y < (ssize_t) scale_image->rows; y++) { const Quantum *magick_restrict p; Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) break; q=QueueCacheViewAuthenticPixels(scale_view,0,y,scale_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; break; } alpha=1.0; if (scale_image->rows == image->rows) { /* Read a new scanline. */ p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(image); continue; } if (image->alpha_trait != UndefinedPixelTrait) alpha=QuantumScale*GetPixelAlpha(image,p); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & BlendPixelTrait) == 0) { x_vector[x*GetPixelChannels(image)+i]=(double) p[i]; continue; } x_vector[x*GetPixelChannels(image)+i]=alpha*p[i]; } p+=GetPixelChannels(image); } } else { /* Scale Y direction. */ while (scale.y < span.y) { if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows)) { /* Read a new scanline. 
*/ p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(image); continue; } if (image->alpha_trait != UndefinedPixelTrait) alpha=QuantumScale*GetPixelAlpha(image,p); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & BlendPixelTrait) == 0) { x_vector[x*GetPixelChannels(image)+i]=(double) p[i]; continue; } x_vector[x*GetPixelChannels(image)+i]=alpha*p[i]; } p+=GetPixelChannels(image); } number_rows++; } for (x=0; x < (ssize_t) image->columns; x++) for (i=0; i < (ssize_t) GetPixelChannels(image); i++) y_vector[x*GetPixelChannels(image)+i]+=scale.y* x_vector[x*GetPixelChannels(image)+i]; span.y-=scale.y; scale.y=(double) scale_image->rows/(double) image->rows; next_row=MagickTrue; } if ((next_row != MagickFalse) && (number_rows < (ssize_t) image->rows)) { /* Read a new scanline. 
*/ p=GetCacheViewVirtualPixels(image_view,0,n++,image->columns,1, exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,p) <= (QuantumRange/2)) { p+=GetPixelChannels(image); continue; } if (image->alpha_trait != UndefinedPixelTrait) alpha=QuantumScale*GetPixelAlpha(image,p); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & BlendPixelTrait) == 0) { x_vector[x*GetPixelChannels(image)+i]=(double) p[i]; continue; } x_vector[x*GetPixelChannels(image)+i]=alpha*p[i]; } p+=GetPixelChannels(image); } number_rows++; next_row=MagickFalse; } for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { pixel[i]=y_vector[x*GetPixelChannels(image)+i]+span.y* x_vector[x*GetPixelChannels(image)+i]; scanline[x*GetPixelChannels(image)+i]=pixel[i]; y_vector[x*GetPixelChannels(image)+i]=0.0; } } scale.y-=span.y; if (scale.y <= 0) { scale.y=(double) scale_image->rows/(double) image->rows; next_row=MagickTrue; } span.y=1.0; } if (scale_image->columns == image->columns) { /* Transfer scanline to scaled image. 
*/ for (x=0; x < (ssize_t) scale_image->columns; x++) { if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(scale_image); continue; } if (image->alpha_trait != UndefinedPixelTrait) { alpha=QuantumScale*scanline[x*GetPixelChannels(image)+ GetPixelChannelOffset(image,AlphaPixelChannel)]; alpha=PerceptibleReciprocal(alpha); } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); scale_traits=GetPixelChannelTraits(scale_image,channel); if ((traits == UndefinedPixelTrait) || (scale_traits == UndefinedPixelTrait)) continue; if ((traits & BlendPixelTrait) == 0) { SetPixelChannel(scale_image,channel,ClampToQuantum( scanline[x*GetPixelChannels(image)+i]),q); continue; } SetPixelChannel(scale_image,channel,ClampToQuantum(alpha*scanline[ x*GetPixelChannels(image)+i]),q); } q+=GetPixelChannels(scale_image); } } else { ssize_t t; /* Scale X direction. */ for (i=0; i < (ssize_t) GetPixelChannels(image); i++) pixel[i]=0.0; next_column=MagickFalse; span.x=1.0; t=0; for (x=0; x < (ssize_t) image->columns; x++) { scale.x=(double) scale_image->columns/(double) image->columns; while (scale.x >= span.x) { if (next_column != MagickFalse) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) pixel[i]=0.0; t++; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if (traits == UndefinedPixelTrait) continue; pixel[i]+=span.x*scanline[x*GetPixelChannels(image)+i]; scale_scanline[t*GetPixelChannels(image)+i]=pixel[i]; } scale.x-=span.x; span.x=1.0; next_column=MagickTrue; } if (scale.x > 0) { if (next_column != MagickFalse) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) pixel[i]=0.0; next_column=MagickFalse; t++; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) 
pixel[i]+=scale.x*scanline[x*GetPixelChannels(image)+i]; span.x-=scale.x; } } if (span.x > 0) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) pixel[i]+=span.x*scanline[(x-1)*GetPixelChannels(image)+i]; } if ((next_column == MagickFalse) && (t < (ssize_t) scale_image->columns)) for (i=0; i < (ssize_t) GetPixelChannels(image); i++) scale_scanline[t*GetPixelChannels(image)+i]=pixel[i]; /* Transfer scanline to scaled image. */ for (x=0; x < (ssize_t) scale_image->columns; x++) { if (GetPixelWriteMask(scale_image,q) <= (QuantumRange/2)) { q+=GetPixelChannels(scale_image); continue; } if (image->alpha_trait != UndefinedPixelTrait) { alpha=QuantumScale*scale_scanline[x*GetPixelChannels(image)+ GetPixelChannelOffset(image,AlphaPixelChannel)]; alpha=PerceptibleReciprocal(alpha); } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); scale_traits=GetPixelChannelTraits(scale_image,channel); if ((traits == UndefinedPixelTrait) || (scale_traits == UndefinedPixelTrait)) continue; if ((traits & BlendPixelTrait) == 0) { SetPixelChannel(scale_image,channel,ClampToQuantum( scale_scanline[x*GetPixelChannels(image)+i]),q); continue; } SetPixelChannel(scale_image,channel,ClampToQuantum(alpha* scale_scanline[x*GetPixelChannels(image)+i]),q); } q+=GetPixelChannels(scale_image); } } if (SyncCacheViewAuthenticPixels(scale_view,exception) == MagickFalse) { status=MagickFalse; break; } proceed=SetImageProgress(image,ScaleImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) { status=MagickFalse; break; } } scale_view=DestroyCacheView(scale_view); image_view=DestroyCacheView(image_view); /* Free allocated memory. 
*/ y_vector=(double *) RelinquishMagickMemory(y_vector); scale_scanline=(double *) RelinquishMagickMemory(scale_scanline); if (scale_image->rows != image->rows) scanline=(double *) RelinquishMagickMemory(scanline); x_vector=(double *) RelinquishMagickMemory(x_vector); scale_image->type=image->type; if (status == MagickFalse) scale_image=DestroyImage(scale_image); return(scale_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T h u m b n a i l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ThumbnailImage() changes the size of an image to the given dimensions and % removes any associated profiles. The goal is to produce small low cost % thumbnail images suited for display on the Web. % % The format of the ThumbnailImage method is: % % Image *ThumbnailImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the scaled image. % % o rows: the number of rows in the scaled image. % % o exception: return any errors or warnings in this structure. 
% */
MagickExport Image *ThumbnailImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
#define SampleFactor  5

  char
    filename[MagickPathExtent],
    value[MagickPathExtent];

  const char
    *name;

  Image
    *thumbnail_image;

  double
    x_factor,
    y_factor;

  struct stat
    attributes;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Resize directly for modest reductions; for large reductions, first
    cheaply point-sample down to SampleFactor times the target size, then
    filter-resize the rest of the way.
  */
  x_factor=(double) columns/(double) image->columns;
  y_factor=(double) rows/(double) image->rows;
  if ((x_factor*y_factor) > 0.1)
    thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
  else
    if (((SampleFactor*columns) < 128) || ((SampleFactor*rows) < 128))
      thumbnail_image=ResizeImage(image,columns,rows,image->filter,exception);
    else
      {
        Image
          *sample_image;

        sample_image=SampleImage(image,SampleFactor*columns,SampleFactor*rows,
          exception);
        if (sample_image == (Image *) NULL)
          return((Image *) NULL);
        thumbnail_image=ResizeImage(sample_image,columns,rows,image->filter,
          exception);
        sample_image=DestroyImage(sample_image);
      }
  if (thumbnail_image == (Image *) NULL)
    return(thumbnail_image);
  (void) ParseAbsoluteGeometry("0x0+0+0",&thumbnail_image->page);
  if (thumbnail_image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(thumbnail_image,OpaqueAlphaChannel,exception);
  thumbnail_image->depth=8;
  thumbnail_image->interlace=NoInterlace;
  /*
    Strip all profiles except color profiles.
  */
  ResetImageProfileIterator(thumbnail_image);
  for (name=GetNextImageProfile(thumbnail_image); name != (const char *) NULL; )
  {
    if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0))
      {
        (void) DeleteImageProfile(thumbnail_image,name);
        ResetImageProfileIterator(thumbnail_image);
      }
    name=GetNextImageProfile(thumbnail_image);
  }
  (void) DeleteImageProperty(thumbnail_image,"comment");
  /*
    Attach freedesktop.org-style Thumb::* properties.
  */
  (void) CopyMagickString(value,image->magick_filename,MagickPathExtent);
  if (strstr(image->magick_filename,"//") == (char *) NULL)
    (void) FormatLocaleString(value,MagickPathExtent,"file://%s",
      image->magick_filename);
  (void) SetImageProperty(thumbnail_image,"Thumb::URI",value,exception);
  GetPathComponent(image->magick_filename,TailPath,filename);
  (void) CopyMagickString(value,filename,MagickPathExtent);
  if (GetPathAttributes(image->filename,&attributes) != MagickFalse)
    {
      (void) FormatImageProperty(thumbnail_image,"Thumb::MTime","%.20g",
        (double) attributes.st_mtime);
      /*
        Bug fix: this FormatLocaleString() was previously unconditional,
        reading attributes.st_mtime even when GetPathAttributes() failed and
        left 'attributes' uninitialized (undefined behavior).  It is now
        guarded by the same success check; 'value' is overwritten by
        FormatMagickSize() below either way, so observable behavior on the
        success path is unchanged.
      */
      (void) FormatLocaleString(value,MagickPathExtent,"%.20g",(double)
        attributes.st_mtime);
    }
  (void) FormatMagickSize(GetBlobSize(image),MagickFalse,"B",MagickPathExtent,
    value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Size",value,exception);
  (void) FormatLocaleString(value,MagickPathExtent,"image/%s",image->magick);
  LocaleLower(value);
  (void) SetImageProperty(thumbnail_image,"Thumb::Mimetype",value,exception);
  (void) SetImageProperty(thumbnail_image,"software",MagickAuthoritativeURL,
    exception);
  (void) FormatImageProperty(thumbnail_image,"Thumb::Image::Width","%.20g",
    (double) image->magick_columns);
  (void) FormatImageProperty(thumbnail_image,"Thumb::Image::Height","%.20g",
    (double) image->magick_rows);
  (void) FormatImageProperty(thumbnail_image,"Thumb::Document::Pages","%.20g",
    (double) GetImageListLength(image));
  return(thumbnail_image);
}
omp_loop.c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

/* Number of elements in each work array. */
#define N 100

/*
 * OpenMP work-sharing demo: initialize two arrays, then add them
 * element-wise across a team of threads with "#pragma omp for".
 *
 * Fixes over the original:
 *   - "Thread $d starting..." used '$d', which is not a printf conversion,
 *     so 'tid' was passed but never printed; corrected to '%d'.
 *   - The per-element trace "c[%d] = %f]" had a stray ']'; removed.
 *   - Added an explicit return 0.
 */
int main(int argc, char *argv[])
{
  int nthreads, tid, i;
  float a[N], b[N], c[N];

  /* Some initializations. */
  for (i = 0; i < N; i++) {
    a[i] = b[i] = i;
  }

#pragma omp parallel shared(a, b, c, nthreads) private(i, tid)
  {
    tid = omp_get_thread_num();
    /* Only the master thread reports the team size. */
    if (tid == 0) {
      nthreads = omp_get_num_threads();
      printf("Number of threads = %d\n", nthreads);
    }
    printf("Thread %d starting...\n", tid);

    /* Iterations are divided among the team; implicit barrier at the end. */
#pragma omp for
    for (i = 0; i < N; i++) {
      c[i] = a[i] + b[i];
      printf("Thread %d: c[%d] = %f\n", tid, i, c[i]);
    }
  } /* end of parallel section */

  return 0;
}
testis.c
/*
 * NAS IS (Integer Sort) benchmark, class S, as emitted by an OpenMP
 * instrumentation pass (the _imopVarPre* temporaries and the no-op
 * statements below are residue of that pass).
 * Declarations and benchmark state shared by the kernels.
 */
struct __sFILEX ;
int printf(const char *restrict , ...);
extern void timer_clear(int );
extern void timer_start(int );
extern void timer_stop(int );
extern double timer_read(int );
extern void c_print_results(char *name, char class , int n1 , int n2 , int n3 , int niter , int nthreads , double t , double mops , char *optype , int passed_verification , char *npbversion , char *compiletime , char *cc , char *clink , char *c_lib , char *c_inc , char *cflags , char *clinkflags , char *rand);
typedef int INT_TYPE;
/* After the final rank() iteration, points at the completed key_buff1 counts. */
INT_TYPE *key_buff_ptr_global;
/* Running count of verification checks that passed. */
int passed_verification;
INT_TYPE key_array[(1 << 16)];
INT_TYPE key_buff1[(1 << 16)];
INT_TYPE key_buff2[(1 << 16)];
INT_TYPE partial_verify_vals[5];
INT_TYPE test_index_array[5];
INT_TYPE test_rank_array[5];
/* Reference index/rank pairs for each problem class (S, W, A, B, C). */
INT_TYPE S_test_index_array[5] = {48427, 17148 , 23627 , 62548 , 4431};
INT_TYPE S_test_rank_array[5] = {0, 18 , 346 , 64917 , 65463};
INT_TYPE W_test_index_array[5] = {357773, 934767 , 875723 , 898999 , 404505};
INT_TYPE W_test_rank_array[5] = {1249, 11698 , 1039987 , 1043896 , 1048018};
INT_TYPE A_test_index_array[5] = {2112377, 662041 , 5336171 , 3642833 , 4250760};
INT_TYPE A_test_rank_array[5] = {104, 17523 , 123928 , 8288932 , 8388264};
INT_TYPE B_test_index_array[5] = {41869, 812306 , 5102857 , 18232239 , 26860214};
INT_TYPE B_test_rank_array[5] = {33422937, 10244 , 59149 , 33135281 , 99};
INT_TYPE C_test_index_array[5] = {44172927, 72999161 , 74326391 , 129606274 , 21736814};
INT_TYPE C_test_rank_array[5] = {61147, 882988 , 266290 , 133997595 , 133525895};
double randlc(double *X, double *A);
void full_verify(void );

/*
 * NAS linear congruential pseudo-random generator.
 * Advances *X to A * (*X) mod 2^46 and returns the result scaled to [0, 1),
 * carrying out the 46-bit product exactly in doubles by splitting operands
 * into 23-bit halves.  The R23/T23/R46/T46 scale constants (2^-23, 2^23,
 * 2^-46, 2^46) are built once on the first call (KS guard).
 * NOTE(review): not thread-safe — KS and the constants are unsynchronized
 * statics; callers appear to invoke it from a master region only (confirm).
 */
double randlc(double *X, double *A)
{
    static int KS = 0;
    static double R23;
    static double R46;
    static double T23;
    static double T46;
    double T1;
    double T2;
    double T3;
    double T4;
    double A1;
    double A2;
    double X1;
    double X2;
    double Z;
    int i;
    int j;
    if (KS == 0) {
        /* First call: compute the powers of two by repeated halving/doubling. */
        R23 = 1.0;
        R46 = 1.0;
        T23 = 1.0;
        T46 = 1.0;
        for (i = 1; i <= 23; i++) {
            R23 = 0.50 * R23;
            T23 = 2.0 * T23;
        }
        for (i = 1; i <= 46; i++) {
            R46 = 0.50 * R46;
            T46 = 2.0 * T46;
        }
        KS = 1;
    }
    /* Split A = A1*2^23 + A2 (the int assignment truncates toward zero). */
    T1 = R23 * *A;
    j = T1;
    A1 = j;
    A2 = *A - T23 * A1;
    /* Split X = X1*2^23 + X2. */
    T1 = R23 * *X;
    j = T1;
    X1 = j;
    X2 = *X - T23 * X1;
    /* Cross terms, reduced mod 2^23, then combine and reduce mod 2^46. */
    T1 = A1 * X2 + A2 * X1;
    j = R23 * T1;
    T2 = j;
    Z = T1 - T23 * T2;
    T3 = T23 * Z + A2 * X2;
    j = R46 * T3;
    T4 = j;
    *X = T3 - T46 * T4;
    return (R46 * *X);
}

/*
 * Fill key_array with (1 << 16) pseudo-random keys in [0, 2^11/4 * 4) by
 * summing four consecutive randlc() draws per key and scaling by k.
 * 'seed' and 'a' are passed by value; the generator state advances through
 * the local copy of seed.  The pointer temporaries are instrumentation
 * residue and have no effect on the values computed.
 */
void create_seq(double seed, double a)
{
    double x;
    int i;
    int k;
    k = (1 << 11) / 4;
    for (i = 0; i < (1 << 16); i++) {
        double *_imopVarPre16;
        double *_imopVarPre17;
        double _imopVarPre18;
        _imopVarPre16 = &a;
        _imopVarPre17 = &seed;
        _imopVarPre18 = randlc(_imopVarPre17, _imopVarPre16);
        x = _imopVarPre18;
        double *_imopVarPre21;
        double *_imopVarPre22;
        double _imopVarPre23;
        _imopVarPre21 = &a;
        _imopVarPre22 = &seed;
        _imopVarPre23 = randlc(_imopVarPre22, _imopVarPre21);
        x += _imopVarPre23;
        double *_imopVarPre26;
        double *_imopVarPre27;
        double _imopVarPre28;
        _imopVarPre26 = &a;
        _imopVarPre27 = &seed;
        _imopVarPre28 = randlc(_imopVarPre27, _imopVarPre26);
        x += _imopVarPre28;
        double *_imopVarPre31;
        double *_imopVarPre32;
        double _imopVarPre33;
        _imopVarPre31 = &a;
        _imopVarPre32 = &seed;
        _imopVarPre33 = randlc(_imopVarPre32, _imopVarPre31);
        x += _imopVarPre33;
        key_array[i] = k * x;
    }
}

/*
 * Final verification: place each key into its ranked position (decrementing
 * the shared count table reached via key_buff_ptr_global), then confirm
 * key_array is fully sorted.  Increments passed_verification on success,
 * otherwise reports the number of out-of-order keys.
 */
void full_verify()
{
    INT_TYPE i;
    INT_TYPE j;
    for (i = 0; i < (1 << 16); i++) {
        key_array[--key_buff_ptr_global[key_buff2[i]]] = key_buff2[i];
    }
    /* Count adjacent inversions; j == 0 means sorted. */
    j = 0;
    for (i = 1; i < (1 << 16); i++) {
        if (key_array[i - 1] > key_array[i]) {
            j++;
        }
    }
    if (j != 0) {
        printf("Full_verify: number of keys out of sort: %d\n", j);
    } else {
        passed_verification++;
    }
}

/*
 * One ranking iteration: master perturbs two keys and clears the shared
 * count table; every thread then builds a private histogram and prefix sum
 * before merging into key_buff1 (continues past this line).
 */
void rank(int iteration)
{
    INT_TYPE i;
    INT_TYPE k;
    11 - 9;   /* instrumentation residue; no effect */
    INT_TYPE prv_buff1[(1 << 11)];
#pragma omp master
    {
        key_array[iteration] = iteration;
        key_array[iteration + 10] = (1 << 11) - iteration;
        /* Snapshot the keys at the class-specific test indices. */
        for (i = 0; i < 5; i++) {
            partial_verify_vals[i] = key_array[test_index_array[i]];
        }
        for (i = 0; i < (1 << 11); i++) {
            key_buff1[i] = 0;
        }
    }
    // #pragma omp dummyFlush BARRIER_START written([key_buff1.f, key_array.f, partial_verify_vals.f]) read([key_buff2, key_array, key_array.f, key_buff2.f, i])
#pragma omp barrier
    for (i = 0; i < (1 << 11); i++) {
/*[5; 8; ]*/ /*[5; 8; ]*/ prv_buff1[i] = 0; } /*[5; 8; ]*/ #pragma omp for nowait /*[5; 8; ]*/ /*[5; 8; ]*/ /*[5; 8; ]*/ for (i = 0; i < (1 << 16); i++) { /*[5; 8; ]*/ /*[5; 8; ]*/ key_buff2[i] = key_array[i]; /*[5; 8; ]*/ prv_buff1[key_buff2[i]]++; } /*[5; 8; ]*/ /*[5; 8; ]*/ /*[5; 8; ]*/ /*[5; 8; ]*/ for (i = 0; i < (1 << 11) - 1; i++) { /*[5; 8; ]*/ /*[5; 8; ]*/ prv_buff1[i + 1] += prv_buff1[i]; } /*[5; 8; ]*/ // #pragma omp dummyFlush CRITICAL_START written([key_buff2.f]) read([key_buff1.f, key_buff1]) /*[5; 8; ]*/ #pragma omp critical { /*[5; 8; ]*/ /*[5; 8; ]*/ /*[5; 8; ]*/ /*[5; 8; ]*/ /*[5; 8; ]*/ for (i = 0; i < (1 << 11); i++) { /*[5; 8; ]*/ /*[5; 8; ]*/ key_buff1[i] += prv_buff1[i]; } } /*[5; 8; ]*/ // #pragma omp dummyFlush CRITICAL_END written([key_buff1.f]) read([]) /*[5; 8; ]*/ // #pragma omp dummyFlush BARRIER_START written([]) read([key_buff1.f, test_rank_array.f, key_buff1, test_rank_array, passed_verification, partial_verify_vals, partial_verify_vals.f, printf, _imopVarPre35]) /*[5; 8; ]*/ #pragma omp barrier /*[6; ]*/ #pragma omp master { /*[6; ]*/ /*[6; ]*/ /*[6; ]*/ /*[6; ]*/ /*[6; ]*/ for (i = 0; i < 5; i++) { /*[6; ]*/ /*[6; ]*/ k = partial_verify_vals[i]; /*[6; ]*/ int _imopVarPre35; /*[6; ]*/ _imopVarPre35 = 0 <= k; /*[6; ]*/ /*[6; ]*/ if (_imopVarPre35) { /*[6; ]*/ /*[6; ]*/ _imopVarPre35 = k <= (1 << 16) - 1; } /*[6; ]*/ /*[6; ]*/ if (_imopVarPre35) { /*[6; ]*/ /*[6; ]*/ /*[6; ]*/ switch ('S') { /*[]*/ /*[6; ]*/ /*[6; ]*/ case 'S': if (i <= 2) { /*[6; ]*/ /*[6; ]*/ /*[6; ]*/ if (key_buff1[k - 1] != test_rank_array[i] + iteration) { /*[6; ]*/ /*[6; ]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i); /*[6; ]*/ } else { /*[6; ]*/ /*[6; ]*/ passed_verification++; } } else { /*[6; ]*/ /*[6; ]*/ /*[6; ]*/ if (key_buff1[k - 1] != test_rank_array[i] - iteration) { /*[6; ]*/ /*[6; ]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i); /*[6; ]*/ } else { /*[6; ]*/ /*[6; ]*/ 
passed_verification++; } } /*[6; ]*/ break; /*[]*/ /*[]*/ case 'W': if (i < 2) { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k - 1] != test_rank_array[i] + (iteration - 2)) { /*[]*/ /*[]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } else { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k - 1] != test_rank_array[i] - iteration) { /*[]*/ /*[]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } /*[]*/ break; /*[]*/ /*[]*/ case 'A': if (i <= 2) { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k - 1] != test_rank_array[i] + (iteration - 1)) { /*[]*/ /*[]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } else { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k - 1] != test_rank_array[i] - (iteration - 1)) { /*[]*/ /*[]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } /*[]*/ break; /*[]*/ case 'B': ; /*[]*/ int _imopVarPre36; /*[]*/ int _imopVarPre37; /*[]*/ _imopVarPre36 = i == 1; /*[]*/ /*[]*/ if (!_imopVarPre36) { /*[]*/ /*[]*/ _imopVarPre37 = i == 2; /*[]*/ /*[]*/ if (!_imopVarPre37) { /*[]*/ /*[]*/ _imopVarPre37 = i == 4; } /*[]*/ _imopVarPre36 = _imopVarPre37; } /*[]*/ /*[]*/ if (_imopVarPre36) { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k - 1] != test_rank_array[i] + iteration) { /*[]*/ /*[]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } else { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k - 1] != test_rank_array[i] - iteration) { /*[]*/ /*[]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } /*[]*/ break; /*[]*/ /*[]*/ case 'C': if (i <= 2) { /*[]*/ /*[]*/ /*[]*/ if 
(key_buff1[k - 1] != test_rank_array[i] + iteration) { /*[]*/ /*[]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } else { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k - 1] != test_rank_array[i] - iteration) { /*[]*/ /*[]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } /*[]*/ break; } } } /*[6; ]*/ /*[6; ]*/ if (iteration == 10) { /*[6; ]*/ /*[6; ]*/ key_buff_ptr_global = key_buff1; } } } /*[]*/ /*[]*/ /*[]*/ int main(int argc, char **argv) { /*[]*/ /*[]*/ int _imopVarPre39; /*[]*/ int i; /*[]*/ int iteration; /*[]*/ int nthreads = 1; /*[]*/ double timecounter; /*[]*/ /*[]*/ /*[]*/ /*[]*/ for (i = 0; i < 5; i++) { /*[]*/ /*[]*/ /*[]*/ switch ('S') { /*[]*/ /*[]*/ case 'S': test_index_array[i] = S_test_index_array[i]; /*[]*/ test_rank_array[i] = S_test_rank_array[i]; /*[]*/ break; /*[]*/ case 'A': test_index_array[i] = A_test_index_array[i]; /*[]*/ test_rank_array[i] = A_test_rank_array[i]; /*[]*/ break; /*[]*/ case 'W': test_index_array[i] = W_test_index_array[i]; /*[]*/ test_rank_array[i] = W_test_rank_array[i]; /*[]*/ break; /*[]*/ case 'B': test_index_array[i] = B_test_index_array[i]; /*[]*/ test_rank_array[i] = B_test_rank_array[i]; /*[]*/ break; /*[]*/ case 'C': test_index_array[i] = C_test_index_array[i]; /*[]*/ test_rank_array[i] = C_test_rank_array[i]; /*[]*/ break; } } /*[]*/ #pragma omp parallel private(iteration) { /*[1; ]*/ /*[1; ]*/ double _imopVarPre42; /*[1; ]*/ int _imopVarPre43; /*[1; ]*/ #pragma omp master { /*[1; ]*/ /*[1; ]*/ ; /*[1; ]*/ printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version" " - IS Benchmark\n\n"); /*[1; ]*/ /*[1; ]*/ _imopVarPre39 = (1 << 16); /*[1; ]*/ printf(" Size: %d (class %c)\n", _imopVarPre39, 'S'); /*[1; ]*/ /*[1; ]*/ printf(" Iterations: %d\n", 10); /*[1; ]*/ /*[1; ]*/ timer_clear(0); /*[1; ]*/ /*[1; ]*/ create_seq(314159265.00, 
1220703125.00); /*[1; ]*/ } /*[1; ]*/ int iteration_imopVarPre75; /*[1; ]*/ iteration_imopVarPre75 = 1; /*[1; ]*/ INT_TYPE i_imopVarPre76; /*[1; ]*/ INT_TYPE k; /*[1; ]*/ 11 - 9; /*[1; ]*/ INT_TYPE prv_buff1[(1 << 11)]; /*[1; ]*/ #pragma omp master { /*[1; ]*/ /*[1; ]*/ key_array[iteration_imopVarPre75] = iteration_imopVarPre75; /*[1; ]*/ key_array[iteration_imopVarPre75 + 10] = (1 << 11) - iteration_imopVarPre75; /*[1; ]*/ /*[1; ]*/ /*[1; ]*/ /*[1; ]*/ for (i_imopVarPre76 = 0; i_imopVarPre76 < 5; i_imopVarPre76++) { /*[1; ]*/ /*[1; ]*/ partial_verify_vals[i_imopVarPre76] = key_array[test_index_array[i_imopVarPre76]]; } /*[1; ]*/ /*[1; ]*/ /*[1; ]*/ /*[1; ]*/ for (i_imopVarPre76 = 0; i_imopVarPre76 < (1 << 11); i_imopVarPre76++) { /*[1; ]*/ /*[1; ]*/ key_buff1[i_imopVarPre76] = 0; } } /*[1; ]*/ // #pragma omp dummyFlush BARRIER_START written([key_buff1.f, key_array.f, test_rank_array.f, seed, T46, R46, KS, test_index_array.f, partial_verify_vals.f, T23, R23, _imopVarPre39]) read([key_buff2, key_array, key_array.f, key_buff2.f, i_imopVarPre76]) /*[1; ]*/ #pragma omp barrier /*[13; ]*/ /*[13; ]*/ /*[13; ]*/ /*[13; ]*/ for (i_imopVarPre76 = 0; i_imopVarPre76 < (1 << 11); i_imopVarPre76++) { /*[13; ]*/ /*[13; ]*/ prv_buff1[i_imopVarPre76] = 0; } /*[13; ]*/ #pragma omp for nowait /*[13; ]*/ /*[13; ]*/ /*[13; ]*/ for (i_imopVarPre76 = 0; i_imopVarPre76 < (1 << 16); i_imopVarPre76++) { /*[13; ]*/ /*[13; ]*/ key_buff2[i_imopVarPre76] = key_array[i_imopVarPre76]; /*[13; ]*/ prv_buff1[key_buff2[i_imopVarPre76]]++; } /*[13; ]*/ /*[13; ]*/ /*[13; ]*/ /*[13; ]*/ for (i_imopVarPre76 = 0; i_imopVarPre76 < (1 << 11) - 1; i_imopVarPre76++) { /*[13; ]*/ /*[13; ]*/ prv_buff1[i_imopVarPre76 + 1] += prv_buff1[i_imopVarPre76]; } /*[13; ]*/ // #pragma omp dummyFlush CRITICAL_START written([key_buff2.f]) read([key_buff1.f, key_buff1]) /*[13; ]*/ #pragma omp critical { /*[13; ]*/ /*[13; ]*/ /*[13; ]*/ /*[13; ]*/ /*[13; ]*/ for (i_imopVarPre76 = 0; i_imopVarPre76 < (1 << 11); 
i_imopVarPre76++) { /*[13; ]*/ /*[13; ]*/ key_buff1[i_imopVarPre76] += prv_buff1[i_imopVarPre76]; } } /*[13; ]*/ // #pragma omp dummyFlush CRITICAL_END written([key_buff1.f]) read([key_buff1.f, test_rank_array.f, key_buff1, test_rank_array, passed_verification, partial_verify_vals, partial_verify_vals.f, printf, _imopVarPre35]) /*[13; ]*/ #pragma omp master { /*[13; ]*/ /*[13; ]*/ /*[13; ]*/ /*[13; ]*/ /*[13; ]*/ for (i_imopVarPre76 = 0; i_imopVarPre76 < 5; i_imopVarPre76++) { /*[13; ]*/ /*[13; ]*/ k = partial_verify_vals[i_imopVarPre76]; /*[13; ]*/ int _imopVarPre35; /*[13; ]*/ _imopVarPre35 = 0 <= k; /*[13; ]*/ /*[13; ]*/ if (_imopVarPre35) { /*[13; ]*/ /*[13; ]*/ _imopVarPre35 = k <= (1 << 16) - 1; } /*[13; ]*/ /*[13; ]*/ if (_imopVarPre35) { /*[13; ]*/ /*[13; ]*/ /*[13; ]*/ switch ('S') { /*[]*/ /*[13; ]*/ /*[13; ]*/ case 'S': if (i_imopVarPre76 <= 2) { /*[13; ]*/ /*[13; ]*/ /*[13; ]*/ if (key_buff1[k - 1] != test_rank_array[i_imopVarPre76] + iteration_imopVarPre75) { /*[13; ]*/ /*[13; ]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration_imopVarPre75, i_imopVarPre76); /*[13; ]*/ } else { /*[13; ]*/ /*[13; ]*/ passed_verification++; } } else { /*[13; ]*/ /*[13; ]*/ /*[13; ]*/ if (key_buff1[k - 1] != test_rank_array[i_imopVarPre76] - iteration_imopVarPre75) { /*[13; ]*/ /*[13; ]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration_imopVarPre75, i_imopVarPre76); /*[13; ]*/ } else { /*[13; ]*/ /*[13; ]*/ passed_verification++; } } /*[13; ]*/ break; /*[]*/ /*[]*/ case 'W': if (i_imopVarPre76 < 2) { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k - 1] != test_rank_array[i_imopVarPre76] + (iteration_imopVarPre75 - 2)) { /*[]*/ /*[]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration_imopVarPre75, i_imopVarPre76); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } else { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k - 1] != test_rank_array[i_imopVarPre76] - iteration_imopVarPre75) { /*[]*/ 
/*[]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration_imopVarPre75, i_imopVarPre76); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } /*[]*/ break; /*[]*/ /*[]*/ case 'A': if (i_imopVarPre76 <= 2) { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k - 1] != test_rank_array[i_imopVarPre76] + (iteration_imopVarPre75 - 1)) { /*[]*/ /*[]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration_imopVarPre75, i_imopVarPre76); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } else { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k - 1] != test_rank_array[i_imopVarPre76] - (iteration_imopVarPre75 - 1)) { /*[]*/ /*[]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration_imopVarPre75, i_imopVarPre76); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } /*[]*/ break; /*[]*/ case 'B': ; /*[]*/ int _imopVarPre36; /*[]*/ int _imopVarPre37; /*[]*/ _imopVarPre36 = i_imopVarPre76 == 1; /*[]*/ /*[]*/ if (!_imopVarPre36) { /*[]*/ /*[]*/ _imopVarPre37 = i_imopVarPre76 == 2; /*[]*/ /*[]*/ if (!_imopVarPre37) { /*[]*/ /*[]*/ _imopVarPre37 = i_imopVarPre76 == 4; } /*[]*/ _imopVarPre36 = _imopVarPre37; } /*[]*/ /*[]*/ if (_imopVarPre36) { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k - 1] != test_rank_array[i_imopVarPre76] + iteration_imopVarPre75) { /*[]*/ /*[]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration_imopVarPre75, i_imopVarPre76); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } else { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k - 1] != test_rank_array[i_imopVarPre76] - iteration_imopVarPre75) { /*[]*/ /*[]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration_imopVarPre75, i_imopVarPre76); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } /*[]*/ break; /*[]*/ /*[]*/ case 'C': if (i_imopVarPre76 <= 2) { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k - 1] != test_rank_array[i_imopVarPre76] + iteration_imopVarPre75) { /*[]*/ /*[]*/ printf("Failed 
partial verification: " "iteration %d, test key %d\n", iteration_imopVarPre75, i_imopVarPre76); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } else { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k - 1] != test_rank_array[i_imopVarPre76] - iteration_imopVarPre75) { /*[]*/ /*[]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration_imopVarPre75, i_imopVarPre76); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } /*[]*/ break; } } } /*[13; ]*/ /*[13; ]*/ if (iteration_imopVarPre75 == 10) { /*[13; ]*/ /*[13; ]*/ key_buff_ptr_global = key_buff1; } } /*[13; ]*/ // #pragma omp dummyFlush BARRIER_START written([key_buff_ptr_global, passed_verification]) read([key_array, key_array.f, timer_start, key_buff1, partial_verify_vals, test_index_array.f, test_index_array, printf]) /*[13; ]*/ #pragma omp barrier /*[7; ]*/ #pragma omp master { /*[7; ]*/ /*[7; ]*/ passed_verification = 0; /*[7; ]*/ /*[7; ]*/ if ('S' != 'S') { /*[7; ]*/ /*[7; ]*/ printf("\n iteration\n"); /*[7; ]*/ } /*[7; ]*/ timer_start(0); /*[7; ]*/ } /*[4; 7; ]*/ /*[17; 4; 7; ]*/ /*[17; ]*/ /*[4; 7; ]*/ for (iteration = 1; iteration <= 10; iteration++) { /*[17; 4; 7; ]*/ /*[17; 4; 7; ]*/ #pragma omp master { /*[17; 4; 7; ]*/ /*[17; 4; 7; ]*/ /*[17; 4; 7; ]*/ if ('S' != 'S') { /*[17; 4; 7; ]*/ /*[17; 4; 7; ]*/ printf(" %d\n", iteration); /*[17; 4; 7; ]*/ } } /*[17; 7; ]*/ INT_TYPE i_imopVarPre77; /*[17; 7; ]*/ INT_TYPE k_imopVarPre78; /*[17; 7; ]*/ 11 - 9; /*[17; 7; ]*/ INT_TYPE prv_buff1_imopVarPre79[(1 << 11)]; /*[17; 7; ]*/ #pragma omp master { /*[17; 7; ]*/ /*[17; 7; ]*/ key_array[iteration] = iteration; /*[17; 7; ]*/ key_array[iteration + 10] = (1 << 11) - iteration; /*[17; 7; ]*/ /*[17; 7; ]*/ /*[17; 7; ]*/ /*[17; 7; ]*/ for (i_imopVarPre77 = 0; i_imopVarPre77 < 5; i_imopVarPre77++) { /*[17; 7; ]*/ /*[17; 7; ]*/ partial_verify_vals[i_imopVarPre77] = key_array[test_index_array[i_imopVarPre77]]; } /*[17; 7; ]*/ /*[17; 7; ]*/ /*[17; 7; ]*/ /*[17; 7; ]*/ for (i_imopVarPre77 = 0; 
i_imopVarPre77 < (1 << 11); i_imopVarPre77++) { /*[17; 7; ]*/ /*[17; 7; ]*/ key_buff1[i_imopVarPre77] = 0; } } /*[17; 7; ]*/ // #pragma omp dummyFlush BARRIER_START written([key_buff1.f, key_array.f, passed_verification, partial_verify_vals.f]) read([key_buff2, key_array, key_array.f, key_buff2.f, i_imopVarPre77]) /*[17; 7; ]*/ #pragma omp barrier /*[16; ]*/ /*[16; ]*/ /*[16; ]*/ /*[16; ]*/ for (i_imopVarPre77 = 0; i_imopVarPre77 < (1 << 11); i_imopVarPre77++) { /*[16; ]*/ /*[16; ]*/ prv_buff1_imopVarPre79[i_imopVarPre77] = 0; } /*[16; ]*/ #pragma omp for nowait /*[16; ]*/ /*[16; ]*/ /*[16; ]*/ for (i_imopVarPre77 = 0; i_imopVarPre77 < (1 << 16); i_imopVarPre77++) { /*[16; ]*/ /*[16; ]*/ key_buff2[i_imopVarPre77] = key_array[i_imopVarPre77]; /*[16; ]*/ prv_buff1_imopVarPre79[key_buff2[i_imopVarPre77]]++; } /*[16; ]*/ /*[16; ]*/ /*[16; ]*/ /*[16; ]*/ for (i_imopVarPre77 = 0; i_imopVarPre77 < (1 << 11) - 1; i_imopVarPre77++) { /*[16; ]*/ /*[16; ]*/ prv_buff1_imopVarPre79[i_imopVarPre77 + 1] += prv_buff1_imopVarPre79[i_imopVarPre77]; } /*[16; ]*/ // #pragma omp dummyFlush CRITICAL_START written([key_buff2.f]) read([key_buff1.f, key_buff1]) /*[16; ]*/ #pragma omp critical { /*[16; ]*/ /*[16; ]*/ /*[16; ]*/ /*[16; ]*/ /*[16; ]*/ for (i_imopVarPre77 = 0; i_imopVarPre77 < (1 << 11); i_imopVarPre77++) { /*[16; ]*/ /*[16; ]*/ key_buff1[i_imopVarPre77] += prv_buff1_imopVarPre79[i_imopVarPre77]; } } /*[16; ]*/ // #pragma omp dummyFlush CRITICAL_END written([key_buff1.f]) read([key_buff1.f, test_rank_array.f, key_buff1, test_rank_array, _imopVarPre35, passed_verification, partial_verify_vals, partial_verify_vals.f, printf]) /*[16; ]*/ #pragma omp master { /*[16; ]*/ /*[16; ]*/ /*[16; ]*/ /*[16; ]*/ /*[16; ]*/ for (i_imopVarPre77 = 0; i_imopVarPre77 < 5; i_imopVarPre77++) { /*[16; ]*/ /*[16; ]*/ k_imopVarPre78 = partial_verify_vals[i_imopVarPre77]; /*[16; ]*/ int _imopVarPre35; /*[16; ]*/ _imopVarPre35 = 0 <= k_imopVarPre78; /*[16; ]*/ /*[16; ]*/ if (_imopVarPre35) { /*[16; ]*/ 
/*[16; ]*/ _imopVarPre35 = k_imopVarPre78 <= (1 << 16) - 1; } /*[16; ]*/ /*[16; ]*/ if (_imopVarPre35) { /*[16; ]*/ /*[16; ]*/ /*[16; ]*/ switch ('S') { /*[]*/ /*[16; ]*/ /*[16; ]*/ case 'S': if (i_imopVarPre77 <= 2) { /*[16; ]*/ /*[16; ]*/ /*[16; ]*/ if (key_buff1[k_imopVarPre78 - 1] != test_rank_array[i_imopVarPre77] + iteration) { /*[16; ]*/ /*[16; ]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i_imopVarPre77); /*[16; ]*/ } else { /*[16; ]*/ /*[16; ]*/ passed_verification++; } } else { /*[16; ]*/ /*[16; ]*/ /*[16; ]*/ if (key_buff1[k_imopVarPre78 - 1] != test_rank_array[i_imopVarPre77] - iteration) { /*[16; ]*/ /*[16; ]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i_imopVarPre77); /*[16; ]*/ } else { /*[16; ]*/ /*[16; ]*/ passed_verification++; } } /*[16; ]*/ break; /*[]*/ /*[]*/ case 'W': if (i_imopVarPre77 < 2) { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k_imopVarPre78 - 1] != test_rank_array[i_imopVarPre77] + (iteration - 2)) { /*[]*/ /*[]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i_imopVarPre77); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } else { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k_imopVarPre78 - 1] != test_rank_array[i_imopVarPre77] - iteration) { /*[]*/ /*[]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i_imopVarPre77); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } /*[]*/ break; /*[]*/ /*[]*/ case 'A': if (i_imopVarPre77 <= 2) { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k_imopVarPre78 - 1] != test_rank_array[i_imopVarPre77] + (iteration - 1)) { /*[]*/ /*[]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i_imopVarPre77); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } else { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k_imopVarPre78 - 1] != test_rank_array[i_imopVarPre77] - (iteration - 1)) { /*[]*/ /*[]*/ printf("Failed partial verification: " "iteration %d, 
test key %d\n", iteration, i_imopVarPre77); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } /*[]*/ break; /*[]*/ case 'B': ; /*[]*/ int _imopVarPre36; /*[]*/ int _imopVarPre37; /*[]*/ _imopVarPre36 = i_imopVarPre77 == 1; /*[]*/ /*[]*/ if (!_imopVarPre36) { /*[]*/ /*[]*/ _imopVarPre37 = i_imopVarPre77 == 2; /*[]*/ /*[]*/ if (!_imopVarPre37) { /*[]*/ /*[]*/ _imopVarPre37 = i_imopVarPre77 == 4; } /*[]*/ _imopVarPre36 = _imopVarPre37; } /*[]*/ /*[]*/ if (_imopVarPre36) { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k_imopVarPre78 - 1] != test_rank_array[i_imopVarPre77] + iteration) { /*[]*/ /*[]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i_imopVarPre77); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } else { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k_imopVarPre78 - 1] != test_rank_array[i_imopVarPre77] - iteration) { /*[]*/ /*[]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i_imopVarPre77); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } /*[]*/ break; /*[]*/ /*[]*/ case 'C': if (i_imopVarPre77 <= 2) { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k_imopVarPre78 - 1] != test_rank_array[i_imopVarPre77] + iteration) { /*[]*/ /*[]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i_imopVarPre77); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } else { /*[]*/ /*[]*/ /*[]*/ if (key_buff1[k_imopVarPre78 - 1] != test_rank_array[i_imopVarPre77] - iteration) { /*[]*/ /*[]*/ printf("Failed partial verification: " "iteration %d, test key %d\n", iteration, i_imopVarPre77); /*[]*/ } else { /*[]*/ /*[]*/ passed_verification++; } } /*[]*/ break; } } } /*[16; ]*/ /*[16; ]*/ if (iteration == 10) { /*[16; ]*/ /*[16; ]*/ key_buff_ptr_global = key_buff1; } } /*[16; ]*/ // #pragma omp dummyFlush BARRIER_START written([key_buff_ptr_global, passed_verification]) read([key_array, key_array.f, key_buff1, partial_verify_vals, test_index_array.f, test_index_array, printf]) /*[16; 
]*/ #pragma omp barrier } /*[17; 7; ]*/ // #pragma omp dummyFlush BARRIER_START written([passed_verification]) read([key_array, key_buff2, key_buff1.f, key_array.f, key_buff2.f, key_buff_ptr_global, c_print_results, _imopVarPre42, _imopVarPre43, timer_read, full_verify, printf, nthreads, nullCell, timecounter, passed_verification, timer_stop]) /*[17; 7; ]*/ #pragma omp barrier /*[8; 16; ]*/ #pragma omp master { /*[8; 16; ]*/ /*[8; 16; ]*/ timer_stop(0); /*[8; 16; ]*/ /*[8; 16; ]*/ timecounter = timer_read(0); /*[8; 16; ]*/ /*[8; 16; ]*/ full_verify(); /*[8; 16; ]*/ /*[8; 16; ]*/ /*[8; 16; ]*/ if (passed_verification != 5 * 10 + 1) { /*[8; 16; ]*/ /*[8; 16; ]*/ passed_verification = 0; } /*[8; 16; ]*/ _imopVarPre42 = ((double) (10 * (1 << 16))) / timecounter / 1000000.; /*[8; 16; ]*/ _imopVarPre43 = (1 << 16); /*[8; 16; ]*/ c_print_results("IS", 'S', _imopVarPre43, 0, 0, 10, nthreads, timecounter, _imopVarPre42, "keys ranked", passed_verification, "3.0 structured", "21 Jul 2017", "gcc", "gcc", "(none)", "-I../common", "-O3 -fopenmp", "-O3 -fopenmp", "randlc"); /*[8; 16; ]*/ } } }
GB_binop__lxor_int16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__lxor_int16)
// A.*B function (eWiseMult):       GB (_AemultB_01__lxor_int16)
// A.*B function (eWiseMult):       GB (_AemultB_02__lxor_int16)
// A.*B function (eWiseMult):       GB (_AemultB_03__lxor_int16)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__lxor_int16)
// A*D function (colscale):         GB (_AxD__lxor_int16)
// D*A function (rowscale):         GB (_DxB__lxor_int16)
// C+=B function (dense accum):     GB (_Cdense_accumB__lxor_int16)
// C+=b function (dense accum):     GB (_Cdense_accumb__lxor_int16)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__lxor_int16)
// C=scalar+B                       GB (_bind1st__lxor_int16)
// C=scalar+B'                      GB (_bind1st_tran__lxor_int16)
// C=A+scalar                       GB (_bind2nd__lxor_int16)
// C=A'+scalar                      GB (_bind2nd_tran__lxor_int16)

// C type:   int16_t
// A type:   int16_t
// B,b type: int16_t

// BinaryOp: cij = ((aij != 0) != (bij != 0))

// NOTE(review): every function below is a thin wrapper whose loop body comes
// from a generic template file (#include'd inside the function).  The macros
// that follow configure those templates for the LXOR operator on int16_t.

#define GB_ATYPE \
    int16_t

#define GB_BTYPE \
    int16_t

#define GB_CTYPE \
    int16_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    int16_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    int16_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    int16_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = ((x != 0) != (y != 0)) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LXOR || GxB_NO_INT16 || GxB_NO_LXOR_INT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// (LXOR is none of these, so this variant is compiled out for this operator.)

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__lxor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__lxor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__lxor_int16)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int16_t
        int16_t bwork = (*((int16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__lxor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__lxor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *restrict Cx = (int16_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__lxor_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__lxor_int16)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__lxor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else

    #if GB_BINOP_FLIP

        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }

    #else

        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"

    #endif

    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__lxor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__lxor_int16)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__lxor_int16)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t   x = (*((int16_t *) x_input)) ;
    int16_t *Bx = (int16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap pattern
        if (!GBB (Bb, p)) continue ;
        int16_t bij = GBX (Bx, p, false) ;
        Cx [p] = ((x != 0) != (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__lxor_int16)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    int16_t *Cx = (int16_t *) Cx_output ;
    int16_t *Ax = (int16_t *) Ax_input ;
    int16_t   y = (*((int16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap pattern
        if (!GBB (Ab, p)) continue ;
        int16_t aij = GBX (Ax, p, false) ;
        Cx [p] = ((aij != 0) != (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = ((x != 0) != (aij != 0)) ;        \
}

GrB_Info GB (_bind1st_tran__lxor_int16)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t x = (*((const int16_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE to its original definition
    #undef  GB_ATYPE
    #define GB_ATYPE \
        int16_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    int16_t aij = GBX (Ax, pA, false) ;         \
    Cx [pC] = ((aij != 0) != (y != 0)) ;        \
}

GrB_Info GB (_bind2nd_tran__lxor_int16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int16_t y = (*((const int16_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
dbscan_vp.h
#ifndef DBSCAN_VP_H #define DBSCAN_VP_H #include "vptree.h" #include <Eigen/Dense> namespace clustering { class DBSCAN_VP : private boost::noncopyable { private: static inline double dist( const Eigen::VectorXf& p1, const Eigen::VectorXf& p2 ) { return ( p1 - p2 ).norm(); } const Dataset::Ptr m_dset; public: typedef VPTREE< Eigen::VectorXf, dist > TVpTree; typedef std::vector< int32_t > Labels; typedef boost::shared_ptr< DBSCAN_VP > Ptr; DBSCAN_VP( const Dataset::Ptr dset ) : m_dset( dset ) , m_fit_time( .0 ) , m_predict_time( .0 ) { } ~DBSCAN_VP() { } TVpTree::Ptr get_vp() const { return m_vp_tree; } void fit() { const Dataset::DataContainer& d = m_dset->data(); const double start = omp_get_wtime(); m_vp_tree = boost::make_shared< TVpTree >(); m_vp_tree->create( m_dset ); const size_t dlen = d.size(); prepare_labels( dlen ); m_fit_time = omp_get_wtime() - start; } const std::vector< double > predict_eps( size_t k ) { const Dataset::DataContainer& d = m_dset->data(); std::vector< double > r( d.size(), 0.0 ); omp_set_dynamic( 1 ); #pragma omp parallel for for ( size_t i = 0; i < d.size(); ++i ) { TVpTree::TNeighborsList nlist; m_vp_tree->search_by_k( d[i], k, nlist, true ); if ( nlist.size() >= k ) { r[i] = nlist[0].second; } } std::sort( r.begin(), r.end() ); return std::move( r ); } uint32_t predict( double eps, size_t min_elems ) { std::unique_ptr< std::vector< uint32_t > > candidates( new std::vector< uint32_t >() ); std::unique_ptr< std::vector< uint32_t > > new_candidates( new std::vector< uint32_t >() ); int32_t cluster_id = 0; TVpTree::TNeighborsList index_neigh; TVpTree::TNeighborsList n_neigh; const double start = omp_get_wtime(); const Dataset::DataContainer& d = m_dset->data(); const size_t dlen = d.size(); for ( uint32_t pid = 0; pid < dlen; ++pid ) { if ( pid % 10000 == 0 ) VLOG( 1 ) << "progress: pid = " << pid << " " << ( float( pid ) / float( dlen ) ) * 100 << "%"; if ( m_labels[pid] >= 0 ) continue; find_neighbors( d, eps, pid, index_neigh ); // 
VLOG( 1 ) << "Analyzing pid " << pid << " Neigh size " << index_neigh.size(); if ( index_neigh.size() < min_elems ) continue; m_labels[pid] = cluster_id; //VLOG( 1 ) << "pid = " << pid << " neig = " << index_neigh.size(); candidates->clear(); for ( const auto& nn : index_neigh ) { if ( m_labels[nn.first] >= 0 ) continue; m_labels[nn.first] = cluster_id; // find_neighbors( d, eps, nn.first, n_neigh ); // VLOG( 1 ) << "nn.first = " << nn.first << " neig = " << n_neigh.size(); // if ( n_neigh.size() >= min_elems ) { candidates->push_back( nn.first ); // } } while ( candidates->size() > 0 ) { // std::cout << "\tcandidates = " << candidates.size() << std::endl; VLOG( 1 ) << "candidates size " << candidates->size(); new_candidates->clear(); const float csize = float( candidates->size() ); #pragma omp parallel for ordered schedule( dynamic ) for ( size_t j = 0; j < candidates->size(); ++j ) { // for ( const auto& c_pid : *candidates ) { TVpTree::TNeighborsList c_neigh; const uint32_t c_pid = candidates->at( j ); // VLOG( 1 ) << "c_pid = " << c_pid << " " << m_labels[c_pid]; // if ( m_labels[c_pid] >= 0 && m_labels[c_pid] != cluster_id ) // continue; find_neighbors( d, eps, c_pid, c_neigh ); if ( c_neigh.size() < min_elems ) continue; // VLOG( 1 ) << "c_pid = " << c_pid << " neig = " << c_neigh.size(); #pragma omp ordered { for ( const auto& nn : c_neigh ) { if ( m_labels[nn.first] >= 0 ) continue; m_labels[nn.first] = cluster_id; // find_neighbors( d, eps, nn.first, n_neigh ); // VLOG( 1 ) << "nn.first = " << nn.first << " neig = " << n_neigh.size(); // if ( n_neigh.size() >= min_elems ) { new_candidates->push_back( nn.first ); } if ( j % 1000 == 0 ) VLOG( 1 ) << "sub progress: j = " << j << " " << ( float( j ) / csize ) * 100 << "% " << new_candidates->size(); } // } } VLOG( 1 ) << "new candidates = " << new_candidates->size(); std::swap( candidates, new_candidates ); } ++cluster_id; } m_predict_time = omp_get_wtime() - start; return cluster_id; } void reset() { 
m_vp_tree.reset(); m_labels.clear(); } const Labels& get_labels() const { return m_labels; } const double get_fit_time() const { return m_fit_time; } const double get_predict_time() const { return m_predict_time; } private: void find_neighbors( const Dataset::DataContainer& d, double eps, uint32_t pid, TVpTree::TNeighborsList& neighbors ) { neighbors.clear(); m_vp_tree->search_by_dist( d[pid], eps, neighbors ); } Labels m_labels; void prepare_labels( size_t s ) { m_labels.resize( s ); for ( auto& l : m_labels ) { l = -1; } } TVpTree::Ptr m_vp_tree; double m_fit_time; double m_predict_time; }; // std::ostream& operator<<( std::ostream& o, DBSCAN& d ); } #endif // DBSCAN_VP_H
fox_floats_timer_caching_omp_fileIO_benchmark.c
/* fox_floats_timer_caching_omp_fileIO_benchmark.c -- uses Fox's algorithm to multiply two square matrices * * Implementation of parallel matrix multiplication: * LaTeX: $C_{i,j} = \sum_{k} A_{i,k}B_{k,j}$ * * Input: * Input Matrix file name: A.dat, B.dat * * Output: * Output Matrix file name: C.dat * Output Sub-matrices file name: SubMatrices.dat * * Notes: * 1. Assumes the number of processes is a perfect square * 2. The array member of the matrices is statically allocated * * See Chap 7, pp. 113 & ff and pp. 125 & ff in PPMPI */ /* Compiler command: * mpiicc -O3 -qopenmp -qopt-report-phase=vec -qopt-report=3 fox_floats_timer_caching_omp_fileIO_benchmark.c * -o fox_floats_timer_caching_omp_fileIO_benchmark * * Run command: * mpirun -n -4 ./fox_floats_timer_caching_omp */ /* Head files */ #include <stdio.h> #include <math.h> #include <stdlib.h> #include <mpi.h> #include <omp.h> // define problem scale, matrix row/col size #define PROBLEM_SCALE 8192 // define whether or not Print Matices in the Command Line #define PRINT_A 0 #define PRINT_B 0 #define PRINT_C 0 #define PRINT_LOCAL_A 0 #define PRINT_LOCAL_B 0 #define PRINT_LOCAL_C 0 // define float precision, 4 byte single-precision float or 8 byte double-precision float #define FLOAT double #define FLOAT_MPI MPI_DOUBLE // Define threads speed-up affnity in the computing #define NUM_THREADS 2 // Define threads affinity "scatter" or "compact" #define AFFINITY "KMP_AFFINITY = compact" /* Type define structure of process grid */ typedef struct { int p; /* Total number of processes */ MPI_Comm comm; /* Communicator for entire grid */ MPI_Comm row_comm; /* Communicator for my row */ MPI_Comm col_comm; /* Communicator for my col */ int q; /* Order of grid */ int my_row; /* My row number */ int my_col; /* My column number */ int my_rank; /* My rank in the grid comm */ } GRID_INFO_T; /* Type define structure of local matrix */ #define MAX 2097152 // Maximum number of elements in the array that store the local matrix (2^21) 
typedef struct { int n_bar; #define Order(A) ((A)->n_bar) // defination with parameters FLOAT entries[MAX]; #define Entry(A,i,j) (*(((A)->entries) + ((A)->n_bar)*(i) + (j))) // defination with parameters, Array dereference } LOCAL_MATRIX_T; /* Function Declarations */ LOCAL_MATRIX_T* Local_matrix_allocate(int n_bar); void Free_local_matrix(LOCAL_MATRIX_T** local_A); void Read_matrix_A(char* prompt, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid, int n); // Read matrix A from a file void Read_matrix_B(char* prompt, LOCAL_MATRIX_T* local_B, // for continuous memory access, local A(i,k)*B(k,j) = A(i,k)*B^{T}(j,k) GRID_INFO_T* grid, int n); // Read matrix B from a file void Print_matrix_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid, int n); // Print matrix A in the command line void Print_matrix_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid, int n); // Print matrix B in the command line void Print_matrix_C(char* title, LOCAL_MATRIX_T* local_C, GRID_INFO_T* grid, int n); // Print matrix C in the command line void Set_to_zero(LOCAL_MATRIX_T* local_A); void Local_matrix_multiply(LOCAL_MATRIX_T* local_A, LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C); void Build_matrix_type(LOCAL_MATRIX_T* local_A); MPI_Datatype local_matrix_mpi_t; LOCAL_MATRIX_T* temp_mat; // global LOCAL_MATRIX_T* type pointer void Print_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); void Print_local_matrices_B(char* title, LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid); void Print_local_matrices_C(char* title, LOCAL_MATRIX_T* local_B, GRID_INFO_T* grid); void Write_matrix_C(char* title, LOCAL_MATRIX_T* local_C, GRID_INFO_T* grid, int n); // Write matrix multiplication to a file void Write_local_matrices_A(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); // Write local matrix A to a file void Write_local_matrices_B(char* title, 
LOCAL_MATRIX_T* local_B, // Speical print function for local matrix B^{T}(j,k) GRID_INFO_T* grid); // Write local matrix B to a file void Write_local_matrices_C(char* title, LOCAL_MATRIX_T* local_A, GRID_INFO_T* grid); // Write local matrix C to a file /*********************************************************/ main(int argc, char* argv[]) { FILE *fp; int p; int my_rank; GRID_INFO_T grid; LOCAL_MATRIX_T* local_A; LOCAL_MATRIX_T* local_B; LOCAL_MATRIX_T* local_C; int n; int n_bar; double timer_start; double timer_end; int content; int i; int j; void Setup_grid(GRID_INFO_T* grid); void Fox(int n, GRID_INFO_T* grid, LOCAL_MATRIX_T* local_A, LOCAL_MATRIX_T* local_B, LOCAL_MATRIX_T* local_C); // Matrix Generator fp = fopen("A.dat", "w"); // Generate and print matrix A into a file for (i = 0; i < PROBLEM_SCALE; i++) { for (j = 0; j < PROBLEM_SCALE; j++) if(i == j){ fprintf(fp,"%d ", 1); } else { fprintf(fp,"%d ", 0); } fprintf(fp,"\n"); } fclose(fp); fp = fopen("B.dat", "w"); // Generate and print matrix B into a file for (i = 0; i < PROBLEM_SCALE; i++){ for (j = 0; j < PROBLEM_SCALE; j++) fprintf(fp,"%d ", (i*PROBLEM_SCALE)+j); fprintf(fp, "\n"); } fclose(fp); // SPMD Mode start from here (Processess fork from here) MPI_Init(&argc, &argv); // MPI initializing MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator // Initial OpenMP Environment omp_set_num_threads(NUM_THREADS); kmp_set_defaults(AFFINITY); Setup_grid(&grid); // Set up Processess grid if (my_rank == 0) { fp = fopen("A.dat","r"); n = 0; while((content = fgetc(fp)) != EOF) { //printf("fgetc = %d\n", content); if(content != 0x20 && content != 0x0A) n++; } fclose(fp); n = (int) sqrt((double) n); printf("We read the order of the matrices from A.dat is\n %d\n", n); // while(fgetc(fp) != EOF) n++; // printf("What's the order of the matrices?\n"); // scanf("%d", &n); // Overall Matrix's Order } MPI_Bcast(&n, 1, MPI_INT, 0, MPI_COMM_WORLD); // MPI broadcast the overall matrix's order 
n_bar = n/grid.q; // \bar n is the local matrix's order local_A = Local_matrix_allocate(n_bar); // Allocate local matrix A Order(local_A) = n_bar; // Local matrix A's order Read_matrix_A("Read A from A.dat", local_A, &grid, n); // Read local matrices A from process 0 by using stdin, and send them to each process (Procedure) if (PRINT_A == 1) Print_matrix_A("We read A =", local_A, &grid, n);// Print local matrices A from process 0 by using stdout, and send them to each process (Procedure) local_B = Local_matrix_allocate(n_bar); // Allocate local matrix Order(local_B) = n_bar; // Local matrix B's order Read_matrix_B("Read B from B.dat", local_B, &grid, n); // Read local matrix B as it's local transpose from process 0 by using stdin, and send them to each process (Procedure) if (PRINT_B == 1) Print_matrix_B("We read B =", local_B, &grid, n);// Print local matrix B as it's local transpose from process 0 by using stdout, and send them to each process (Procedure) Build_matrix_type(local_A); // Buid local_A's MPI matrix data type temp_mat = Local_matrix_allocate(n_bar); // Allocate temporary matrix of order n $\time$ n local_C = Local_matrix_allocate(n_bar); // Allocate matrix local_C Order(local_C) = n_bar; // Set matrix local_C's order MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier timer_start = MPI_Wtime(); // Get the MPI wall time Fox(n, &grid, local_A, local_B, local_C); // FOX parallel matrix multiplication Algorithm implement function timer_end = MPI_Wtime(); // Get the MPI wall time MPI_Barrier(MPI_COMM_WORLD); // Set the MPI process barrier Write_matrix_C("Write C into the C.dat", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result) if (PRINT_C == 1) Print_matrix_C("The product is", local_C, &grid, n); // Print matrix local_C (parallel matrix multiplication result) Write_local_matrices_A("Write split of local matrix A into local_A.dat", local_A, &grid); // Write local matrix A into file if (PRINT_LOCAL_A == 1) 
Print_local_matrices_A("Split of local matrix A", local_A, &grid); // Print matrix A split in processess Write_local_matrices_B("Write split of local matrix B into local_B.dat", local_B, &grid); // Write local matrix B into file, special for row-major storage if (PRINT_LOCAL_B == 1) Print_local_matrices_B("Split of local matrix B", local_B, &grid); // Print matrix B split in processess, special for row-major storage Write_local_matrices_C("Write split of local matrix C into local_C.dat", local_C, &grid); // Print matrix C split in processess if (PRINT_LOCAL_C == 1) Print_local_matrices_C("Split of local matrix C", local_C, &grid); // Print matrix C split in processess Free_local_matrix(&local_A); // Free local matrix local_A Free_local_matrix(&local_B); // Free local matrix local_B Free_local_matrix(&local_C); // Free local matrix local_C if(my_rank == 0) printf("Parallel Fox Matrix Multiplication Elapsed time:\n %30.20E seconds\n", timer_end-timer_start); MPI_Finalize(); // MPI finalize, processes join and resource recycle } /* main */ /*********************************************************/ void Setup_grid( GRID_INFO_T* grid /* out */) { int old_rank; int dimensions[2]; int wrap_around[2]; int coordinates[2]; int free_coords[2]; /* Set up Global Grid Information */ MPI_Comm_size(MPI_COMM_WORLD, &(grid->p)); MPI_Comm_rank(MPI_COMM_WORLD, &old_rank); /* We assume p is a perfect square */ // but what if it's not a perfect square grid->q = (int) sqrt((double) grid->p); dimensions[0] = dimensions[1] = grid->q; /* We want a circular shift in second dimension. 
*/ /* Don't care about first */ wrap_around[0] = wrap_around[1] = 1; MPI_Cart_create(MPI_COMM_WORLD, 2, dimensions, wrap_around, 1, &(grid->comm)); MPI_Comm_rank(grid->comm, &(grid->my_rank)); MPI_Cart_coords(grid->comm, grid->my_rank, 2, coordinates); grid->my_row = coordinates[0]; grid->my_col = coordinates[1]; /* Set up row communicators */ free_coords[0] = 0; free_coords[1] = 1; MPI_Cart_sub(grid->comm, free_coords, &(grid->row_comm)); /* Set up column communicators */ free_coords[0] = 1; free_coords[1] = 0; MPI_Cart_sub(grid->comm, free_coords, &(grid->col_comm)); } /* Setup_grid */ /*********************************************************/ void Fox( int n /* in */, GRID_INFO_T* grid /* in */, LOCAL_MATRIX_T* local_A /* in */, LOCAL_MATRIX_T* local_B /* in */, LOCAL_MATRIX_T* local_C /* out */) { LOCAL_MATRIX_T* temp_A; /* Storage for the sub- */ /* matrix of A used during */ /* the current stage */ int stage; int bcast_root; int n_bar; /* n/sqrt(p) */ int source; int dest; MPI_Status status; n_bar = n/grid->q; Set_to_zero(local_C); /* Calculate addresses for row circular shift of B */ source = (grid->my_row + 1) % grid->q; dest = (grid->my_row + grid->q - 1) % grid->q; /* Set aside storage for the broadcast block of A */ temp_A = Local_matrix_allocate(n_bar); for (stage = 0; stage < grid->q; stage++) { bcast_root = (grid->my_row + stage) % grid->q; if (bcast_root == grid->my_col) { // Process P_{ii} broadcast A_{ii} in process gird's row commnunicator MPI_Bcast(local_A, 1, local_matrix_mpi_t, bcast_root, grid->row_comm); Local_matrix_multiply(local_A, local_B, local_C); } else { // temp_A is a buffer for process P_{ij} to store A_{ij} MPI_Bcast(temp_A, 1, local_matrix_mpi_t, bcast_root, grid->row_comm); Local_matrix_multiply(temp_A, local_B, local_C); } MPI_Sendrecv_replace(local_B, 1, local_matrix_mpi_t, // MPI send and receive with single buffer dest, 0, source, 0, grid->col_comm, &status); // Circular shift of process grid B's row, after local 
multiplication operation } /* for */ } /* Fox */ /*********************************************************/ LOCAL_MATRIX_T* Local_matrix_allocate(int local_order) { LOCAL_MATRIX_T* temp; temp = (LOCAL_MATRIX_T*) malloc(sizeof(LOCAL_MATRIX_T)); return temp; } /* Local_matrix_allocate */ /*********************************************************/ void Free_local_matrix( LOCAL_MATRIX_T** local_A_ptr /* in/out */) { free(*local_A_ptr); } /* Free_local_matrix */ /*********************************************************/ /* Read and distribute matrix for matrix A: * foreach global row of the matrix, * foreach grid column * read a block of n_bar floats on process 0 * and send them to the appropriate process. */ void Read_matrix_A( char* prompt /* in */, LOCAL_MATRIX_T* local_A /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int dest; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { // Process 0 read matrix input from stdin and send them to other processess fp = fopen("A.dat","r"); temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT)); printf("%s\n", prompt); fflush(stdout); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_A); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &dest); if (dest == 0) { for (mat_col = 0; mat_col < Order(local_A); mat_col++) fscanf(fp, "%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); /* scanf("%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); */ } else { for(mat_col = 0; mat_col < Order(local_A); mat_col++) fscanf(fp,"%lf", temp + mat_col); // scanf("%lf", temp + mat_col); MPI_Send(temp, Order(local_A), FLOAT_MPI, dest, 0, grid->comm); } } } free(temp); fclose(fp); } else { // Other processess receive matrix from process 0 for (mat_row = 0; mat_row < Order(local_A); mat_row++) MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A), 
FLOAT_MPI, 0, 0, grid->comm, &status); } } /* Read_matrix */ /*********************************************************/ /* Read and distribute matrix for local matrix B's transpose: * foreach global row of the matrix, * foreach grid column * read a block of n_bar floats on process 0 * and send them to the appropriate process. */ void Read_matrix_B( char* prompt /* in */, LOCAL_MATRIX_T* local_B /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int dest; int coords[2]; FLOAT *temp; MPI_Status status; if (grid->my_rank == 0) { // Process 0 read matrix input from stdin and send them to other processess fp = fopen("B.dat","r"); temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); printf("%s\n", prompt); fflush(stdout); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_B); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &dest); if (dest == 0) { // process 0 (local) for (mat_col = 0; mat_col < Order(local_B); mat_col++) fscanf(fp, "%lf", (local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage /* scanf("%lf", (local_B->entries)+mat_col*Order(local_B)+mat_row); // switch rows and colums in local_B, for column major storage */ /* scanf("%lf", (local_A->entries)+mat_row*Order(local_A)+mat_col); */ } else { for(mat_col = 0; mat_col < Order(local_B); mat_col++) fscanf(fp, "%lf", temp + mat_col); // scanf("%lf", temp + mat_col); MPI_Send(temp, Order(local_B), FLOAT_MPI, dest, 0, grid->comm); } } } free(temp); fclose(fp); } else { // Other processess receive matrix from process 0 temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); // switch rows and colums in local_B, for column major storage for (mat_col = 0; mat_col < Order(local_B); mat_col++) { MPI_Recv(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm, &status); // switch rows and colums in 
local_B, for column major storage for(mat_row = 0; mat_row < Order(local_B); mat_row++) Entry(local_B, mat_row, mat_col) = *(temp + mat_row); // switch rows and colums in local_B, for column major storage /* MPI_Recv(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI, 0, 0, grid->comm, &status); */ } free(temp); } } /* Read_matrix_B */ /*********************************************************/ /* Recive and Print Matrix A: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Print_matrix_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) malloc(Order(local_A)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_A); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_A); mat_col++) printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_A), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_A); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { for (mat_row = 0; mat_row < Order(local_A); mat_row++) MPI_Send(&Entry(local_A, mat_row, 0), Order(local_A), FLOAT_MPI, 0, 0, grid->comm); } } /* Print_matrix_A */ /*********************************************************/ /* Recive and Print Matrix for local matrix B's transpose: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and 
print them */ void Print_matrix_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_B); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_B); mat_col++) printf("%20.15E ", Entry(local_B, mat_col, mat_row)); // switch rows and colums in local_B, for column major storage // printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_B), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_B); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { temp = (FLOAT*) malloc(Order(local_B)*sizeof(FLOAT)); for (mat_col = 0; mat_col < Order(local_B); mat_col++) { for(mat_row = 0; mat_row < Order(local_B); mat_row++) *(temp+mat_row) = Entry(local_B, mat_row, mat_col); // switch rows and colums in local_B, for column major storage MPI_Send(temp, Order(local_B), FLOAT_MPI, 0, 0, grid->comm); } free(temp); } } /* Print_matrix_B */ /*********************************************************/ /* Recive and Print Matrix A: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Print_matrix_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { temp = (FLOAT*) 
malloc(Order(local_C)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_C); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_C); mat_col++) printf("%20.15E ", Entry(local_C, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_C); mat_col++) printf("%20.15E ", temp[mat_col]); } } printf("\n"); } free(temp); } else { for (mat_row = 0; mat_row < Order(local_C); mat_row++) MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C), FLOAT_MPI, 0, 0, grid->comm); } } /* Print_matrix_C */ /*********************************************************/ /* Recive and Write Matrix C into a file: * foreach global row of the matrix, * foreach grid column * send n_bar floats to process 0 from each other process * receive a block of n_bar floats on process 0 from other processes and print them */ void Write_matrix_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* out */, GRID_INFO_T* grid /* in */, int n /* in */) { FILE *fp; int mat_row, mat_col; int grid_row, grid_col; int source; int coords[2]; FLOAT* temp; MPI_Status status; if (grid->my_rank == 0) { fp = fopen("C.dat", "w+"); temp = (FLOAT*) malloc(Order(local_C)*sizeof(FLOAT)); printf("%s\n", title); for (mat_row = 0; mat_row < n; mat_row++) { grid_row = mat_row/Order(local_C); coords[0] = grid_row; for (grid_col = 0; grid_col < grid->q; grid_col++) { coords[1] = grid_col; MPI_Cart_rank(grid->comm, coords, &source); if (source == 0) { for(mat_col = 0; mat_col < Order(local_C); mat_col++) fprintf(fp, "%20.15E ", Entry(local_C, mat_row, mat_col)); // printf("%20.15E ", Entry(local_A, mat_row, mat_col)); } else { MPI_Recv(temp, Order(local_C), FLOAT_MPI, source, 0, grid->comm, &status); for(mat_col = 0; mat_col < Order(local_C); 
mat_col++) fprintf(fp, "%20.15E ", temp[mat_col]); // printf("%20.15E ", temp[mat_col]); } } fprintf(fp,"\n"); } free(temp); fclose(fp); } else { for (mat_row = 0; mat_row < Order(local_C); mat_row++) MPI_Send(&Entry(local_C, mat_row, 0), Order(local_C), FLOAT_MPI, 0, 0, grid->comm); } } /* Write_matrix_C */ /*********************************************************/ /* * Set local matrix's element to zero */ void Set_to_zero( LOCAL_MATRIX_T* local_A /* out */) { int i, j; for (i = 0; i < Order(local_A); i++) for (j = 0; j < Order(local_A); j++) Entry(local_A,i,j) = 0.0E0; } /* Set_to_zero */ /*********************************************************/ void Build_matrix_type( LOCAL_MATRIX_T* local_A /* in */) { MPI_Datatype temp_mpi_t; int block_lengths[2]; MPI_Aint displacements[2]; MPI_Datatype typelist[2]; MPI_Aint start_address; MPI_Aint address; MPI_Type_contiguous(Order(local_A)*Order(local_A), FLOAT_MPI, &temp_mpi_t); // Creates a contiguous datatype /* Synopsis int MPI_Type_contiguous(int count, MPI_Datatype oldtype, MPI_Datatype *newtype) Input Parameters count replication count (nonnegative integer) oldtype old datatype (handle) */ block_lengths[0] = block_lengths[1] = 1; typelist[0] = MPI_INT; typelist[1] = temp_mpi_t; MPI_Address(local_A, &start_address); // Gets the address of a location in caller's memory MPI_Address(&(local_A->n_bar), &address); /* Synopsis int MPI_Address(const void *location, MPI_Aint *address) Input Parameters location location in caller memory (choice) Output Parameters address address of location (address integer) */ displacements[0] = address - start_address; MPI_Address(local_A->entries, &address); displacements[1] = address - start_address; MPI_Type_struct(2, block_lengths, displacements, typelist, &local_matrix_mpi_t); // Creates a struct datatype /* Synopsis int MPI_Type_struct(int count, const int *array_of_blocklengths, const MPI_Aint *array_of_displacements, const MPI_Datatype *array_of_types, MPI_Datatype *newtype) Input 
Parameters count number of blocks (integer) -- also number of entries in arrays array_of_types , array_of_displacements and array_of_blocklengths array_of_blocklengths number of elements in each block (array) array_of_displacements byte displacement of each block (array) array_of_types type of elements in each block (array of handles to datatype objects) Output Parameters newtype new datatype (handle) */ MPI_Type_commit(&local_matrix_mpi_t); // Commits the datatype /* Synopsis int MPI_Type_commit(MPI_Datatype *datatype) Input Parameters datatype datatype (handle) */ } /* Build_matrix_type */ /*********************************************************/ /* local matrix multiplication function * withing OpenMP Thread Acceleration */ void Local_matrix_multiply( LOCAL_MATRIX_T* local_A /* in */, LOCAL_MATRIX_T* local_B /* in */, LOCAL_MATRIX_T* local_C /* out */) { int i, j, k; // int my_rank; // MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); // Get my process id in the MPI communicator #pragma omp parallel for private(i, j, k) shared(local_A, local_B, local_C) num_threads(NUM_THREADS) // Threads acceleration upgrade, parallel task split for (i = 0; i < Order(local_A); i++) { // printf("Current in the Fox Kernel:\n my process id is %d, my thread id is %d\n",my_rank,omp_get_thread_num()); for (j = 0; j < Order(local_A); j++) for (k = 0; k < Order(local_B); k++) Entry(local_C,i,j) = Entry(local_C,i,j) // switch rows and colums in local_B, for column major storage + Entry(local_A,i,k)*Entry(local_B,j,k); // continuous memory access, local matrix multiplication A(i,k)*B^T(j,k) /* Entry(local_C,i,j) = Entry(local_C,i,j) + Entry(local_A,i,k)*Entry(local_B,k,j); // non-continuous memory access, A(i,k)*B^T(j,k) is more proper */ } } /* Local_matrix_multiply */ /*********************************************************/ /* Recive and Print Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local 
matrix local_A from other processess */ void Print_local_matrices_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_A); i++) { for (j = 0; j < Order(local_A); j++) printf("%20.15E ", Entry(local_A,i,j)); printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,i,j)); printf("\n"); } } fflush(stdout); } else { MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_A */ /*********************************************************/ /* Recive and Print Local Matrix for local matrix B's transpose: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Print_local_matrices_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_B); i++) { for (j = 0; j < Order(local_B); j++) printf("%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, 
&status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage printf("\n"); } } fflush(stdout); } else { MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_B */ /*********************************************************/ /* Recive and Print Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Print_local_matrices_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* in */, GRID_INFO_T* grid /* in */) { int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { printf("%s\n", title); printf("Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_C); i++) { for (j = 0; j < Order(local_C); j++) printf("%20.15E ", Entry(local_C,i,j)); printf("\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); printf("Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) printf("%20.15E ", Entry(temp_mat,i,j)); printf("\n"); } } fflush(stdout); } else { MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Print_local_matrices_C */ /*********************************************************/ /* Recive and Write Local Matrix A: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void 
Write_local_matrices_A( char* title /* in */, LOCAL_MATRIX_T* local_A /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_A.dat","w+"); printf("%s\n", title); fprintf(fp,"Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_A); i++) { for (j = 0; j < Order(local_A); j++) fprintf(fp,"%20.15E ", Entry(local_A,i,j)); fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,i,j)); fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_A, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_A */ /*********************************************************/ /* Recive and Write Local Matrix for local matrix B's transpose: * Process 0 print local matrix local_A * Other Processess send local matrix local_A to process 0 * And process 0 receive local matrix local_A from other processess */ void Write_local_matrices_B( char* title /* in */, LOCAL_MATRIX_T* local_B /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_B.dat","w+"); printf("%s\n", title); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_B); i++) { for (j = 0; j < Order(local_B); j++) fprintf(fp, "%20.15E ", Entry(local_B,j,i)); // switch rows and colums in local_B, for column major storage fprintf(fp, "\n"); } for (source = 1; 
source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,j,i)); // switch rows and colums in local_B, for column major storage fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_B, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_B */ /*********************************************************/ /* Recive and Write Local Matrix C: * Process 0 print local matrix local_C * Other Processess send local matrix local_C to process 0 * And process 0 receive local matrix local_C from other processess */ void Write_local_matrices_C( char* title /* in */, LOCAL_MATRIX_T* local_C /* in */, GRID_INFO_T* grid /* in */) { FILE *fp; int coords[2]; int i, j; int source; MPI_Status status; // print by process No.0 in process mesh if (grid->my_rank == 0) { fp = fopen("local_C.dat","w+"); printf("%s\n", title); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", grid->my_rank, grid->my_row, grid->my_col); for (i = 0; i < Order(local_C); i++) { for (j = 0; j < Order(local_C); j++) fprintf(fp, "%20.15E ", Entry(local_C,i,j)); fprintf(fp, "\n"); } for (source = 1; source < grid->p; source++) { MPI_Recv(temp_mat, 1, local_matrix_mpi_t, source, 0, grid->comm, &status); MPI_Cart_coords(grid->comm, source, 2, coords); fprintf(fp, "Process %d > grid_row = %d, grid_col = %d\n", source, coords[0], coords[1]); for (i = 0; i < Order(temp_mat); i++) { for (j = 0; j < Order(temp_mat); j++) fprintf(fp, "%20.15E ", Entry(temp_mat,i,j)); fprintf(fp, "\n"); } } fflush(stdout); fclose(fp); } else { MPI_Send(local_C, 1, local_matrix_mpi_t, 0, 0, grid->comm); } } /* Write_local_matrices_C */
depend-3.c
/* { dg-do compile } */
/* { dg-options "-fopenmp" } */

/* Compile-only DejaGnu test: the front end must reject zero-length array
   sections in OpenMP task depend clauses.  Each pragma below contains at
   least one section whose length is zero -- either written explicitly
   ([3:0], [0:0]), implied by an empty tail slice at the upper bound
   ([10:] on a dimension of extent 10), or zero only at run time via the
   variable x -- and each carries the dg-error annotation the test harness
   matches against the diagnostic.  */

void bar (int a[10][10][10]);

void
foo (int a[10][10][10], int **b, int x)
{
  int c[10][10][10];
  /* Constant-length sections.  */
  #pragma omp task depend(out: a[2:4][3:0][:7]) /* { dg-error "zero length array section" } */
  bar (a);
  #pragma omp task depend(inout: b[:7][0:0][:0]) /* { dg-error "zero length array section" } */
  bar (a);
  #pragma omp task depend(in: c[:][:][10:]) /* { dg-error "zero length array section" } */
  bar (c);
  /* Variable-length sections: the zero-length middle/edge slices must
     still be diagnosed even though x is not a compile-time constant.  */
  #pragma omp task depend(out: a[2:4][3:0][:x]) /* { dg-error "zero length array section" } */
  bar (a);
  #pragma omp task depend(inout: b[:x][0:0][:0]) /* { dg-error "zero length array section" } */
  bar (a);
  #pragma omp task depend(in: c[:][x-2:x][10:]) /* { dg-error "zero length array section" } */
  bar (c);
}
ompdynamic.c
#include <stdio.h>
#include <stdlib.h>

/* Demonstrates an OpenMP parallel-for with dynamic scheduling: sums the
 * vector 0..n-1 with a reduction and prints the result (expected
 * n*(n-1)/2 = 523776 for n = 1024). */
int main(int argc, char* argv[])
{
    printf("OpenMP Dynamic Test Case\n");

    const int n = 1024;
    double* the_array = (double*) malloc(sizeof(double) * n);
    if (the_array == NULL) {            /* unchecked malloc was a NULL-deref risk */
        fprintf(stderr, "allocation of %d doubles failed\n", n);
        return EXIT_FAILURE;
    }

    for (int i = 0; i < n; ++i) {
        the_array[i] = i;
    }

    /* BUG FIX: sum must be initialized.  The reduction clause gives each
     * thread a zero-initialized private copy, but at the end of the region
     * those partial sums are combined into the ORIGINAL variable -- so an
     * uninitialized sum made the printed total garbage (and reading an
     * uninitialized object is undefined behavior). */
    double sum = 0.0;
    #pragma omp parallel for schedule(dynamic) reduction(+:sum)
    for (int i = 0; i < n; ++i) {
        sum += the_array[i];
    }

    printf("Dynamic vector sum is: %f\n", sum);

    free(the_array);
    return 0;                           /* original fell off the end of main */
}
matmul.c
//===-- matmul.c - Different implementations of matrix multiplies -*- C -*-===//
//
// Part of the LOMP Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include <stdio.h>
#include <stdlib.h>

#include <omp.h>

#define N 3072
#define DUMP_MATRIX 0

/* Sequential triple-loop multiply, C += A * B.  The i-k-j loop order keeps
 * the innermost accesses to B and C contiguous in row-major storage. */
void matmul_seq(double * C, double * A, double * B, size_t n) {
  for (size_t i = 0; i < n; ++i) {
    for (size_t k = 0; k < n; ++k) {
      for (size_t j = 0; j < n; ++j) {
        C[i * n + j] += A[i * n + k] * B[k * n + j];
      }
    }
  }
}

/* Parallel version of matmul_seq: rows of C are distributed across threads
 * in static chunks of 8; n is firstprivate so each thread keeps a local
 * copy of the trip count. */
void matmul_par(double * C, double * A, double * B, size_t n) {
#pragma omp parallel for schedule(static, 8) firstprivate(n)
  for (size_t i = 0; i < n; ++i) {
    for (size_t k = 0; k < n; ++k) {
      for (size_t j = 0; j < n; ++j) {
        C[i * n + j] += A[i * n + k] * B[k * n + j];
      }
    }
  }
}

/* Initialize C to zero and A/B to constants so both multiplies start from
 * identical state and the result sum is easy to sanity-check. */
void init_mat(double * C, double * A, double * B, size_t n) {
  for (size_t i = 0; i < n; ++i) {
    for (size_t j = 0; j < n; ++j) {
      C[i * n + j] = 0.0;
      A[i * n + j] = 0.5;
      B[i * n + j] = 0.25;
    }
  }
}

/* Print an n-by-n matrix, one row per line (debug aid, see DUMP_MATRIX). */
void dump_mat(double * mtx, size_t n) {
  for (size_t i = 0; i < n; ++i) {
    for (size_t j = 0; j < n; ++j) {
      printf("%f ", mtx[i * n + j]);
    }
    printf("\n");
  }
}

/* Sum all elements of an n-by-n matrix; used as a cheap result checksum. */
double sum_mat(double * mtx, size_t n) {
  double sum = 0.0;
  for (size_t i = 0; i < n; ++i) {
    for (size_t j = 0; j < n; ++j) {
      sum += mtx[i * n + j];
    }
  }
  return sum;
}

/* Driver: time the sequential multiply, then the parallel one on identical
 * inputs, reporting the wall time and speed-up of each against the serial
 * baseline. */
int main(void) {
  double ts, te;
  double t_seq;
  double * C;
  double * A;
  double * B;

  C = (double *)malloc(sizeof(*C) * N * N);
  A = (double *)malloc(sizeof(*A) * N * N);
  B = (double *)malloc(sizeof(*B) * N * N);
  /* BUG FIX: each allocation is ~72 MB and was used unchecked -- a failed
   * malloc would have crashed inside init_mat. */
  if (C == NULL || A == NULL || B == NULL) {
    fprintf(stderr, "matrix allocation failed (N=%d)\n", N);
    free(C);
    free(A);
    free(B);
    return EXIT_FAILURE;
  }

  init_mat(C, A, B, N);
  ts = omp_get_wtime();
  matmul_seq(C, A, B, N);
  te = omp_get_wtime();
#if DUMP_MATRIX
  dump_mat(C, N);
#endif
  t_seq = te - ts;
  printf("Sum of matrix (serial): %f, wall time %lf, speed-up %.2lf\n",
         sum_mat(C, N), (te - ts), t_seq / (te - ts));

  init_mat(C, A, B, N);
  ts = omp_get_wtime();
  matmul_par(C, A, B, N);
  te = omp_get_wtime();
#if DUMP_MATRIX
  dump_mat(C, N);
#endif
  printf("Sum of matrix (parallel): %f, wall time %lf, speed-up %.2lf\n",
         sum_mat(C, N), (te - ts), t_seq / (te - ts));

  /* BUG FIX: the three matrices were never released. */
  free(C);
  free(A);
  free(B);
  return EXIT_SUCCESS;
}
main_seqval.c
/* Copyright (C) 2010 The Trustees of Indiana University. */
/* */
/* Use, modification and distribution is subject to the Boost Software */
/* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */
/* http://www.boost.org/LICENSE_1_0.txt) */
/* */
/* Authors: Jeremiah Willcock */
/* Andrew Lumsdaine */

/* These need to be before any possible inclusions of stdint.h or inttypes.h. */
#ifndef __STDC_LIMIT_MACROS
#define __STDC_LIMIT_MACROS
#endif
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

#include "../generator/make_graph.h"
#include "../generator/utils.h"
#include "common.h"
#include <math.h>
#include <mpi.h>
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdio.h>
#include <limits.h>
#include <stdint.h>
#include <inttypes.h>

/* qsort comparator for doubles, ascending.  NOTE(review): with NaN inputs
 * both comparisons are false and 0 is returned, so NaNs would make the
 * ordering inconsistent -- presumably timings here are never NaN. */
static int compare_doubles(const void* a, const void* b) {
  double aa = *(const double*)a;
  double bb = *(const double*)b;
  return (aa < bb) ? -1 : (aa == bb) ? 0 : 1;
}

/* Indices into the r[] array filled by get_statistics; s_LAST is its size. */
enum {s_minimum, s_firstquartile, s_median, s_thirdquartile, s_maximum, s_mean, s_std, s_LAST};

/* Compute summary statistics of the n samples in x[] into r[s_LAST]:
 * mean, sample standard deviation (n-1 divisor; division by zero if
 * n == 1 -- callers pass multiple BFS timings), and the order statistics
 * min/Q1/median/Q3/max.  Quartiles and the median average the two
 * straddling sorted elements when the rank is not integral.  x is copied
 * before sorting, so the caller's array is left untouched. */
static void get_statistics(const double x[], int n, double r[s_LAST]) {
  double temp;
  int i;
  /* Compute mean. */
  temp = 0;
  for (i = 0; i < n; ++i) temp += x[i];
  temp /= n;
  r[s_mean] = temp;
  /* Compute std. dev. */
  temp = 0;
  for (i = 0; i < n; ++i) temp += (x[i] - r[s_mean]) * (x[i] - r[s_mean]);
  temp /= n - 1;
  r[s_std] = sqrt(temp);
  /* Sort x. */
  double* xx = (double*)xmalloc(n * sizeof(double));
  memcpy(xx, x, n * sizeof(double));
  qsort(xx, n, sizeof(double), compare_doubles);
  /* Get order statistics. */
  r[s_minimum] = xx[0];
  r[s_firstquartile] = (xx[(n - 1) / 4] + xx[n / 4]) * .5;
  r[s_median] = (xx[(n - 1) / 2] + xx[n / 2]) * .5;
  r[s_thirdquartile] = (xx[n - 1 - (n - 1) / 4] + xx[n - 1 - n / 4]) * .5;
  r[s_maximum] = xx[n - 1];
  /* Clean up. */
  free(xx);
}

/* Extract the predecessor vertex from a packed pred-map entry: the shift
 * pair sign-extends the low 48 bits (the high 16 bits hold the BFS level).
 * NOTE(review): left-shifting a negative int64_t is technically undefined
 * in ISO C; this relies on the usual two's-complement behavior. */
static inline int64_t get_pred_from_pred_entry(int64_t val) {
  return (val << 16) >> 16;
}

/* Returns true if result is valid.
Also, updates high 16 bits of each element * of pred to contain the BFS level number (or -1 if not visited) of each * vertex; this is based on the predecessor map if the user didn't provide it. * */ int validate_bfs_result_seq(const tuple_graph* const tg, const int64_t nglobalverts, const size_t nlocalverts, const int64_t root, int64_t* const pred, int64_t* const edge_visit_count_ptr, int64_t const max_used_vertex) { assert (tg->edgememory_size >= 0 && tg->max_edgememory_size >= tg->edgememory_size && tg->max_edgememory_size <= tg->nglobaledges); assert (pred); *edge_visit_count_ptr = 0; /* Ensure it is a valid pointer */ int ranges_ok = check_value_ranges(nglobalverts, nlocalverts, pred); if (root < 0 || root >= nglobalverts) { fprintf(stderr, "%d: Validation error: root vertex %" PRId64 " is invalid.\n", rank, root); ranges_ok = 0; } if (!ranges_ok) return 0; /* Fail */ int validation_passed = 1; int root_owner; size_t root_local; get_vertex_distribution_for_pred(1, &root, &root_owner, &root_local); int root_is_mine = (root_owner == rank); /* Get maximum values so loop counts are consistent across ranks. 
*/
  /* Collective max of per-rank vertex counts bounds the receive buffer below. */
  uint64_t maxlocalverts_ui = nlocalverts;
  MPI_Allreduce(MPI_IN_PLACE, &maxlocalverts_ui, 1, MPI_UINT64_T, MPI_MAX, MPI_COMM_WORLD);
  size_t maxlocalverts = (size_t)maxlocalverts_ui;
  ptrdiff_t max_bufsize = tuple_graph_max_bufsize(tg);
  ptrdiff_t edge_chunk_size = ptrdiff_min(HALF_CHUNKSIZE, max_bufsize);
  assert (tg->edgememory_size >= 0 && tg->max_edgememory_size >= tg->edgememory_size && tg->max_edgememory_size <= tg->nglobaledges);
  assert (pred);
  /* combine results from all processes */
  int64_t* restrict pred_vtx = NULL;
  {
    int irank;
    uint64_t i;
    int64_t nlocalvertsMax=nlocalverts;
    /* NOTE(review): nlocalvertsMax is int64_t but is reduced/sent as
     * MPI_UINT64_T throughout this section — same width, so it works on
     * common ABIs, but confirm the intended signedness. */
    MPI_Allreduce(MPI_IN_PLACE, &nlocalvertsMax, 1, MPI_UINT64_T, MPI_MAX, MPI_COMM_WORLD);
    if(rank==0) {
      /* Rank 0 serially gathers every rank's pred[] slice into one
       * global predecessor array indexed by global vertex number. */
      pred_vtx = (int64_t*)xmalloc(nglobalverts * sizeof(int64_t));
      int64_t* pred_tmp;
      int64_t nlocalvertsRemote;
      pred_tmp=pred;                 /* first pass reads rank 0's own pred[] */
      nlocalvertsRemote=nlocalverts;
      for(irank=0;irank<size;irank++) {
        MPI_Barrier(MPI_COMM_WORLD);
        if(irank!=0) {
          MPI_Recv(&nlocalvertsRemote, 1, MPI_UINT64_T, irank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
          MPI_Recv(pred_tmp, nlocalvertsRemote, MPI_UINT64_T, irank, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
          //printf("%d %" PRId64 " \n",rank,nlocalvertsRemote);
        }
        for(i=0;i<nlocalvertsRemote ;i++) {
          pred_vtx[vertex_to_global_for_pred(irank,i)]=get_pred_from_pred_entry(pred_tmp[i]);
        }
        /* After consuming the local slice, switch to a scratch buffer
         * (sized for the largest rank) for the remaining receives. */
        if(irank==0) pred_tmp = (int64_t*)xmalloc(nlocalvertsMax * sizeof(int64_t));
      }
      xfree(pred_tmp);
    } else {
      /* Non-root ranks take turns (barrier-ordered) sending their slice. */
      for(irank=0;irank<size;irank++) {
        MPI_Barrier(MPI_COMM_WORLD);
        if(rank==irank) {
          MPI_Send(&nlocalverts, 1, MPI_UINT64_T, 0, 0, MPI_COMM_WORLD);
          MPI_Send(pred, nlocalverts, MPI_UINT64_T, 0, 1, MPI_COMM_WORLD);
        }
      }
    }
    /* Debug loop (prints disabled); the barriers are kept for symmetry. */
    {
      int irank;
      uint64_t i;
      for(irank=0;irank<size;irank++) {
        MPI_Barrier(MPI_COMM_WORLD);
        //if(rank==irank)
        //  for(i=0;i<nlocalverts ;i++)
        //    fprintf(stderr, "%d %" PRId64 " %" PRId64 " %" PRId64 "\n", rank,i,get_pred_from_pred_entry(pred[i]),vertex_to_global_for_pred(rank,i));
      }
    }
  }
  int64_t nedge_traversed;
  if(rank==0) {
    uint64_t i, max_bfsvtx=0;
    /*for(i=0;i<tg->edgememory_size ;i++) {
      if(tg->edgememory[i].v0>max_bfsvtx) max_bfsvtx=tg->edgememory[i].v0;
      if(tg->edgememory[i].v1>max_bfsvtx) max_bfsvtx=tg->edgememory[i].v1;
    }*/
    /*int64_t* restrict pred_vtx = (int64_t*)xmalloc((max_used_vertex+1) * sizeof(int64_t));
    for(i=0;i<=max_used_vertex ;i++) {
      pred_vtx[i]=get_pred_from_pred_entry(pred[i]);
    }*/
    /* Sequential reference validation of the gathered BFS tree;
     * negative return encodes a validation error code. */
    nedge_traversed=verify_bfs_tree (pred_vtx, max_used_vertex, root, tg->edgememory, tg->nglobaledges);
    if(nedge_traversed<0) {
      fprintf(stderr, "Validation error: code %" PRId64 ".\n", nedge_traversed);
      validation_passed=0;
    }
  }
  if(rank==0) {
    xfree(pred_vtx);
  }
  /* NOTE(review): nedge_traversed is int64_t but reduced as MPI_INT —
   * only rank 0 holds a meaningful value, and other ranks' garbage may
   * win the MAX; confirm this is benign for the edge count reported. */
  MPI_Allreduce(MPI_IN_PLACE, &nedge_traversed, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
  *edge_visit_count_ptr=nedge_traversed;
  /* Collect the global validation result. */
  MPI_Allreduce(MPI_IN_PLACE, &validation_passed, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD);
  return validation_passed;
}

/* Benchmark driver: reads a config file (scale, edgefactor, roots,
 * reference data), generates/loads the Kronecker edge list, builds the
 * graph, runs timed BFS cycles and an optional validation pass, and
 * prints Graph500-style statistics on rank 0. */
int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  setup_globals();

  /* Parse arguments. */
  int SCALE = 16;
  int edgefactor = 16; /* nedges / nvertices, i.e., 2*avg. degree */
  int num_bfs_roots = 64;
  int bCompareMD5=1;
  int bRunPerf=1;
  int bRunVal=1;
  float timeForPerf=300.0;
  int numberOfCyclesForPerf=300;
  //uint8_t refMD5[16];
  int64_t* refEdgeCounts = NULL;
  int64_t* refBFS_Roots = NULL;
  if ( !(argc == 2 || argc == 3)){
    if (rank == 0) fprintf(stderr, "Usage: %s input_file [number of threads]\n", argv[0]);
    //fprintf(stderr, "Usage: %s SCALE edgefactor\n  SCALE = log_2(# vertices) [integer, required]\n  edgefactor = (# edges) / (# vertices) = .5 * (average vertex degree) [integer, defaults to 16]\n(Random number seed and Kronecker initiator are in main.c)\n", argv[0]);
    MPI_Abort(MPI_COMM_WORLD, 1);
  }
  if ( argc == 3){
    int threads=atoi(argv[2]);
#ifdef _OPENMP
    omp_set_num_threads(threads);
#else
    if(threads!=1) fprintf(stderr, "ERROR: %s compiled without OpenMP\n", argv[0]);
#endif
  }

  /* Read the run configuration file (one value per line, then one
   * "root edge_count" reference pair per BFS root). */
  {
    int iRead=0;
    int i;
    FILE *input_file;
    char cbuf[256];
    if (rank == 0) fprintf(stderr, "Reading input from %s\n",argv[1]);
    input_file=fopen(argv[1],"r");
    if(input_file==NULL){
      if (rank == 0) fprintf(stderr, "Error : can no open %s file\n",argv[1]); /* sic */
      MPI_Barrier(MPI_COMM_WORLD);
      MPI_Abort(MPI_COMM_WORLD, 1);
    }
    /* NOTE(review): fgets return values are unchecked; a short file
     * silently reuses the previous line's buffer contents. */
    fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&SCALE);
    fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&edgefactor);
    fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&num_bfs_roots);
    fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&bCompareMD5);
    fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&bRunPerf);
    fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&bRunVal);
    fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%f",&timeForPerf);
    fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&numberOfCyclesForPerf);
    //fgets(cbuf,256,input_file);
    //for (i = 0; i < 16; i++)
    //  iRead+=sscanf(cbuf+i*2,"%2x",&refMD5[i]);
    //refMD5[i]=cbuf[i];
    refEdgeCounts = (int64_t*)xmalloc(num_bfs_roots * sizeof(int64_t));
    refBFS_Roots = (int64_t*)xmalloc(num_bfs_roots * sizeof(int64_t));
    for (i = 0; i < num_bfs_roots; i++){
      fgets(cbuf,256,input_file);
      /* NOTE(review): "%lu" scans unsigned long into int64_t* — wrong on
       * LLP64 targets; SCNd64 would be the portable specifier. */
      iRead+=sscanf(cbuf,"%lu %lu ",refBFS_Roots+i,refEdgeCounts+i);
    }
    //printf("%d %d\n",rank,iRead);
    //printf("%d %d\n",rank,SCALE);
    //printf("%d %d\n",rank,edgefactor);
    if (rank == 0){
      fprintf(stderr, "\tScale: %d\n",SCALE);
      fprintf(stderr, "\tEdgefactor %d\n",edgefactor);
      fprintf(stderr, "\tNumber of BFS roots: %d\n",num_bfs_roots);
      fprintf(stderr, "\tCompare md5 on initial edge list: %d\n",bCompareMD5);
      fprintf(stderr, "\tRun performance section: %d\n",bRunPerf);
      fprintf(stderr, "\tRun validation: %d\n",bRunVal);
      fprintf(stderr, "\tTime for performance section in seconds: %f\n",timeForPerf);
      fprintf(stderr, "\tMax number of cycles: %d\n",numberOfCyclesForPerf);
      fprintf(stderr, "\tNumber of MPI processes: %d\n",size);
#ifdef _OPENMP
      fprintf(stderr, "\tMax number of threads per MPI process: %d\n",omp_get_max_threads());
#else
      fprintf(stderr, "\tMax number of threads per MPI process: compiled without OpenMP\n");
#endif
      //fprintf(stderr, "\tReffrence md5 on initial edge list: ");
      //for (i = 0; i < 16; i++)
      //  fprintf(stderr, "%2.2x", refMD5[i]);
      //fprintf(stderr, "\n");
    }
    fclose(input_file);
    //MPI_Barrier(MPI_COMM_WORLD);
    //MPI_Abort(MPI_COMM_WORLD, 1);
  }
  //  int SCALE = 16;
  //  int edgefactor = 16; /* nedges / nvertices, i.e., 2*avg. degree */
  //  if (argc >= 2) SCALE = atoi(argv[1]);
  //  if (argc >= 3) edgefactor = atoi(argv[2]);
  //  if (argc <= 1 || argc >= 4 || SCALE == 0 || edgefactor == 0) {
  //    if (rank == 0) {
  //      fprintf(stderr, "Usage: %s SCALE edgefactor\n  SCALE = log_2(# vertices) [integer, required]\n  edgefactor = (# edges) / (# vertices) = .5 * (average vertex degree) [integer, defaults to 16]\n(Random number seed and Kronecker initiator are in main.c)\n", argv[0]);
  //    }
  //    MPI_Abort(MPI_COMM_WORLD, 1);
  //  }
  uint64_t seed1 = 2, seed2 = 3;

  const char* filename = getenv("TMPFILE");
  const int reuse_file = getenv("REUSEFILE")? 1 : 0;
  /* If filename is NULL, store data in memory */
  tuple_graph tg;
  tg.nglobaledges = (int64_t)(edgefactor) << SCALE;
  int64_t nglobalverts = (int64_t)(1) << SCALE;

  tg.data_in_file = (filename != NULL);
  tg.write_file = 1;

  if (tg.data_in_file) {
    /* Open (or reuse) the on-disk edge file; an existing file of exactly
     * the expected size is assumed to hold a matching edge list. */
    int is_opened = 0;
    int mode = MPI_MODE_RDWR | MPI_MODE_EXCL | MPI_MODE_UNIQUE_OPEN;
    if (!reuse_file) {
      mode |= MPI_MODE_CREATE | MPI_MODE_DELETE_ON_CLOSE;
    } else {
      MPI_File_set_errhandler(MPI_FILE_NULL, MPI_ERRORS_RETURN);
      if (MPI_File_open(MPI_COMM_WORLD, (char*)filename, mode, MPI_INFO_NULL, &tg.edgefile)) {
        mode |= MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_DELETE_ON_CLOSE;
      } else {
        MPI_Offset size;
        MPI_File_get_size(tg.edgefile, &size);
        if (size == tg.nglobaledges * sizeof(packed_edge)) {
          is_opened = 1;
          tg.write_file = 0;
        } else /* Size doesn't match, assume different parameters. */
          MPI_File_close (&tg.edgefile);
      }
    }
    MPI_File_set_errhandler(MPI_FILE_NULL, MPI_ERRORS_ARE_FATAL);
    if (!is_opened) {
      MPI_File_open(MPI_COMM_WORLD, (char*)filename, mode, MPI_INFO_NULL, &tg.edgefile);
      MPI_File_set_size(tg.edgefile, tg.nglobaledges * sizeof(packed_edge));
    }
    MPI_File_set_view(tg.edgefile, 0, packed_edge_mpi_type, packed_edge_mpi_type, "native", MPI_INFO_NULL);
    MPI_File_set_atomicity(tg.edgefile, 0);
  }

  /* Make the raw graph edges. */
  /* Get roots for BFS runs, plus maximum vertex with non-zero degree (used by
   * validator). */
  //int num_bfs_roots = 64;
  int64_t* bfs_roots = (int64_t*)xmalloc(num_bfs_roots * sizeof(int64_t));
  int64_t max_used_vertex = 0;

  double make_graph_start = MPI_Wtime();
  {
    /* Spread the two 64-bit numbers into five nonzero values in the correct
     * range. */
    uint_fast32_t seed[5];
    make_mrg_seed(seed1, seed2, seed);

    /* As the graph is being generated, also keep a bitmap of vertices with
     * incident edges.  We keep a grid of processes, each row of which has a
     * separate copy of the bitmap (distributed among the processes in the
     * row), and then do an allreduce at the end.  This scheme is used to avoid
     * non-local communication and reading the file separately just to find BFS
     * roots. */
    MPI_Offset nchunks_in_file = (tg.nglobaledges + FILE_CHUNKSIZE - 1) / FILE_CHUNKSIZE;
    int64_t bitmap_size_in_bytes = int64_min(BITMAPSIZE, (nglobalverts + CHAR_BIT - 1) / CHAR_BIT);
    if (bitmap_size_in_bytes * size * CHAR_BIT < nglobalverts) {
      bitmap_size_in_bytes = (nglobalverts + size * CHAR_BIT - 1) / (size * CHAR_BIT);
    }
    int ranks_per_row = ((nglobalverts + CHAR_BIT - 1) / CHAR_BIT + bitmap_size_in_bytes - 1) / bitmap_size_in_bytes;
    int nrows = size / ranks_per_row;
    int my_row = -1, my_col = -1;
    unsigned char* restrict has_edge = NULL;
    MPI_Comm cart_comm;
    {
      int dims[2] = {size / ranks_per_row, ranks_per_row};
      int periods[2] = {0, 0};
      MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &cart_comm);
    }
    int in_generating_rectangle = 0;
    if (cart_comm != MPI_COMM_NULL) {
      in_generating_rectangle = 1;
      {
        int dims[2], periods[2], coords[2];
        MPI_Cart_get(cart_comm, 2, dims, periods, coords);
        my_row = coords[0];
        my_col = coords[1];
      }
      MPI_Comm this_col;
      MPI_Comm_split(cart_comm, my_col, my_row, &this_col);
      MPI_Comm_free(&cart_comm);
      has_edge = (unsigned char*)xMPI_Alloc_mem(bitmap_size_in_bytes);
      memset(has_edge, 0, bitmap_size_in_bytes);
      /* Every rank in a given row creates the same vertices (for updating the
       * bitmap); only one writes them to the file (or final memory buffer). */
      packed_edge* buf = (packed_edge*)xmalloc(FILE_CHUNKSIZE * sizeof(packed_edge));
      MPI_Offset block_limit = (nchunks_in_file + nrows - 1) / nrows;
      /* fprintf(stderr, "%d: nchunks_in_file = %" PRId64 ", block_limit = %" PRId64 " in grid of %d rows, %d cols\n", rank, (int64_t)nchunks_in_file, (int64_t)block_limit, nrows, ranks_per_row); */
      if (tg.data_in_file) {
        tg.edgememory_size = 0;
        tg.edgememory = NULL;
      } else {
        /* Compute this rank's share of the edge list, including the
         * remainder chunk when nglobaledges is not chunk-aligned. */
        int my_pos = my_row + my_col * nrows;
        int last_pos = (tg.nglobaledges % ((int64_t)FILE_CHUNKSIZE * nrows * ranks_per_row) != 0) ?
                       (tg.nglobaledges / FILE_CHUNKSIZE) % (nrows * ranks_per_row) :
                       -1;
        int64_t edges_left = tg.nglobaledges % FILE_CHUNKSIZE;
        int64_t nedges = FILE_CHUNKSIZE * (tg.nglobaledges / ((int64_t)FILE_CHUNKSIZE * nrows * ranks_per_row)) +
                         FILE_CHUNKSIZE * (my_pos < (tg.nglobaledges / FILE_CHUNKSIZE) % (nrows * ranks_per_row)) +
                         (my_pos == last_pos ? edges_left : 0);
        /* fprintf(stderr, "%d: nedges = %" PRId64 " of %" PRId64 "\n", rank, (int64_t)nedges, (int64_t)tg.nglobaledges); */
        tg.edgememory_size = nedges;
        tg.edgememory = (packed_edge*)xmalloc(nedges * sizeof(packed_edge));
      }
      MPI_Offset block_idx;
      for (block_idx = 0; block_idx < block_limit; ++block_idx) {
        /* fprintf(stderr, "%d: On block %d of %d\n", rank, (int)block_idx, (int)block_limit); */
        MPI_Offset start_edge_index = int64_min(FILE_CHUNKSIZE * (block_idx * nrows + my_row), tg.nglobaledges);
        MPI_Offset edge_count = int64_min(tg.nglobaledges - start_edge_index, FILE_CHUNKSIZE);
        packed_edge* actual_buf = (!tg.data_in_file && block_idx % ranks_per_row == my_col) ?
                                  tg.edgememory + FILE_CHUNKSIZE * (block_idx / ranks_per_row) :
                                  buf;
        /* fprintf(stderr, "%d: My range is [%" PRId64 ", %" PRId64 ") %swriting into index %" PRId64 "\n", rank, (int64_t)start_edge_index, (int64_t)(start_edge_index + edge_count), (my_col == (block_idx % ranks_per_row)) ? "" : "not ", (int64_t)(FILE_CHUNKSIZE * (block_idx / ranks_per_row))); */
        if (!tg.data_in_file && block_idx % ranks_per_row == my_col) {
          assert (FILE_CHUNKSIZE * (block_idx / ranks_per_row) + edge_count <= tg.edgememory_size);
        }
        if (tg.write_file) {
          generate_kronecker_range(seed, SCALE, start_edge_index, start_edge_index + edge_count, actual_buf);
          if (tg.data_in_file && my_col == (block_idx % ranks_per_row)) {
            /* Try to spread writes among ranks */
            MPI_File_write_at(tg.edgefile, start_edge_index, actual_buf, edge_count, packed_edge_mpi_type, MPI_STATUS_IGNORE);
          }
        } else {
          /* All read rather than syncing up for a row broadcast. */
          MPI_File_read_at(tg.edgefile, start_edge_index, actual_buf, edge_count, packed_edge_mpi_type, MPI_STATUS_IGNORE);
        }
        /* Mark both endpoints of every non-self-loop edge that falls in
         * this column's slice of the bitmap. */
        ptrdiff_t i;
#ifdef _OPENMP
#pragma omp parallel for
#endif
        for (i = 0; i < edge_count; ++i) {
          int64_t src = get_v0_from_edge(&actual_buf[i]);
          int64_t tgt = get_v1_from_edge(&actual_buf[i]);
          if (src == tgt) continue;
          if (src / bitmap_size_in_bytes / CHAR_BIT == my_col) {
#ifdef _OPENMP
#pragma omp atomic
#endif
            has_edge[(src / CHAR_BIT) % bitmap_size_in_bytes] |= (1 << (src % CHAR_BIT));
          }
          if (tgt / bitmap_size_in_bytes / CHAR_BIT == my_col) {
#ifdef _OPENMP
#pragma omp atomic
#endif
            has_edge[(tgt / CHAR_BIT) % bitmap_size_in_bytes] |= (1 << (tgt % CHAR_BIT));
          }
        }
      }
      free(buf);
#if 0
      /* The allreduce for each root acts like we did this: */
      MPI_Allreduce(MPI_IN_PLACE, has_edge, bitmap_size_in_bytes, MPI_UNSIGNED_CHAR, MPI_BOR, this_col);
#endif
      MPI_Comm_free(&this_col);
    } else {
      tg.edgememory = NULL;
      tg.edgememory_size = 0;
    }
    MPI_Allreduce(&tg.edgememory_size, &tg.max_edgememory_size, 1, MPI_INT64_T, MPI_MAX, MPI_COMM_WORLD);

    /* Find roots and max used vertex */
    {
      uint64_t counter = 0;
      int bfs_root_idx;
      for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) {
        int64_t root;
        while (1) {
          /* Draw candidate roots deterministically so all ranks agree;
           * give up once the counter exhausts the sampling budget. */
          double d[2];
          make_random_numbers(2, seed1, seed2, counter, d);
          root = (int64_t)((d[0] + d[1]) * nglobalverts) % nglobalverts;
          counter += 2;
          if (counter > 2 * nglobalverts) break;
          int is_duplicate = 0;
          int i;
          for (i = 0; i < bfs_root_idx; ++i) {
            if (root == bfs_roots[i]) {
              is_duplicate = 1;
              break;
            }
          }
          if (is_duplicate) continue; /* Everyone takes the same path here */
          int root_ok = 0;
          /* Only the owning column consults its bitmap slice; the
           * allreduce shares the verdict. */
          if (in_generating_rectangle && (root / CHAR_BIT / bitmap_size_in_bytes) == my_col) {
            root_ok = (has_edge[(root / CHAR_BIT) % bitmap_size_in_bytes] & (1 << (root % CHAR_BIT))) != 0;
          }
          MPI_Allreduce(MPI_IN_PLACE, &root_ok, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD);
          if (root_ok) break;
        }
        bfs_roots[bfs_root_idx] = root;
        if((refBFS_Roots!=NULL) && (rank==0)){
          if(refBFS_Roots[bfs_root_idx] != bfs_roots[bfs_root_idx])
            fprintf(stderr,"ERROR: BFS roots do not match reffrence (Ref: %lu Here: %lu)\n",refBFS_Roots[bfs_root_idx], bfs_roots[bfs_root_idx]); /* sic: "reffrence" */
        }
      }
      num_bfs_roots = bfs_root_idx;

      /* Find maximum non-zero-degree vertex. */
      {
        int64_t i;
        max_used_vertex = 0;
        if (in_generating_rectangle) {
          /* Scan this slice's bitmap from the top for the highest set bit. */
          for (i = bitmap_size_in_bytes * CHAR_BIT; i > 0; --i) {
            if (i > nglobalverts) continue;
            if (has_edge[(i - 1) / CHAR_BIT] & (1 << ((i - 1) % CHAR_BIT))) {
              max_used_vertex = (i - 1) + my_col * CHAR_BIT * bitmap_size_in_bytes;
              break;
            }
          }
        }
        MPI_Allreduce(MPI_IN_PLACE, &max_used_vertex, 1, MPI_INT64_T, MPI_MAX, MPI_COMM_WORLD);
      }
    }
    if (in_generating_rectangle) {
      MPI_Free_mem(has_edge);
    }
    if (tg.data_in_file && tg.write_file) {
      MPI_File_sync(tg.edgefile);
    }
  }
  double make_graph_stop = MPI_Wtime();
  double make_graph_time = make_graph_stop - make_graph_start;
  if (rank == 0) { /* Not an official part of the results */
    fprintf(stderr, "graph_generation:               %f s\n", make_graph_time);
  }

  /* Make user's graph data structure. */
  double data_struct_start = MPI_Wtime();
  make_graph_data_structure(&tg);
  double data_struct_stop = MPI_Wtime();
  double data_struct_time = data_struct_stop - data_struct_start;
  if (rank == 0) { /* Not an official part of the results */
    fprintf(stderr, "construction_time:              %f s\n", data_struct_time);
  }

  /* Number of edges visited in each BFS; a double so get_statistics can be
   * used directly. */
  double* edge_counts = (double*)xmalloc(num_bfs_roots * sizeof(double));
  int64_t* edge_counts_ul = (int64_t*)xmalloc(num_bfs_roots * sizeof(int64_t));

  /* Run BFS. */
  int validation_passed = 1;
  double* bfs_times = (double*)xmalloc(num_bfs_roots * sizeof(double));
  double* validate_times = (double*)xmalloc(num_bfs_roots * sizeof(double));
  uint64_t nlocalverts = get_nlocalverts_for_pred();
  int64_t* pred = (int64_t*)xMPI_Alloc_mem(nlocalverts * sizeof(int64_t));

  int bfs_root_idx;
  int CyclesPassed=0;
  /* Two phases: timed performance cycles (no validation), then one
   * validation cycle — unless configuration disables either. */
  int ValidationStep=0;
  if(bRunPerf==0) {
    ValidationStep=1;
    numberOfCyclesForPerf=1;
  }
  for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx)
    bfs_times[bfs_root_idx]=0.0;
  double performance_start = MPI_Wtime();
  while(1){
    if (rank == 0)fprintf(stderr, "Starting cycle %d.\n", CyclesPassed);
    for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) {
      int64_t root = bfs_roots[bfs_root_idx];

      if ((rank == 0)&&(ValidationStep)) fprintf(stderr, "Running BFS %d\n", bfs_root_idx);

      /* Clear the pred array. */
      memset(pred, 0, nlocalverts * sizeof(int64_t));

      /* Do the actual BFS. */
      double bfs_start = MPI_Wtime();
      run_bfs(root, &pred[0]);
      double bfs_stop = MPI_Wtime();
      bfs_times[bfs_root_idx] += bfs_stop - bfs_start;  /* accumulated, averaged later */
      if ((rank == 0)&&(ValidationStep)) fprintf(stderr, "Time for BFS %d is %f\n", bfs_root_idx, bfs_stop - bfs_start);

      /* Validate result. */
      //if (!getenv("SKIP_VALIDATION")) {
      if (ValidationStep) {
        if (rank == 0) fprintf(stderr, "Validating BFS %d\n", bfs_root_idx);
        double validate_start = MPI_Wtime();
        int64_t edge_visit_count;
        int validation_passed_one = validate_bfs_result_seq(&tg, nglobalverts, nlocalverts, root, pred, &edge_visit_count,max_used_vertex);
        //int validation_passed_one = validate_bfs_result(&tg, max_used_vertex + 1, nlocalverts, root, pred, &edge_visit_count);
        double validate_stop = MPI_Wtime();
        validate_times[bfs_root_idx] = validate_stop - validate_start;
        if (rank == 0) fprintf(stderr, "Validate time for BFS %d is %f\n", bfs_root_idx, validate_times[bfs_root_idx]);
        edge_counts[bfs_root_idx] = (double)edge_visit_count;
        edge_counts_ul[bfs_root_idx] = edge_visit_count;
        if (rank == 0) fprintf(stderr, "TEPS for BFS %d is %g\n", bfs_root_idx, edge_visit_count / bfs_times[bfs_root_idx]);

        if((refEdgeCounts!=NULL) && (rank==0)){
          if(refEdgeCounts[bfs_root_idx]!=edge_counts_ul[bfs_root_idx])
            fprintf(stderr,"ERROR: Edge count do not match reference (Ref: %lu Here: %lu)\n",refEdgeCounts[bfs_root_idx], edge_counts_ul[bfs_root_idx]);
        }

        if (!validation_passed_one) {
          validation_passed = 0;
          if (rank == 0) fprintf(stderr, "Validation failed for this BFS root; skipping rest.\n");
          break;
        }
      }
    }
    CyclesPassed++;
    /* Stop timing when the wall-clock or cycle budget is spent, then run
     * (at most) one validation pass before leaving the loop. */
    if((MPI_Wtime()-performance_start>=timeForPerf)||(CyclesPassed>=numberOfCyclesForPerf)){
      if(bRunVal){
        if(ValidationStep==0)
          ValidationStep=1;
        else
          break;
      }
      else
        break;
    }
    if (validation_passed==0) break;
  }
  if (rank == 0) fprintf(stderr,"Completed %d cycles\n", CyclesPassed);
  for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) {
    bfs_times[bfs_root_idx]/=CyclesPassed;  /* average per-root BFS time */
  }

  /* Print results. */
  if (rank == 0) {
    int i;
    for (i = 0; i < num_bfs_roots; ++i)
      fprintf(stdout, "%lu %lu # [%2d] bfs_roots edge_visit_count\n",bfs_roots[i],edge_counts_ul[i],i);
    if (!validation_passed) {
      fprintf(stdout, "No results printed for invalid run.\n");
    } else {
      int i;
      fprintf(stdout, "SCALE:                          %d\n", SCALE);
      fprintf(stdout, "edgefactor:                     %d\n", edgefactor);
      fprintf(stdout, "NBFS:                           %d\n", num_bfs_roots);
      fprintf(stdout, "graph_generation:               %g\n", make_graph_time);
      fprintf(stdout, "num_mpi_processes:              %d\n", size);
      fprintf(stdout, "construction_time:              %g\n", data_struct_time);
      double stats[s_LAST];
      get_statistics(bfs_times, num_bfs_roots, stats);
      fprintf(stdout, "min_time:                       %g\n", stats[s_minimum]);
      fprintf(stdout, "firstquartile_time:             %g\n", stats[s_firstquartile]);
      fprintf(stdout, "median_time:                    %g\n", stats[s_median]);
      fprintf(stdout, "thirdquartile_time:             %g\n", stats[s_thirdquartile]);
      fprintf(stdout, "max_time:                       %g\n", stats[s_maximum]);
      fprintf(stdout, "mean_time:                      %g\n", stats[s_mean]);
      fprintf(stdout, "stddev_time:                    %g\n", stats[s_std]);
      get_statistics(edge_counts, num_bfs_roots, stats);
      fprintf(stdout, "min_nedge:                      %.11g\n", stats[s_minimum]);
      fprintf(stdout, "firstquartile_nedge:            %.11g\n", stats[s_firstquartile]);
      fprintf(stdout, "median_nedge:                   %.11g\n", stats[s_median]);
      fprintf(stdout, "thirdquartile_nedge:            %.11g\n", stats[s_thirdquartile]);
      fprintf(stdout, "max_nedge:                      %.11g\n", stats[s_maximum]);
      fprintf(stdout, "mean_nedge:                     %.11g\n", stats[s_mean]);
      fprintf(stdout, "stddev_nedge:                   %.11g\n", stats[s_std]);
      /* TEPS statistics are computed on seconds-per-edge and inverted
       * (harmonic-mean convention), so quartiles swap. */
      double* secs_per_edge = (double*)xmalloc(num_bfs_roots * sizeof(double));
      for (i = 0; i < num_bfs_roots; ++i) secs_per_edge[i] = bfs_times[i] / edge_counts[i];
      get_statistics(secs_per_edge, num_bfs_roots, stats);
      fprintf(stdout, "min_TEPS:                       %g\n", 1. / stats[s_maximum]);
      fprintf(stdout, "firstquartile_TEPS:             %g\n", 1. / stats[s_thirdquartile]);
      fprintf(stdout, "median_TEPS:                    %g\n", 1. / stats[s_median]);
      fprintf(stdout, "thirdquartile_TEPS:             %g\n", 1. / stats[s_firstquartile]);
      fprintf(stdout, "max_TEPS:                       %g\n", 1. / stats[s_minimum]);
      fprintf(stdout, "harmonic_mean_TEPS:             %g\n", 1. / stats[s_mean]);
      /* Formula from:
       * Title: The Standard Errors of the Geometric and Harmonic Means and
       *        Their Application to Index Numbers
       * Author(s): Nilan Norris
       * Source: The Annals of Mathematical Statistics, Vol. 11, No. 4 (Dec., 1940), pp. 445-448
       * Publisher(s): Institute of Mathematical Statistics
       * Stable URL: http://www.jstor.org/stable/2235723
       * (same source as in specification). */
      fprintf(stdout, "harmonic_stddev_TEPS:           %g\n", stats[s_std] / (stats[s_mean] * stats[s_mean] * sqrt(num_bfs_roots - 1)));
      free(secs_per_edge); secs_per_edge = NULL;
      free(edge_counts); edge_counts = NULL;
      get_statistics(validate_times, num_bfs_roots, stats);
      fprintf(stdout, "min_validate:                   %g\n", stats[s_minimum]);
      fprintf(stdout, "firstquartile_validate:         %g\n", stats[s_firstquartile]);
      fprintf(stdout, "median_validate:                %g\n", stats[s_median]);
      fprintf(stdout, "thirdquartile_validate:         %g\n", stats[s_thirdquartile]);
      fprintf(stdout, "max_validate:                   %g\n", stats[s_maximum]);
      fprintf(stdout, "mean_validate:                  %g\n", stats[s_mean]);
      fprintf(stdout, "stddev_validate:                %g\n", stats[s_std]);
#if 0
      for (i = 0; i < num_bfs_roots; ++i) {
        fprintf(stdout, "Run %3d:                        %g s, validation %g s\n", i + 1, bfs_times[i], validate_times[i]);
      }
#endif
    }
  }

  /* Release resources and shut down MPI. */
  MPI_Free_mem(pred);
  free(bfs_roots);
  free_graph_data_structure();

  if (tg.data_in_file) {
    MPI_File_close(&tg.edgefile);
  } else {
    free(tg.edgememory); tg.edgememory = NULL;
  }
  free(bfs_times);
  free(validate_times);
  free(edge_counts_ul);

  cleanup_globals();
  MPI_Finalize();
  return 0;
}
/* ==================== is.c ==================== */
/*-------------------------------------------------------------------- NAS Parallel Benchmarks 2.3 OpenMP C versions - IS This benchmark is an OpenMP C version of the NPB IS code. The OpenMP C versions are developed by RWCP and derived from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Send comments on the OpenMP C versions to pdp-openmp@rwcp.or.jp Information on OpenMP activities at RWCP is available at: http://pdplab.trc.rwcp.or.jp/pdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http://www.nas.nasa.gov/NAS/NPB/ --------------------------------------------------------------------*/ /*-------------------------------------------------------------------- Author: M. Yarrow OpenMP C version: S. Satoh --------------------------------------------------------------------*/ #include "npbparams.h" #include <nautilus/nautilus.h> #include <nautilus/shell.h> #include <nautilus/thread.h> #include "../paging_benchmark.h" //#include <stdlib.h> //#include <stdio.h> //#if defined(_OPENMP) //#include <omp.h> //#endif /* _OPENMP */ /*****************************************************************/ /* For serial IS, buckets are not really req'd to solve NPB1 IS */ /* spec, but their use on some machines improves performance, on */ /* other machines the use of buckets compromises performance, */ /* probably because it is extra computation which is not req'd. */ /* (Note: Mechanism not understood, probably cache related) */ /* Example: SP2-66MhzWN: 50% speedup with buckets */ /* Example: SGI Indy5000: 50% slowdown with buckets */ /* Example: SGI O2000: 400% slowdown with buckets (Wow!) 
*/ /*****************************************************************/ /* #define USE_BUCKETS */ /* buckets are not used in the OpenMP C version */ /******************/ /* default values */ /******************/ #ifndef CLASS #define CLASS 'S' #endif /*************/ /* CLASS S */ /*************/ #if CLASS == 'S' #define TOTAL_KEYS_LOG_2 16 #define MAX_KEY_LOG_2 11 #define NUM_BUCKETS_LOG_2 9 #endif /*************/ /* CLASS W */ /*************/ #if CLASS == 'W' #define TOTAL_KEYS_LOG_2 20 #define MAX_KEY_LOG_2 16 #define NUM_BUCKETS_LOG_2 10 #endif /*************/ /* CLASS A */ /*************/ #if CLASS == 'A' #define TOTAL_KEYS_LOG_2 23 #define MAX_KEY_LOG_2 19 #define NUM_BUCKETS_LOG_2 10 #endif /*************/ /* CLASS B */ /*************/ #if CLASS == 'B' #define TOTAL_KEYS_LOG_2 25 #define MAX_KEY_LOG_2 21 #define NUM_BUCKETS_LOG_2 10 #endif /*************/ /* CLASS C */ /*************/ #if CLASS == 'C' #define TOTAL_KEYS_LOG_2 27 #define MAX_KEY_LOG_2 23 #define NUM_BUCKETS_LOG_2 10 #endif #define TOTAL_KEYS (1 << TOTAL_KEYS_LOG_2) #define MAX_KEY (1 << MAX_KEY_LOG_2) #define NUM_BUCKETS (1 << NUM_BUCKETS_LOG_2) #define NUM_KEYS TOTAL_KEYS #define SIZE_OF_BUFFERS NUM_KEYS #define MAX_ITERATIONS 10 #define TEST_ARRAY_SIZE 5 /*************************************/ /* Typedef: if necessary, change the */ /* size of int here by changing the */ /* int type to, say, long */ /*************************************/ typedef int INT_TYPE; /********************/ /* Some global info */ /********************/ INT_TYPE *key_buff_ptr_global; /* used by full_verify to get */ /* copies of rank info */ int passed_verification; /************************************/ /* These are the three main arrays. 
*/ /* See SIZE_OF_BUFFERS def above */ /************************************/ INT_TYPE key_array[SIZE_OF_BUFFERS], key_buff1[SIZE_OF_BUFFERS], key_buff2[SIZE_OF_BUFFERS], partial_verify_vals[TEST_ARRAY_SIZE]; #ifdef USE_BUCKETS INT_TYPE bucket_size[NUM_BUCKETS], bucket_ptrs[NUM_BUCKETS]; #endif /**********************/ /* Partial verif info */ /**********************/ INT_TYPE test_index_array[TEST_ARRAY_SIZE], test_rank_array[TEST_ARRAY_SIZE], S_test_index_array[TEST_ARRAY_SIZE] = {48427,17148,23627,62548,4431}, S_test_rank_array[TEST_ARRAY_SIZE] = {0,18,346,64917,65463}, W_test_index_array[TEST_ARRAY_SIZE] = {357773,934767,875723,898999,404505}, W_test_rank_array[TEST_ARRAY_SIZE] = {1249,11698,1039987,1043896,1048018}, A_test_index_array[TEST_ARRAY_SIZE] = {2112377,662041,5336171,3642833,4250760}, A_test_rank_array[TEST_ARRAY_SIZE] = {104,17523,123928,8288932,8388264}, B_test_index_array[TEST_ARRAY_SIZE] = {41869,812306,5102857,18232239,26860214}, B_test_rank_array[TEST_ARRAY_SIZE] = {33422937,10244,59149,33135281,99}, C_test_index_array[TEST_ARRAY_SIZE] = {44172927,72999161,74326391,129606274,21736814}, C_test_rank_array[TEST_ARRAY_SIZE] = {61147,882988,266290,133997595,133525895}; /***********************/ /* function prototypes */ /***********************/ static double randlc( double *X, double *A ); void full_verify( void ); /* * FUNCTION RANDLC (X, A) * * This routine returns a uniform pseudorandom double precision number in the * range (0, 1) by using the linear congruential generator * * x_{k+1} = a x_k (mod 2^46) * * where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers * before repeating. The argument A is the same as 'a' in the above formula, * and X is the same as x_0. A and X must be odd double precision integers * in the range (1, 2^46). The returned value RANDLC is normalized to be * between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. 
X is updated to contain * the new seed x_1, so that subsequent calls to RANDLC using the same * arguments will generate a continuous sequence. * * This routine should produce the same results on any computer with at least * 48 mantissa bits in double precision floating point data. On Cray systems, * double precision should be disabled. * * David H. Bailey October 26, 1990 * * IMPLICIT DOUBLE PRECISION (A-H, O-Z) * SAVE KS, R23, R46, T23, T46 * DATA KS/0/ * * If this is the first call to RANDLC, compute R23 = 2 ^ -23, R46 = 2 ^ -46, * T23 = 2 ^ 23, and T46 = 2 ^ 46. These are computed in loops, rather than * by merely using the ** operator, in order to insure that the results are * exact on all systems. This code assumes that 0.5D0 is represented exactly. */ /*****************************************************************/ /************* R A N D L C ************/ /************* ************/ /************* portable random number generator ************/ /*****************************************************************/ static double randlc(X, A) double *X; double *A; { static int KS=0; static double R23, R46, T23, T46; double T1, T2, T3, T4; double A1; double A2; double X1; double X2; double Z; int i, j; if (KS == 0) { R23 = 1.0; R46 = 1.0; T23 = 1.0; T46 = 1.0; for (i=1; i<=23; i++) { R23 = 0.50 * R23; T23 = 2.0 * T23; } for (i=1; i<=46; i++) { R46 = 0.50 * R46; T46 = 2.0 * T46; } KS = 1; } /* Break A into two parts such that A = 2^23 * A1 + A2 and set X = N. */ T1 = R23 * *A; j = T1; A1 = j; A2 = *A - T23 * A1; /* Break X into two parts such that X = 2^23 * X1 + X2, compute Z = A1 * X2 + A2 * X1 (mod 2^23), and then X = 2^23 * Z + A2 * X2 (mod 2^46). 
*/ T1 = R23 * *X; j = T1; X1 = j; X2 = *X - T23 * X1; T1 = A1 * X2 + A2 * X1; j = R23 * T1; T2 = j; Z = T1 - T23 * T2; T3 = T23 * Z + A2 * X2; j = R46 * T3; T4 = j; *X = T3 - T46 * T4; return(R46 * *X); } /*****************************************************************/ /************* C R E A T E _ S E Q ************/ /*****************************************************************/ void create_seq( double seed, double a ) { double x; int i, j, k; k = MAX_KEY/4; for (i=0; i<NUM_KEYS; i++) { x = randlc(&seed, &a); x += randlc(&seed, &a); x += randlc(&seed, &a); x += randlc(&seed, &a); key_array[i] = k*x; } } /*****************************************************************/ /************* F U L L _ V E R I F Y ************/ /*****************************************************************/ void full_verify() { INT_TYPE i, j; INT_TYPE k; INT_TYPE m, unique_keys; /* Now, finally, sort the keys: */ for( i=0; i<NUM_KEYS; i++ ) key_array[--key_buff_ptr_global[key_buff2[i]]] = key_buff2[i]; /* Confirm keys correctly sorted: count incorrectly sorted keys, if any */ j = 0; for( i=1; i<NUM_KEYS; i++ ) if( key_array[i-1] > key_array[i] ) j++; if( j != 0 ) { printf( "Full_verify: number of keys out of sort: %d\n", j ); } else passed_verification++; } /*****************************************************************/ /************* R A N K ****************/ /*****************************************************************/ void rank( int iteration ) { INT_TYPE i, j, k; INT_TYPE l, m; INT_TYPE shift = MAX_KEY_LOG_2 - NUM_BUCKETS_LOG_2; INT_TYPE key; INT_TYPE min_key_val, max_key_val; INT_TYPE prv_buff1[MAX_KEY]; #pragma omp master { key_array[iteration] = iteration; key_array[iteration+MAX_ITERATIONS] = MAX_KEY - iteration; /* Determine where the partial verify test keys are, load into */ /* top of array bucket_size */ for( i=0; i<TEST_ARRAY_SIZE; i++ ) partial_verify_vals[i] = key_array[test_index_array[i]]; /* Clear the work array */ for( i=0; i<MAX_KEY; i++ ) 
key_buff1[i] = 0;
  }  /* end omp master: single-threaded per-iteration setup */
#pragma omp barrier

    /* Each thread first builds a private histogram of key populations. */
    for (i=0; i<MAX_KEY; i++)
        prv_buff1[i] = 0;

/*  Copy keys into work array; keys in key_array will be reused each iter. */
#pragma omp for nowait
    for( i=0; i<NUM_KEYS; i++ )
    {
        key_buff2[i] = key_array[i];

/*  Ranking of all keys occurs in this section:                 */

/*  In this section, the keys themselves are used as their
    own indexes to determine how many of each there are: their
    individual population                                       */

        prv_buff1[key_buff2[i]]++;  /* Now they have individual key   */
    }                                       /* population                     */

    /* Private prefix sum turns per-key counts into cumulative counts. */
    for( i=0; i<MAX_KEY-1; i++ )
        prv_buff1[i+1] += prv_buff1[i];

    //mjc
    /* Merge each thread's private cumulative counts into shared key_buff1. */
#pragma omp critical(is)
    {
        for( i=0; i<MAX_KEY; i++ )
            key_buff1[i] += prv_buff1[i];
    }

/*  To obtain ranks of each key, successively add the individual key
    population, not forgetting to add m, the total of lesser keys,
    to the first key population                                          */

#pragma omp barrier
#pragma omp master
  {

/* This is the partial verify test section */
/* Observe that test_rank_array vals are   */
/* shifted differently for different cases */
    for( i=0; i<TEST_ARRAY_SIZE; i++ )
    {
        k = partial_verify_vals[i];          /* test vals were put here */
        if( 0 <= k  &&  k <= NUM_KEYS-1 )
            switch( CLASS )
            {
                case 'S':
                    if( i <= 2 )
                    {
                        if( key_buff1[k-1] != test_rank_array[i]+iteration )
                        {
                            printf( "Failed partial verification: "
                                  "iteration %d, test key %d\n",
                                   iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                  "iteration %d, test key %d\n",
                                   iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'W':
                    if( i < 2 )
                    {
                        if( key_buff1[k-1] != test_rank_array[i]+(iteration-2) )
                        {
                            printf( "Failed partial verification: "
                                  "iteration %d, test key %d\n",
                                   iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                  "iteration %d, test key %d\n",
                                   iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'A':
                    if( i <= 2 )
                    {
                        if( key_buff1[k-1] != test_rank_array[i]+(iteration-1) )
                        {
                            printf( "Failed partial verification: "
                                  "iteration %d, test key %d\n",
                                   iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] != test_rank_array[i]-(iteration-1) )
                        {
                            printf( "Failed partial verification: "
                                  "iteration %d, test key %d\n",
                                   iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'B':
                    if( i == 1 || i == 2 || i == 4 )
                    {
                        if( key_buff1[k-1] != test_rank_array[i]+iteration )
                        {
                            printf( "Failed partial verification: "
                                  "iteration %d, test key %d\n",
                                   iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                  "iteration %d, test key %d\n",
                                   iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
                case 'C':
                    if( i <= 2 )
                    {
                        if( key_buff1[k-1] != test_rank_array[i]+iteration )
                        {
                            printf( "Failed partial verification: "
                                  "iteration %d, test key %d\n",
                                   iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    else
                    {
                        if( key_buff1[k-1] != test_rank_array[i]-iteration )
                        {
                            printf( "Failed partial verification: "
                                  "iteration %d, test key %d\n",
                                   iteration, i );
                        }
                        else
                            passed_verification++;
                    }
                    break;
            }
    }


/*  Make copies of rank info for use by full_verify: these variables
    in rank are local; making them global slows down the code, probably
    since they cannot be made register by compiler                        */

    if( iteration == MAX_ITERATIONS )
        key_buff_ptr_global = key_buff1;

  } /* end master */
}


/*****************************************************************/
/*************             M  A  I  N             ****************/
/*****************************************************************/

/* Benchmark entry point, registered below as a Nautilus shell command. */
static int program_IS(char *_buf, void* _priv);

/* Shell registration for the plain (no-paging) benchmark. */
static struct shell_cmd_impl nas_is_impl = {
  .cmd      = "nas-is",
  .help_str = "NAS parallel benchmark IS",
  .handler  = program_IS,
};
nk_register_shell_cmd(nas_is_impl);

#ifdef NAUT_CONFIG_ASPACE_PAGING
/* Paging variant: runs program_IS inside the shared paging wrapper. */
int program_IS_paging(char * _buf, void *_priv){
  return paging_wrapper(_buf, _priv, &program_IS);
}

static struct shell_cmd_impl nas_is_paging_impl = {
  .cmd
= "nas-is-paging", .help_str = "NAS parallel benchmark IS with paging", .handler = program_IS_paging, }; nk_register_shell_cmd(nas_is_paging_impl); #endif // int program_IS_paging(char * _buf, void *_priv){ // nk_thread_id_t t; // t = nk_thread_fork(); // if(t==NK_BAD_THREAD_ID){ // printf("Failed to fork thread\n"); // return 0; // } // if (t==0) { // // child thread // char buf[32]; // struct nk_thread *t = get_cur_thread(); // nk_thread_name(get_cur_thread(),buf); // get_cur_thread()->vc = get_cur_thread()->parent->vc; // printf("Hello from forked thread tid %lu \n", t->tid); // // if the function being forked is inlined // // we must explicitly invoke // // nk_thread_exit(0); // // here // #ifdef NAUT_CONFIG_ASPACE_PAGING // nk_aspace_t *old_aspace = t->aspace; // printf("The old aspace is %p\n", old_aspace); // if(old_aspace){ // printf("The remove will be called\n"); // } // nk_aspace_characteristics_t c; // if (nk_aspace_query("paging",&c)) { // printf("failed to find paging implementation\n"); // return -1; // } // // create a new address space for this shell thread // nk_aspace_t *mas = nk_aspace_create("paging","paging for NAS benchmark",&c); // if (!mas) { // printf("failed to create new address space\n"); // return -1; // } // nk_aspace_region_t r; // // create a 1-1 region mapping all of physical memory // // so that the kernel can work when that thread is active // r.va_start = 0; // r.pa_start = 0; // r.len_bytes = 0x100000000UL; // first 4 GB are mapped // // set protections for kernel // // use EAGER to tell paging implementation that it needs to build all these PTs right now // r.protect.flags = NK_ASPACE_READ | NK_ASPACE_WRITE | NK_ASPACE_EXEC | NK_ASPACE_PIN | NK_ASPACE_KERN | NK_ASPACE_EAGER; // // now add the region // // this should build the page tables immediately // if (nk_aspace_add_region(mas,&r)) { // printf("failed to add initial eager region to address space\n"); // return -1; // } // if (nk_aspace_move_thread(mas)) { // 
// (remainder of the commented-out fork/paging experiment elided -- dead code;
//  NOTE(review): consider deleting the whole disabled block above entirely)

// Shell-command entry point for the NAS IS (Integer Sort) benchmark,
// registered above as "nas-is". Initializes the class-dependent verification
// tables, generates the key sequence, runs MAX_ITERATIONS ranking passes
// (one untimed warm-up pass first), verifies the result and prints the
// standard NPB report. Always returns 0; verification failures are only
// reported via the printed results.
int program_IS(char * _buf, void *_priv)
{
    int             i, iteration, itemp;   // NOTE(review): itemp is never used
    int             nthreads = 1;
    double          timecounter, maxtime;  // NOTE(review): maxtime is never used

    /* Initialize the verification arrays if a valid class */
    for( i=0; i<TEST_ARRAY_SIZE; i++ )
        switch( CLASS )
        {
            case 'S':
                test_index_array[i] = S_test_index_array[i];
                test_rank_array[i] = S_test_rank_array[i];
                break;
            case 'A':
                test_index_array[i] = A_test_index_array[i];
                test_rank_array[i] = A_test_rank_array[i];
                break;
            case 'W':
                test_index_array[i] = W_test_index_array[i];
                test_rank_array[i] = W_test_rank_array[i];
                break;
            case 'B':
                test_index_array[i] = B_test_index_array[i];
                test_rank_array[i] = B_test_rank_array[i];
                break;
            case 'C':
                test_index_array[i] = C_test_index_array[i];
                test_rank_array[i] = C_test_rank_array[i];
                break;
        };

    /* Printout initial NPB info */
    printf( "\n\n NAS Parallel Benchmarks 2.3 OpenMP C version" " - IS Benchmark\n\n" );
    printf( " Size: %d (class %c)\n", TOTAL_KEYS, CLASS );
    printf( " Iterations: %d\n", MAX_ITERATIONS );

    /* Initialize timer */
    timer_clear( 0 );

    /* Generate random number sequence and subsequent keys on all procs */
    create_seq( 314159265.00,     /* Random number gen seed */
                1220703125.00 );  /* Random number gen mult */

    /* Do one iteration for free (i.e., untimed) to guarantee
       initialization of all data and code pages and respective tables */
#pragma omp parallel
    rank( 1 );

    /* Start verification counter */
    passed_verification = 0;

    if( CLASS != 'S' ) printf( "\n iteration\n" );

    /* Start timer */
    timer_start( 0 );

    /* This is the main iteration */
#pragma omp parallel private(iteration)
    for( iteration=1; iteration<=MAX_ITERATIONS; iteration++ )
    {
#pragma omp master
        if( CLASS != 'S' ) printf( " %d\n", iteration );
        rank( iteration );
#if defined(_OPENMP)
#pragma omp master
        nthreads = omp_get_num_threads();
#endif /* _OPENMP */
    }

    /* End of timing, obtain maximum time of all processors */
    timer_stop( 0 );
    timecounter = timer_read( 0 );

    /* This tests that keys are in sequence: sorting of last ranked key seq
       occurs here, but is an untimed operation */
    full_verify();

    /* The final printout: 5 partial checks per iteration plus the full
       verification must all have passed */
    if( passed_verification != 5*MAX_ITERATIONS + 1 )
        passed_verification = 0;
    c_print_results( "IS", CLASS, TOTAL_KEYS, 0, 0, MAX_ITERATIONS, nthreads,
                     timecounter, ((double) (MAX_ITERATIONS*TOTAL_KEYS)) /timecounter/1000000.,
                     "keys ranked", passed_verification, NPBVERSION, COMPILETIME, CC, CLINK,
                     C_LIB, C_INC, CFLAGS, CLINKFLAGS, "randlc");

    // return added by Siyuan and Zhen
    return 0;
          /**************************/
}         /* E N D  P R O G R A M  */
          /**************************/
ocp_nlp_sqp.c
/* * Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren, Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor, Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan, Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl * * This file is part of acados. * * The 2-Clause BSD License * * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "acados/ocp_nlp/ocp_nlp_sqp.h" // external #include <assert.h> #include <math.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #if defined(ACADOS_WITH_OPENMP) #include <omp.h> #endif // blasfeo #include "blasfeo/include/blasfeo_d_aux.h" #include "blasfeo/include/blasfeo_d_aux_ext_dep.h" #include "blasfeo/include/blasfeo_d_blas.h" // acados #include "acados/ocp_nlp/ocp_nlp_common.h" #include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h" #include "acados/ocp_nlp/ocp_nlp_reg_common.h" #include "acados/ocp_qp/ocp_qp_common.h" #include "acados/sim/sim_common.h" #include "acados/utils/math.h" #include "acados/utils/mem.h" #include "acados/utils/print.h" #include "acados/utils/timing.h" #include "acados/utils/types.h" /************************************************ * options ************************************************/ int ocp_nlp_sqp_opts_calculate_size(void *config_, void *dims_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; int N = dims->N; int size = 0; size += sizeof(ocp_nlp_sqp_opts); size += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver); size += config->regularize->opts_calculate_size(); // dynamics size += N * sizeof(void *); for (int ii = 0; ii < N; ii++) { size += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]); } // cost size += (N + 1) * sizeof(void *); for (int ii = 0; ii <= N; ii++) { size += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]); } // constraints size += (N + 1) * sizeof(void *); for (int ii = 0; ii <= N; ii++) { size += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]); } return size; } void *ocp_nlp_sqp_opts_assign(void *config_, void *dims_, void *raw_memory) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = 
config_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int N = dims->N;

    char *c_ptr = (char *) raw_memory;

    ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) c_ptr;
    c_ptr += sizeof(ocp_nlp_sqp_opts);

    // QP solver options
    opts->qp_solver_opts = qp_solver->opts_assign(qp_solver, dims->qp_solver, c_ptr);
    c_ptr += qp_solver->opts_calculate_size(qp_solver, dims->qp_solver);

    // regularization options
    opts->regularize = config->regularize->opts_assign(c_ptr);
    c_ptr += config->regularize->opts_calculate_size();

    // dynamics
    opts->dynamics = (void **) c_ptr;
    c_ptr += N * sizeof(void *);
    for (int ii = 0; ii < N; ii++)
    {
        opts->dynamics[ii] = dynamics[ii]->opts_assign(dynamics[ii], dims->dynamics[ii], c_ptr);
        c_ptr += dynamics[ii]->opts_calculate_size(dynamics[ii], dims->dynamics[ii]);
    }

    // cost
    opts->cost = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        opts->cost[ii] = cost[ii]->opts_assign(cost[ii], dims->cost[ii], c_ptr);
        c_ptr += cost[ii]->opts_calculate_size(cost[ii], dims->cost[ii]);
    }

    // constraints
    opts->constraints = (void **) c_ptr;
    c_ptr += (N + 1) * sizeof(void *);
    for (int ii = 0; ii <= N; ii++)
    {
        opts->constraints[ii] =
            constraints[ii]->opts_assign(constraints[ii], dims->constraints[ii], c_ptr);
        c_ptr += constraints[ii]->opts_calculate_size(constraints[ii], dims->constraints[ii]);
    }

    // check we stayed within the advertised size
    assert((char *) raw_memory + ocp_nlp_sqp_opts_calculate_size(config, dims) >= c_ptr);

    return opts;
}



// Sets default values for the SQP-level options and initializes the options
// of all submodules (QP solver, regularization, dynamics, cost, constraints).
void ocp_nlp_sqp_opts_initialize_default(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;
    ocp_nlp_reg_config *regularize = config->regularize;

    int ii;

    int N = dims->N;

    // SQP opts
    opts->max_iter = 20;
    opts->tol_stat = 1e-8;
    opts->tol_eq = 1e-8;
    opts->tol_ineq = 1e-8;
    opts->tol_comp = 1e-8;

    opts->reuse_workspace = 1;
#if defined(ACADOS_WITH_OPENMP)
    opts->num_threads = ACADOS_NUM_THREADS;
#endif

    opts->ext_qp_res = 0;

    opts->qp_warm_start = 0;

    // submodules opts
    // qp solver
    qp_solver->opts_initialize_default(qp_solver, dims->qp_solver, opts->qp_solver_opts);
    // overwrite default: propagate the SQP tolerances to the QP solver
    qp_solver->opts_set(qp_solver, opts->qp_solver_opts, "tol_stat", &opts->tol_stat);
    qp_solver->opts_set(qp_solver, opts->qp_solver_opts, "tol_eq", &opts->tol_eq);
    qp_solver->opts_set(qp_solver, opts->qp_solver_opts, "tol_ineq", &opts->tol_ineq);
    qp_solver->opts_set(qp_solver, opts->qp_solver_opts, "tol_comp", &opts->tol_comp);

    // regularization
    regularize->opts_initialize_default(regularize, dims->regularize, opts->regularize);

    // dynamics
    for (ii = 0; ii < N; ii++)
    {
        dynamics[ii]->opts_initialize_default(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
    }

    // cost
    for (ii = 0; ii <= N; ii++)
    {
        cost[ii]->opts_initialize_default(cost[ii], dims->cost[ii], opts->cost[ii]);
    }

    // constraints
    for (ii = 0; ii <= N; ii++)
    {
        constraints[ii]->opts_initialize_default(constraints[ii], dims->constraints[ii],
                                                 opts->constraints[ii]);
    }

    return;
}



// Propagates (possibly changed) option values into all submodules.
void ocp_nlp_sqp_opts_update(void *config_, void *dims_, void *opts_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;
    ocp_nlp_dynamics_config **dynamics = config->dynamics;
    ocp_nlp_cost_config **cost = config->cost;
    ocp_nlp_constraints_config **constraints = config->constraints;

    int ii;

    int N = dims->N;

    qp_solver->opts_update(qp_solver, dims->qp_solver, opts->qp_solver_opts);

    // dynamics
    for (ii = 0; ii < N; ii++)
    {
        dynamics[ii]->opts_update(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]);
    }

    // cost
    for (ii = 0; ii <= N; ii++)
    {
        cost[ii]->opts_update(cost[ii],
dims->cost[ii], opts->cost[ii]); } // constraints for (ii = 0; ii <= N; ii++) { constraints[ii]->opts_update(constraints[ii], dims->constraints[ii], opts->constraints[ii]); } return; } void ocp_nlp_sqp_opts_set(void *config_, void *opts_, const char *field, void* value) { ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_; ocp_nlp_config *config = config_; int ii; char module[MAX_STR_LEN]; char *ptr_module = NULL; int module_length = 0; // extract module name char *char_ = strchr(field, '_'); if(char_!=NULL) { module_length = char_-field; for(ii=0; ii<module_length; ii++) module[ii] = field[ii]; module[module_length] = '\0'; // add end of string ptr_module = module; } // pass options to QP module if(!strcmp(ptr_module, "qp")) { config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, field+module_length+1, value); if(!strcmp(field, "qp_warm_start")) { int* i_ptr = (int *) value; opts->qp_warm_start = *i_ptr; } } else // nlp opts { if (!strcmp(field, "max_iter")) { int* max_iter = (int *) value; opts->max_iter = *max_iter; } else if (!strcmp(field, "reuse_workspace")) { int* reuse_workspace = (int *) value; opts->reuse_workspace = *reuse_workspace; } else if (!strcmp(field, "num_threads")) { int* num_threads = (int *) value; opts->num_threads = *num_threads; } else if (!strcmp(field, "tol_stat")) // TODO rename !!! { double* tol_stat = (double *) value; opts->tol_stat = *tol_stat; // pass to QP too config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "tol_stat", value); } else if (!strcmp(field, "tol_eq")) // TODO rename !!! { double* tol_eq = (double *) value; opts->tol_eq = *tol_eq; // pass to QP too config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "tol_eq", value); } else if (!strcmp(field, "tol_ineq")) // TODO rename !!! 
{ double* tol_ineq = (double *) value; opts->tol_ineq = *tol_ineq; // pass to QP too config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "tol_ineq", value); } else if (!strcmp(field, "tol_comp")) // TODO rename !!! { double* tol_comp = (double *) value; opts->tol_comp = *tol_comp; // pass to QP too config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts, "tol_comp", value); } else if (!strcmp(field, "exact_hess")) { int N = config->N; // cost for (ii=0; ii<=N; ii++) config->cost[ii]->opts_set(config->cost[ii], opts->cost[ii], "exact_hess", value); // dynamics for (ii=0; ii<N; ii++) config->dynamics[ii]->opts_set(config->dynamics[ii], opts->dynamics[ii], "compute_hess", value); // constraints TODO disabled for now as prevents convergence !!! // for (ii=0; ii<=N; ii++) // config->constraints[ii]->opts_set(config->constraints[ii], opts->constraints[ii], "compute_hess", value); } else if (!strcmp(field, "ext_qp_res")) { int* ext_qp_res = (int *) value; opts->ext_qp_res = *ext_qp_res; } else { printf("\nerror: ocp_nlp_sqp_opts_set: wrong field: %s\n", field); exit(1); } } return; } void ocp_nlp_sqp_dynamics_opts_set(void *config_, void *opts_, int stage, const char *field, void *value) { ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_nlp_dynamics_config *dyn_config = config->dynamics[stage]; dyn_config->opts_set(dyn_config, opts->dynamics[stage], field, value); return; } void ocp_nlp_sqp_cost_opts_set(void *config_, void *opts_, int stage, const char *field, void *value) { ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_nlp_cost_config *cost_config = config->cost[stage]; cost_config->opts_set(cost_config, opts->cost[stage], field, value); return; } void ocp_nlp_sqp_constraints_opts_set(void *config_, void *opts_, int stage, const char *field, void *value) { ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_nlp_constraints_config *constraints_config = config->constraints[stage]; 
constraints_config->opts_set(constraints_config, opts->constraints[stage], (char *) field, value); return; } /************************************************ * memory ************************************************/ int ocp_nlp_sqp_memory_calculate_size(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; // extract dims int N = dims->N; // ocp_nlp_cost_dims **cost_dims = dims->cost; // int ny; int size = 0; size += sizeof(ocp_nlp_sqp_memory); // qp solver size += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // regularization size += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize); // dynamics size += N * sizeof(void *); for (int ii = 0; ii < N; ii++) { size += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); } // cost size += (N + 1) * sizeof(void *); for (int ii = 0; ii <= N; ii++) { size += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]); } // constraints size += (N + 1) * sizeof(void *); for (int ii = 0; ii <= N; ii++) { size += constraints[ii]->memory_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]); } // nlp res size += ocp_nlp_res_calculate_size(dims); // nlp mem size += ocp_nlp_memory_calculate_size(config, dims); // stat int stat_m = opts->max_iter+1; int stat_n = 6; if(opts->ext_qp_res) stat_n += 4; size += stat_n*stat_m*sizeof(double); size += 8; // initial align // make_int_multiple_of(64, &size); return size; } void *ocp_nlp_sqp_memory_assign(void *config_, void *dims_, void *opts_, void *raw_memory) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; 
ocp_nlp_sqp_opts *opts = opts_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; char *c_ptr = (char *) raw_memory; // extract dims int N = dims->N; // ocp_nlp_cost_dims **cost_dims = dims->cost; // int ny; // initial align align_char_to(8, &c_ptr); ocp_nlp_sqp_memory *mem = (ocp_nlp_sqp_memory *) c_ptr; c_ptr += sizeof(ocp_nlp_sqp_memory); // QP solver mem->qp_solver_mem = qp_solver->memory_assign(qp_solver, dims->qp_solver, opts->qp_solver_opts, c_ptr); c_ptr += qp_solver->memory_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // regularization mem->regularize_mem = config->regularize->memory_assign(config->regularize, dims->regularize, opts->regularize, c_ptr); c_ptr += config->regularize->memory_calculate_size(config->regularize, dims->regularize, opts->regularize); // nlp res mem->nlp_res = ocp_nlp_res_assign(dims, c_ptr); c_ptr += mem->nlp_res->memsize; // nlp mem mem->nlp_mem = ocp_nlp_memory_assign(config, dims, c_ptr); c_ptr += ocp_nlp_memory_calculate_size(config, dims); // dynamics mem->dynamics = (void **) c_ptr; c_ptr += N * sizeof(void *); for (int ii = 0; ii < N; ii++) { mem->dynamics[ii] = dynamics[ii]->memory_assign(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii], c_ptr); c_ptr += dynamics[ii]->memory_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); } // cost mem->cost = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); for (int ii = 0; ii <= N; ii++) { mem->cost[ii] = cost[ii]->memory_assign(cost[ii], dims->cost[ii], opts->cost[ii], c_ptr); c_ptr += cost[ii]->memory_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]); } // constraints mem->constraints = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); for (int ii = 0; ii <= N; ii++) { mem->constraints[ii] = constraints[ii]->memory_assign( constraints[ii], 
dims->constraints[ii], opts->constraints[ii], c_ptr); c_ptr += constraints[ii]->memory_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]); } // stat mem->stat = (double *) c_ptr; mem->stat_m = opts->max_iter+1; mem->stat_n = 6; if(opts->ext_qp_res) mem->stat_n += 4; c_ptr += mem->stat_m*mem->stat_n*sizeof(double); mem->status = ACADOS_READY; assert((char *) raw_memory + ocp_nlp_sqp_memory_calculate_size(config, dims, opts) >= c_ptr); return mem; } /************************************************ * workspace ************************************************/ int ocp_nlp_sqp_workspace_calculate_size(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; // loop index int ii; // extract dims int N = dims->N; int *nx = dims->nx; int *nu = dims->nu; int *nz = dims->nz; int size = 0; int size_tmp = 0; int tmp; // sqp size += sizeof(ocp_nlp_sqp_work); // array of pointers // cost size += (N + 1) * sizeof(void *); // dynamics size += N * sizeof(void *); // constraints size += (N + 1) * sizeof(void *); // qp in size += ocp_qp_in_calculate_size(qp_solver, dims->qp_solver); // qp out size += ocp_qp_out_calculate_size(qp_solver, dims->qp_solver); if(opts->ext_qp_res) { // qp res size += ocp_qp_res_calculate_size(dims->qp_solver); // qp res ws size += ocp_qp_res_workspace_calculate_size(dims->qp_solver); } if (opts->reuse_workspace) { #if defined(ACADOS_WITH_OPENMP) // qp solver size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // dynamics for (ii = 0; ii < N; ii++) { size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); } // cost for (ii = 0; ii <= N; ii++) { 
size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]); } // constraints for (ii = 0; ii <= N; ii++) { size += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]); } #else // qp solver tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); size_tmp = tmp > size_tmp ? tmp : size_tmp; // dynamics for (ii = 0; ii < N; ii++) { tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); size_tmp = tmp > size_tmp ? tmp : size_tmp; } // cost for (ii = 0; ii <= N; ii++) { tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]); size_tmp = tmp > size_tmp ? tmp : size_tmp; } // constraints for (ii = 0; ii <= N; ii++) { tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]); size_tmp = tmp > size_tmp ? tmp : size_tmp; } size += size_tmp; #endif } else { // qp solver size += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // dynamics for (ii = 0; ii < N; ii++) { size += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); } // cost for (ii = 0; ii <= N; ii++) { size += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]); } // constraints for (ii = 0; ii <= N; ii++) { size += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]); } } // dzduxt size += (N+1)*sizeof(struct blasfeo_dmat); for(ii=0; ii<=N; ii++) size += blasfeo_memsize_dmat(nu[ii]+nx[ii], nz[ii]); // z_alg size += (N+1)*sizeof(struct blasfeo_dvec); for(ii=0; ii<=N; ii++) size += blasfeo_memsize_dvec(nz[ii]); size += 1*8; // blasfeo_str align size += 1*64; // blasfeo_mem align return size; } // TODO(all): introduce member "memsize" in all structures to make on-line cast cheaper (i.e. 
avoid // to calculate size on-line) static void ocp_nlp_sqp_cast_workspace(void *config_, ocp_nlp_dims *dims, ocp_nlp_sqp_work *work, ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_opts *opts) { ocp_nlp_config *config = (ocp_nlp_config *) config_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_dynamics_config **dynamics = config->dynamics; ocp_nlp_cost_config **cost = config->cost; ocp_nlp_constraints_config **constraints = config->constraints; // loop index int ii; // extract dims int N = dims->N; int *nx = dims->nx; int *nu = dims->nu; int *nz = dims->nz; // sqp char *c_ptr = (char *) work; c_ptr += sizeof(ocp_nlp_sqp_work); // array of pointers // work->dynamics = (void **) c_ptr; c_ptr += N * sizeof(void *); // work->cost = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); // work->constraints = (void **) c_ptr; c_ptr += (N + 1) * sizeof(void *); // qp in work->qp_in = ocp_qp_in_assign(qp_solver, dims->qp_solver, c_ptr); c_ptr += ocp_qp_in_calculate_size(qp_solver, dims->qp_solver); // qp out work->qp_out = ocp_qp_out_assign(qp_solver, dims->qp_solver, c_ptr); c_ptr += ocp_qp_out_calculate_size(qp_solver, dims->qp_solver); if(opts->ext_qp_res) { // qp res work->qp_res = ocp_qp_res_assign(dims->qp_solver, c_ptr); c_ptr += ocp_qp_res_calculate_size(dims->qp_solver); // qp res ws work->qp_res_ws = ocp_qp_res_workspace_assign(dims->qp_solver, c_ptr); c_ptr += ocp_qp_res_workspace_calculate_size(dims->qp_solver); } if (opts->reuse_workspace) { #if defined(ACADOS_WITH_OPENMP) // qp solver work->qp_work = (void *) c_ptr; c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // dynamics for (int ii = 0; ii < N; ii++) { work->dynamics[ii] = c_ptr; c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); } // cost for (int ii = 0; ii <= N; ii++) { work->cost[ii] = c_ptr; c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]); } // constraints 
for (int ii = 0; ii <= N; ii++) { work->constraints[ii] = c_ptr; c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]); } #else int size_tmp = 0; int tmp; // qp solver work->qp_work = (void *) c_ptr; tmp = qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); size_tmp = tmp > size_tmp ? tmp : size_tmp; // dynamics for (int ii = 0; ii < N; ii++) { work->dynamics[ii] = c_ptr; tmp = dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); size_tmp = tmp > size_tmp ? tmp : size_tmp; } // cost for (int ii = 0; ii <= N; ii++) { work->cost[ii] = c_ptr; tmp = cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]); size_tmp = tmp > size_tmp ? tmp : size_tmp; } // constraints for (int ii = 0; ii <= N; ii++) { work->constraints[ii] = c_ptr; tmp = constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]); size_tmp = tmp > size_tmp ? 
tmp : size_tmp; } c_ptr += size_tmp; #endif } else { // qp solver work->qp_work = (void *) c_ptr; c_ptr += qp_solver->workspace_calculate_size(qp_solver, dims->qp_solver, opts->qp_solver_opts); // dynamics for (int ii = 0; ii < N; ii++) { work->dynamics[ii] = c_ptr; c_ptr += dynamics[ii]->workspace_calculate_size(dynamics[ii], dims->dynamics[ii], opts->dynamics[ii]); } // cost for (int ii = 0; ii <= N; ii++) { work->cost[ii] = c_ptr; c_ptr += cost[ii]->workspace_calculate_size(cost[ii], dims->cost[ii], opts->cost[ii]); } // constraints for (int ii = 0; ii <= N; ii++) { work->constraints[ii] = c_ptr; c_ptr += constraints[ii]->workspace_calculate_size(constraints[ii], dims->constraints[ii], opts->constraints[ii]); } } // blasfeo_str align align_char_to(8, &c_ptr); // dzduxt work->dzduxt = (struct blasfeo_dmat *) c_ptr; c_ptr += (N+1)*sizeof(struct blasfeo_dmat); // z_alg work->z_alg = (struct blasfeo_dvec *) c_ptr; c_ptr += (N+1)*sizeof(struct blasfeo_dvec); // blasfeo_mem align align_char_to(64, &c_ptr); // dzduxt for(ii=0; ii<=N; ii++) { blasfeo_create_dmat(nu[ii]+nx[ii], nz[ii], work->dzduxt+ii, c_ptr); c_ptr += blasfeo_memsize_dmat(nu[ii]+nx[ii], nz[ii]); } // z_alg for(ii=0; ii<=N; ii++) { blasfeo_create_dvec(nz[ii], work->z_alg+ii, c_ptr); c_ptr += blasfeo_memsize_dvec(nz[ii]); } // assert & return assert((char *) work + ocp_nlp_sqp_workspace_calculate_size(config, dims, opts) >= c_ptr); return; } /************************************************ * functions ************************************************/ static void initialize_qp(void *config_, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in, ocp_nlp_out *nlp_out, ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_work *work) { ocp_nlp_config *config = (ocp_nlp_config *) config_; // loop index int ii; // extract dims int N = dims->N; #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (ii = 0; ii <= N; ii++) { // cost config->cost[ii]->initialize(config->cost[ii], dims->cost[ii], 
nlp_in->cost[ii], opts->cost[ii], mem->cost[ii], work->cost[ii]); // dynamics if (ii < N) config->dynamics[ii]->initialize(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], opts->dynamics[ii], mem->dynamics[ii], work->dynamics[ii]); // constraints config->constraints[ii]->initialize(config->constraints[ii], dims->constraints[ii], nlp_in->constraints[ii], opts->constraints[ii], mem->constraints[ii], work->constraints[ii]); } return; } static void linearize_update_qp_matrices(void *config_, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in, ocp_nlp_out *nlp_out, ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_work *work) { ocp_nlp_config *config = (ocp_nlp_config *) config_; // loop index int i; // extract dims int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; int *nu = dims->nu; int *ni = dims->ni; ocp_nlp_memory *nlp_mem = mem->nlp_mem; /* stage-wise multiple shooting lagrangian evaluation */ #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (i = 0; i <= N; i++) { // init Hessian to 0 blasfeo_dgese(nu[i] + nx[i], nu[i] + nx[i], 0.0, work->qp_in->RSQrq+i, 0, 0); // dynamics if (i < N) config->dynamics[i]->update_qp_matrices(config->dynamics[i], dims->dynamics[i], nlp_in->dynamics[i], opts->dynamics[i], mem->dynamics[i], work->dynamics[i]); // cost config->cost[i]->update_qp_matrices(config->cost[i], dims->cost[i], nlp_in->cost[i], opts->cost[i], mem->cost[i], work->cost[i]); // constraints config->constraints[i]->update_qp_matrices(config->constraints[i], dims->constraints[i], nlp_in->constraints[i], opts->constraints[i], mem->constraints[i], work->constraints[i]); } /* collect stage-wise evaluations */ #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (i=0; i <= N; i++) { // nlp mem: cost_grad struct blasfeo_dvec *cost_grad = config->cost[i]->memory_get_grad_ptr(mem->cost[i]); blasfeo_dveccp(nv[i], cost_grad, 0, nlp_mem->cost_grad + i, 0); // nlp mem: dyn_fun if (i < N) { struct blasfeo_dvec *dyn_fun = 
config->dynamics[i]->memory_get_fun_ptr(mem->dynamics[i]); blasfeo_dveccp(nx[i + 1], dyn_fun, 0, nlp_mem->dyn_fun + i, 0); } // nlp mem: dyn_adj if (i < N) { struct blasfeo_dvec *dyn_adj = config->dynamics[i]->memory_get_adj_ptr(mem->dynamics[i]); blasfeo_dveccp(nu[i] + nx[i], dyn_adj, 0, nlp_mem->dyn_adj + i, 0); } else { blasfeo_dvecse(nu[N] + nx[N], 0.0, nlp_mem->dyn_adj + N, 0); } if (i > 0) { struct blasfeo_dvec *dyn_adj = config->dynamics[i-1]->memory_get_adj_ptr(mem->dynamics[i-1]); blasfeo_daxpy(nx[i], 1.0, dyn_adj, nu[i-1]+nx[i-1], nlp_mem->dyn_adj+i, nu[i], nlp_mem->dyn_adj+i, nu[i]); } // nlp mem: ineq_fun struct blasfeo_dvec *ineq_fun = config->constraints[i]->memory_get_fun_ptr(mem->constraints[i]); blasfeo_dveccp(2 * ni[i], ineq_fun, 0, nlp_mem->ineq_fun + i, 0); // nlp mem: ineq_adj struct blasfeo_dvec *ineq_adj = config->constraints[i]->memory_get_adj_ptr(mem->constraints[i]); blasfeo_dveccp(nv[i], ineq_adj, 0, nlp_mem->ineq_adj + i, 0); } // TODO(all): still to clean !!!!!!!!!!!!! for (i = 0; i <= N; i++) { // TODO(rien) where should the update happen??? move to qp update ??? // TODO(all): fix and move where appropriate // if(i<N) // { // ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i]; // sim_opts *opts = dynamics_opts->sim_solver; // if (opts->scheme != NULL && opts->scheme->type != exact) // { // for (int_t j = 0; j < nx; j++) // BLASFEO_DVECEL(nlp_mem->cost_grad+i, nu+j) += work->sim_out[i]->grad[j]; // for (int_t j = 0; j < nu; j++) // BLASFEO_DVECEL(nlp_mem->cost_grad+i, j) += work->sim_out[i]->grad[nx+j]; // } // } } return; } // update QP rhs for SQP (step prim var, abs dual var) // TODO(all): move in dynamics, cost, constraints modules ??? 
static void sqp_update_qp_vectors(void *config_, ocp_nlp_dims *dims, ocp_nlp_in *nlp_in, ocp_nlp_out *nlp_out, ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_work *work) { // loop index int i; // extract dims int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; // int *nu = dims->nu; int *ni = dims->ni; ocp_nlp_memory *nlp_mem = mem->nlp_mem; #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (i = 0; i <= N; i++) { // g blasfeo_dveccp(nv[i], nlp_mem->cost_grad + i, 0, work->qp_in->rqz + i, 0); // b if (i < N) blasfeo_dveccp(nx[i + 1], nlp_mem->dyn_fun + i, 0, work->qp_in->b + i, 0); // d blasfeo_dveccp(2 * ni[i], nlp_mem->ineq_fun + i, 0, work->qp_in->d + i, 0); } return; } static void sqp_update_variables(void *config_, ocp_nlp_dims *dims, ocp_nlp_out *nlp_out, ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_work *work) { // loop index int i; // extract dims int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; // int *nu = dims->nu; int *ni = dims->ni; int *nz = dims->nz; // ocp_nlp_config *config = (ocp_nlp_config *) config_; // TODO(all): fix and move where appropriate // for (i = 0; i < N; i++) // { // nx1 = dims->constraints[i+1]->nx; // for (j = 0; j < nx1; j++) // { // work->sim_in[i]->S_adj[j] = -BLASFEO_DVECEL(&work->qp_out->pi[i], j); // } // } #if defined(ACADOS_WITH_OPENMP) #pragma omp parallel for #endif for (i = 0; i <= N; i++) { // (full) step in primal variables blasfeo_daxpy(nv[i], 1.0, work->qp_out->ux + i, 0, nlp_out->ux + i, 0, nlp_out->ux + i, 0); // absolute in dual variables if (i < N) blasfeo_dveccp(nx[i + 1], work->qp_out->pi + i, 0, nlp_out->pi + i, 0); blasfeo_dveccp(2 * ni[i], work->qp_out->lam + i, 0, nlp_out->lam + i, 0); blasfeo_dveccp(2 * ni[i], work->qp_out->t + i, 0, nlp_out->t + i, 0); if (i < N) blasfeo_dveccp(nz[i], work->z_alg+i, 0, nlp_out->z+i, 0); } return; } // Simple fixed-step Gauss-Newton based SQP routine int ocp_nlp_sqp(void *config_, void *dims_, void *nlp_in_, void 
*nlp_out_, void *opts_, void *mem_, void *work_)
{
    // acados timer
    acados_timer timer0, timer1;

    // start timer (timer0 = total solve time, timer1 = per-phase time)
    acados_tic(&timer0);

    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_sqp_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    ocp_nlp_out *nlp_out = nlp_out_;

    ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;

    ocp_nlp_sqp_work *work = work_;
    ocp_nlp_sqp_cast_workspace(config, dims, work, mem, opts);

    // zero timers
    double total_time = 0.0;
    mem->time_qp_sol = 0.0;
    mem->time_lin = 0.0;
    mem->time_reg = 0.0;
    mem->time_tot = 0.0;

    // extract dims
    int N = dims->N;

    int ii;

    int qp_iter = 0;
    int qp_status = 0;

#if defined(ACADOS_WITH_OPENMP)
    // backup number of threads
    int num_threads_bkp = omp_get_num_threads();
    // set number of threads
    omp_set_num_threads(opts->num_threads);
    #pragma omp parallel
    { // beginning of parallel region
#endif

    // alias to dynamics_memory: wire the stage modules to the shared
    // iterate (nlp_out) and QP workspace so they read/write in place
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->memory_set_ux_ptr(nlp_out->ux+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_ux1_ptr(nlp_out->ux+ii+1, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_pi_ptr(nlp_out->pi+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_BAbt_ptr(work->qp_in->BAbt+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_RSQrq_ptr(work->qp_in->RSQrq+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_dzduxt_ptr(work->dzduxt+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_z_guess_ptr(nlp_out->z+ii, mem->dynamics[ii]);
        config->dynamics[ii]->memory_set_z_alg_ptr(work->z_alg+ii, mem->dynamics[ii]);
    }

    // alias to cost_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->cost[ii]->memory_set_ux_ptr(nlp_out->ux + ii, mem->cost[ii]);
        config->cost[ii]->memory_set_z_alg_ptr(work->z_alg+ii, mem->cost[ii]);
        config->cost[ii]->memory_set_dzdux_tran_ptr(work->dzduxt+ii, mem->cost[ii]);
        config->cost[ii]->memory_set_RSQrq_ptr(work->qp_in->RSQrq + ii, mem->cost[ii]);
        config->cost[ii]->memory_set_Z_ptr(work->qp_in->Z + ii, mem->cost[ii]);
    }

    // alias to constraints_memory
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii <= N; ii++)
    {
        config->constraints[ii]->memory_set_ux_ptr(nlp_out->ux+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_lam_ptr(nlp_out->lam+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_DCt_ptr(work->qp_in->DCt+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_RSQrq_ptr(work->qp_in->RSQrq+ii, mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxb_ptr(work->qp_in->idxb[ii], mem->constraints[ii]);
        config->constraints[ii]->memory_set_idxs_ptr(work->qp_in->idxs[ii], mem->constraints[ii]);
    }

    // alias to regularize memory
    config->regularize->memory_set_RSQrq_ptr(dims->regularize, work->qp_in->RSQrq, mem->regularize_mem);
    config->regularize->memory_set_rq_ptr(dims->regularize, work->qp_in->rqz, mem->regularize_mem);
    config->regularize->memory_set_BAbt_ptr(dims->regularize, work->qp_in->BAbt, mem->regularize_mem);
    config->regularize->memory_set_b_ptr(dims->regularize, work->qp_in->b, mem->regularize_mem);
    config->regularize->memory_set_idxb_ptr(dims->regularize, work->qp_in->idxb, mem->regularize_mem);
    config->regularize->memory_set_DCt_ptr(dims->regularize, work->qp_in->DCt, mem->regularize_mem);
    config->regularize->memory_set_ux_ptr(dims->regularize, work->qp_out->ux, mem->regularize_mem);
    config->regularize->memory_set_pi_ptr(dims->regularize, work->qp_out->pi, mem->regularize_mem);
    config->regularize->memory_set_lam_ptr(dims->regularize, work->qp_out->lam, mem->regularize_mem);

    // copy sampling times into dynamics model
#if defined(ACADOS_WITH_OPENMP)
    #pragma omp for
#endif
    for (ii = 0; ii < N; ii++)
    {
        config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
                                        nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);
    }

#if defined(ACADOS_WITH_OPENMP)
    } // end of parallel region
#endif

    // initialize QP
    initialize_qp(config, dims, nlp_in, nlp_out, opts, mem, work);

    // main sqp loop
    int sqp_iter = 0;
    for (; sqp_iter < opts->max_iter; sqp_iter++)
    {
        // printf("\n------- sqp iter %d (max_iter %d) --------\n", sqp_iter, opts->max_iter);
        // if(sqp_iter==2)
        // exit(1);

        // start timer
        acados_tic(&timer1);

        // linearize NLP and update QP matrices
        linearize_update_qp_matrices(config, dims, nlp_in, nlp_out, opts, mem, work);

        // stop timer
        mem->time_lin += acados_toc(&timer1);

        // update QP rhs for SQP (step prim var, abs dual var)
        sqp_update_qp_vectors(config, dims, nlp_in, nlp_out, opts, mem, work);

        // compute nlp residuals
        ocp_nlp_res_compute(dims, nlp_in, nlp_out, mem->nlp_res, mem->nlp_mem);

        // inf_norm_res = max over the four KKT residual norms
        nlp_out->inf_norm_res = mem->nlp_res->inf_norm_res_g;
        nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_b > nlp_out->inf_norm_res) ?
                                    mem->nlp_res->inf_norm_res_b : nlp_out->inf_norm_res;
        nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_d > nlp_out->inf_norm_res) ?
                                    mem->nlp_res->inf_norm_res_d : nlp_out->inf_norm_res;
        nlp_out->inf_norm_res = (mem->nlp_res->inf_norm_res_m > nlp_out->inf_norm_res) ?
                                    mem->nlp_res->inf_norm_res_m : nlp_out->inf_norm_res;

        // save statistics (one row of stat_n entries per SQP iteration)
        if (sqp_iter < mem->stat_m)
        {
            mem->stat[mem->stat_n*sqp_iter+0] = mem->nlp_res->inf_norm_res_g;
            mem->stat[mem->stat_n*sqp_iter+1] = mem->nlp_res->inf_norm_res_b;
            mem->stat[mem->stat_n*sqp_iter+2] = mem->nlp_res->inf_norm_res_d;
            mem->stat[mem->stat_n*sqp_iter+3] = mem->nlp_res->inf_norm_res_m;
            mem->stat[mem->stat_n*sqp_iter+4] = qp_status;
            mem->stat[mem->stat_n*sqp_iter+5] = qp_iter;
        }

        // exit conditions on residuals
        if ((mem->nlp_res->inf_norm_res_g < opts->tol_stat) &
            (mem->nlp_res->inf_norm_res_b < opts->tol_eq) &
            (mem->nlp_res->inf_norm_res_d < opts->tol_ineq) &
            (mem->nlp_res->inf_norm_res_m < opts->tol_comp))
        {
            // printf("%d sqp iterations\n", sqp_iter);
            // print_ocp_qp_in(work->qp_in);

            // save sqp iterations number
            mem->sqp_iter = sqp_iter;
            nlp_out->sqp_iter = sqp_iter;

            // stop timer
            total_time += acados_toc(&timer0);

            // save time
            nlp_out->total_time = total_time;
            mem->time_tot = total_time;

#if defined(ACADOS_WITH_OPENMP)
            // restore number of threads
            omp_set_num_threads(num_threads_bkp);
#endif
            mem->status = ACADOS_SUCCESS;
            return mem->status;
        }

        // start timer
        acados_tic(&timer1);

        // regularize Hessian
        config->regularize->regularize_hessian(config->regularize, dims->regularize,
                                               opts->regularize, mem->regularize_mem);

        // stop timer
        mem->time_reg += acados_toc(&timer1);

        // printf("\n------- qp_in (sqp iter %d) --------\n", sqp_iter);
        // print_ocp_qp_in(work->qp_in);
        // if(sqp_iter==1)
        // exit(1);

        // no warm start at first iteration
        if(sqp_iter==0)
        {
            int tmp_int = 0;
            config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts,
                                        "warm_start", &tmp_int);
        }

        // start timer
        acados_tic(&timer1);

        // TODO move qp_out in memory !!!!!
        // (it has to be preserved to do warm start)
        qp_status = qp_solver->evaluate(qp_solver, work->qp_in, work->qp_out,
                                        opts->qp_solver_opts, mem->qp_solver_mem, work->qp_work);

        // stop timer
        mem->time_qp_sol += acados_toc(&timer1);

        // start timer
        acados_tic(&timer1);

        // compute correct dual solution in case of Hessian regularization
        config->regularize->correct_dual_sol(config->regularize, dims->regularize,
                                             opts->regularize, mem->regularize_mem);

        // stop timer
        mem->time_reg += acados_toc(&timer1);

        // restore default warm start
        if(sqp_iter==0)
        {
            config->qp_solver->opts_set(config->qp_solver, opts->qp_solver_opts,
                                        "warm_start", &opts->qp_warm_start);
        }

        // TODO move into QP solver memory ???
        nlp_out->qp_iter = ((ocp_qp_info *) work->qp_out->misc)->num_iter;
        qp_iter = ((ocp_qp_info *) work->qp_out->misc)->num_iter;

        // compute external QP residuals (for debugging)
        if(opts->ext_qp_res)
        {
            ocp_qp_res_compute(work->qp_in, work->qp_out, work->qp_res, work->qp_res_ws);
            if (sqp_iter+1 < mem->stat_m)
                ocp_qp_res_compute_nrm_inf(work->qp_res,
                                           mem->stat+(mem->stat_n*(sqp_iter+1)+6));
            // printf("\nsqp_iter %d, res %e %e %e %e\n", sqp_iter, inf_norm_qp_res[0], inf_norm_qp_res[1], inf_norm_qp_res[2], inf_norm_qp_res[3]);
        }

        // printf("\n------- qp_out (sqp iter %d) ---------\n", sqp_iter);
        // print_ocp_qp_out(work->qp_out);
        // if(sqp_iter==1)
        // exit(1);

        // QP solver failed (ACADOS_MAXITER from the QP is tolerated)
        if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER))
        {
            // print_ocp_qp_in(work->qp_in);

            // save sqp iterations number
            mem->sqp_iter = sqp_iter;
            nlp_out->sqp_iter = sqp_iter;

            // stop timer
            total_time += acados_toc(&timer0);

            // save time
            mem->time_tot = total_time;
            nlp_out->total_time = total_time;

            printf("QP solver returned error status %d in iteration %d\n", qp_status, sqp_iter);
#if defined(ACADOS_WITH_OPENMP)
            // restore number of threads
            omp_set_num_threads(num_threads_bkp);
#endif
            mem->status = ACADOS_QP_FAILURE;
            return mem->status;
        }

        sqp_update_variables(config, dims, nlp_out, opts, mem, work);

        //
        // ocp_nlp_dims_print(nlp_out->dims);
        // ocp_nlp_out_print(nlp_out);
        // exit(1);

        // ??? @rien
        //  for (int_t i = 0; i < N; i++)
        //  {
        //      ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i];
        //      sim_opts *opts = dynamics_opts->sim_solver;
        //      if (opts->scheme == NULL)
        //          continue;
        //      opts->sens_adj = (opts->scheme->type != exact);
        //      if (nlp_in->freezeSens) {
        //          // freeze inexact sensitivities after first SQP iteration !!
        //          opts->scheme->freeze = true;
        //      }
        //  }
    }

    // stop timer
    total_time += acados_toc(&timer0);

    // ocp_nlp_out_print(nlp_out);

    // save sqp iterations number
    mem->sqp_iter = sqp_iter;
    nlp_out->sqp_iter = sqp_iter;

    // save time
    mem->time_tot = total_time;
    nlp_out->total_time = total_time;

    // printf("%d sqp iterations\n", sqp_iter);
    // print_ocp_qp_in(work->qp_in);

    // maximum number of iterations reached
#if defined(ACADOS_WITH_OPENMP)
    // restore number of threads
    omp_set_num_threads(num_threads_bkp);
#endif
    mem->status = ACADOS_MAXITER;
    return mem->status;
}

/* Per-stage precomputations before the first solve: set the sampling time T
 * in each dynamics model and run the dynamics module's precompute hook.
 * Returns ACADOS_SUCCESS or the first failing stage's status.
 */
int ocp_nlp_sqp_precompute(void *config_, void *dims_, void *nlp_in_, void *nlp_out_,
                           void *opts_, void *mem_, void *work_)
{
    ocp_nlp_dims *dims = dims_;
    ocp_nlp_config *config = config_;
    ocp_nlp_sqp_opts *opts = opts_;
    ocp_nlp_sqp_memory *mem = mem_;
    ocp_nlp_in *nlp_in = nlp_in_;
    // ocp_nlp_out *nlp_out = nlp_out_;
    // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver;

    ocp_nlp_sqp_work *work = work_;
    ocp_nlp_sqp_cast_workspace(config, dims, work, mem, opts);

    // extract dims
    int N = dims->N;

    int status = ACADOS_SUCCESS;

    int ii;

    // TODO(lint) checks
    // TODO(lint) flag to enable/disable checks
    for (ii = 0; ii <= N; ii++)
    {
        // TODO(lint) check that ns in opt_var == ns in constraints
    }

    // precompute
    for (ii = 0; ii < N; ii++)
    {
        // set T
        config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii],
                                        nlp_in->dynamics[ii], "T", nlp_in->Ts+ii);

        // dynamics precompute
        status = config->dynamics[ii]->precompute(config->dynamics[ii], dims->dynamics[ii],
                                                  nlp_in->dynamics[ii], opts->dynamics[ii],
                                                  mem->dynamics[ii], work->dynamics[ii]);
        if (status != ACADOS_SUCCESS) return status;
    }

    return status;
}

/* Getter for solver statistics stored in memory; exits on unknown field.
 * return_value_ must point to the type matching the requested field
 * (int, double, double*, or ocp_nlp_res*).
 */
void ocp_nlp_sqp_get(void *config_, void *mem_, const char *field, void *return_value_)
{
    // ocp_nlp_config *config = config_;
    ocp_nlp_sqp_memory *mem = mem_;

    if (!strcmp("sqp_iter", field))
    {
        int *value = return_value_;
        *value = mem->sqp_iter;
    }
    else if (!strcmp("status", field))
    {
        int *value = return_value_;
        *value = mem->status;
    }
    else if (!strcmp("time_tot", field) || !strcmp("tot_time", field))
    {
        double *value = return_value_;
        *value = mem->time_tot;
    }
    else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field))
    {
        double *value = return_value_;
        *value = mem->time_qp_sol;
    }
    else if (!strcmp("time_lin", field))
    {
        double *value = return_value_;
        *value = mem->time_lin;
    }
    else if (!strcmp("time_reg", field))
    {
        double *value = return_value_;
        *value = mem->time_reg;
    }
    else if (!strcmp("nlp_res", field))
    {
        ocp_nlp_res **value = return_value_;
        *value = mem->nlp_res;
    }
    else if (!strcmp("stat", field))
    {
        double **value = return_value_;
        *value = mem->stat;
    }
    else if (!strcmp("stat_m", field))
    {
        int *value = return_value_;
        *value = mem->stat_m;
    }
    else if (!strcmp("stat_n", field))
    {
        int *value = return_value_;
        *value = mem->stat_n;
    }
    else
    {
        printf("\nerror: field %s not available in ocp_nlp_sqp_get\n", field);
        exit(1);
    }
}

/* Fill the generic ocp_nlp_config vtable with the SQP implementations. */
void ocp_nlp_sqp_config_initialize_default(void *config_)
{
    ocp_nlp_config *config = (ocp_nlp_config *) config_;

    config->opts_calculate_size = &ocp_nlp_sqp_opts_calculate_size;
    config->opts_assign = &ocp_nlp_sqp_opts_assign;
    config->opts_initialize_default = &ocp_nlp_sqp_opts_initialize_default;
    config->opts_update = &ocp_nlp_sqp_opts_update;
    config->opts_set = &ocp_nlp_sqp_opts_set;
    config->dynamics_opts_set = &ocp_nlp_sqp_dynamics_opts_set;
    config->cost_opts_set = &ocp_nlp_sqp_cost_opts_set;
    config->constraints_opts_set = &ocp_nlp_sqp_constraints_opts_set;
    config->memory_calculate_size = &ocp_nlp_sqp_memory_calculate_size;
    config->memory_assign = &ocp_nlp_sqp_memory_assign;
    config->workspace_calculate_size = &ocp_nlp_sqp_workspace_calculate_size;
    config->evaluate = &ocp_nlp_sqp;
    config->config_initialize_default = &ocp_nlp_sqp_config_initialize_default;
    config->precompute = &ocp_nlp_sqp_precompute;
    config->get = &ocp_nlp_sqp_get;

    return;
}
SparseDenseProduct.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2015 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEDENSEPRODUCT_H
#define EIGEN_SPARSEDENSEPRODUCT_H

namespace Eigen {

namespace internal {

// A sparse-dense (or dense-sparse) outer product yields a sparse result.
template <> struct product_promote_storage_type<Sparse,Dense, OuterProduct> { typedef Sparse ret; };
template <> struct product_promote_storage_type<Dense,Sparse, OuterProduct> { typedef Sparse ret; };

// Primary template for sparse * dense products, dispatched on:
// - LhsStorageOrder: storage order of the sparse lhs (RowMajor/ColMajor)
// - ColPerCol: true when the dense rhs is column-major or a single column,
//   so the product can be computed one result column at a time.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,
         typename AlphaType,
         int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,
         bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
struct sparse_time_dense_product_impl;

// Row-major sparse lhs, column-per-column traversal:
// each result coefficient is an independent dot product, hence the
// optional OpenMP parallelization over rows.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, true>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  typedef evaluator<Lhs> LhsEval;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    LhsEval lhsEval(lhs);

    Index n = lhs.outerSize();
#ifdef EIGEN_HAS_OPENMP
    Eigen::initParallel();
    Index threads = Eigen::nbThreads();
#endif

    for(Index c=0; c<rhs.cols(); ++c)
    {
#ifdef EIGEN_HAS_OPENMP
      // This 20000 threshold has been found experimentally on 2D and 3D Poisson problems.
      // It basically represents the minimal amount of work to be done to be worth it.
      if(threads>1 && lhsEval.nonZerosEstimate() > 20000)
      {
        #pragma omp parallel for schedule(dynamic,(n+threads*4-1)/(threads*4)) num_threads(threads)
        for(Index i=0; i<n; ++i)
          processRow(lhsEval,rhs,res,alpha,i,c);
      }
      else
#endif
      {
        for(Index i=0; i<n; ++i)
          processRow(lhsEval,rhs,res,alpha,i,c);
      }
    }
  }

  // res(i,col) += alpha * <sparse row i of lhs, column col of rhs>
  static void processRow(const LhsEval& lhsEval, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha, Index i, Index col)
  {
    typename Res::Scalar tmp(0);
    for(LhsInnerIterator it(lhsEval,i); it ;++it)
      tmp += it.value() * rhs.coeff(it.index(),col);
    res.coeffRef(i,col) += alpha * tmp;
  }

};

// FIXME: what is the purpose of the following specialization? Is it for the BlockedSparse format?
template<typename T1, typename T2/*, int _Options, typename _StrideType*/>
struct scalar_product_traits<T1, Ref<T2/*, _Options, _StrideType*/> >
{
  enum {
    Defined = 1
  };
  typedef typename CwiseUnaryOp<scalar_multiple2_op<T1, typename T2::Scalar>, T2>::PlainObject ReturnType;
};

// Column-major sparse lhs, column-per-column traversal:
// scatter alpha*rhs(j,c) along sparse column j of the lhs.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType, ColMajor, true>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
  {
    evaluator<Lhs> lhsEval(lhs);
    for(Index c=0; c<rhs.cols(); ++c)
    {
      for(Index j=0; j<lhs.outerSize(); ++j)
      {
//        typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
        typename internal::scalar_product_traits<AlphaType, typename Rhs::Scalar>::ReturnType rhs_j(alpha * rhs.coeff(j,c));
        for(LhsInnerIterator it(lhsEval,j); it ;++it)
          res.coeffRef(it.index(),c) += it.value() * rhs_j;
      }
    }
  }
};

// Row-major sparse lhs, whole-row-at-a-time traversal (row-major multi-column rhs):
// accumulate scaled dense rows of rhs into result row j.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, RowMajor, false>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    evaluator<Lhs> lhsEval(lhs);
    for(Index j=0; j<lhs.outerSize(); ++j)
    {
      typename Res::RowXpr res_j(res.row(j));
      for(LhsInnerIterator it(lhsEval,j); it ;++it)
        res_j += (alpha*it.value()) * rhs.row(it.index());
    }
  }
};

// Column-major sparse lhs, whole-row-at-a-time traversal:
// scatter row j of rhs into the result rows indexed by sparse column j.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, typename DenseResType::Scalar, ColMajor, false>
{
  typedef typename internal::remove_all<SparseLhsType>::type Lhs;
  typedef typename internal::remove_all<DenseRhsType>::type Rhs;
  typedef typename internal::remove_all<DenseResType>::type Res;
  typedef typename evaluator<Lhs>::InnerIterator LhsInnerIterator;
  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    evaluator<Lhs> lhsEval(lhs);
    for(Index j=0; j<lhs.outerSize(); ++j)
    {
      typename Rhs::ConstRowXpr rhs_j(rhs.row(j));
      for(LhsInnerIterator it(lhsEval,j); it ;++it)
        res.row(it.index()) += (alpha*it.value()) * rhs_j;
    }
  }
};

// Entry point: res += alpha * lhs * rhs, dispatching to the
// specialization selected by storage order and rhs shape.
template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType>
inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
  sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, AlphaType>::run(lhs, rhs, res, alpha);
}

} // end namespace internal

namespace internal {

// sparse * dense: forward to sparse_time_dense_product on suitably
// nested (possibly materialized) operands.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
 : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,SparseShape,DenseShape,ProductType> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;

  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
  {
    typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? 1 : Rhs::ColsAtCompileTime>::type LhsNested;
    typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==0) ? 1 : Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhs);
    internal::sparse_time_dense_product(lhsNested, rhsNested, dst, alpha);
  }
};

template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, SparseTriangularShape, DenseShape, ProductType>
 : generic_product_impl<Lhs, Rhs, SparseShape, DenseShape, ProductType>
{};

// dense * sparse: computed as (rhs^T * lhs^T)^T via the sparse * dense path.
template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
 : generic_product_impl_base<Lhs,Rhs,generic_product_impl<Lhs,Rhs,DenseShape,SparseShape,ProductType> >
{
  typedef typename Product<Lhs,Rhs>::Scalar Scalar;

  template<typename Dst>
  static void scaleAndAddTo(Dst& dst, const Lhs& lhs, const Rhs& rhs, const Scalar& alpha)
  {
    typedef typename nested_eval<Lhs,((Rhs::Flags&RowMajorBit)==0) ? Dynamic : 1>::type LhsNested;
    typedef typename nested_eval<Rhs,((Lhs::Flags&RowMajorBit)==RowMajorBit) ? 1 : Lhs::RowsAtCompileTime>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhs);

    // transpose everything
    Transpose<Dst> dstT(dst);
    internal::sparse_time_dense_product(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
  }
};

template<typename Lhs, typename Rhs, int ProductType>
struct generic_product_impl<Lhs, Rhs, DenseShape, SparseTriangularShape, ProductType>
 : generic_product_impl<Lhs, Rhs, DenseShape, SparseShape, ProductType>
{};

// Lazy evaluator for an outer product with one sparse operand: iterates the
// sparse factor and scales it by the matching coefficient of the other factor.
template<typename LhsT, typename RhsT, bool NeedToTranspose>
struct sparse_dense_outer_product_evaluator
{
protected:
  typedef typename conditional<NeedToTranspose,RhsT,LhsT>::type Lhs1;
  typedef typename conditional<NeedToTranspose,LhsT,RhsT>::type ActualRhs;
  typedef Product<LhsT,RhsT,DefaultProduct> ProdXprType;

  // if the actual left-hand side is a dense vector,
  // then build a sparse-view so that we can seamlessly iterate over it.
  typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
            Lhs1, SparseView<Lhs1> >::type ActualLhs;
  typedef typename conditional<is_same<typename internal::traits<Lhs1>::StorageKind,Sparse>::value,
            Lhs1 const&, SparseView<Lhs1> >::type LhsArg;

  typedef evaluator<ActualLhs> LhsEval;
  typedef evaluator<ActualRhs> RhsEval;
  typedef typename evaluator<ActualLhs>::InnerIterator LhsIterator;
  typedef typename ProdXprType::Scalar Scalar;

public:
  enum {
    Flags = NeedToTranspose ? RowMajorBit : 0,
    CoeffReadCost = HugeCost
  };

  class InnerIterator : public LhsIterator
  {
  public:
    InnerIterator(const sparse_dense_outer_product_evaluator &xprEval, Index outer)
      : LhsIterator(xprEval.m_lhsXprImpl, 0),
        m_outer(outer),
        m_empty(false),
        m_factor(get(xprEval.m_rhsXprImpl, outer, typename internal::traits<ActualRhs>::StorageKind() ))
    {}

    EIGEN_STRONG_INLINE Index outer() const { return m_outer; }
    EIGEN_STRONG_INLINE Index row() const { return NeedToTranspose ? m_outer : LhsIterator::index(); }
    EIGEN_STRONG_INLINE Index col() const { return NeedToTranspose ? LhsIterator::index() : m_outer; }

    EIGEN_STRONG_INLINE Scalar value() const { return LhsIterator::value() * m_factor; }
    EIGEN_STRONG_INLINE operator bool() const { return LhsIterator::operator bool() && (!m_empty); }

  protected:
    // dense rhs: just read coefficient `outer`
    Scalar get(const RhsEval &rhs, Index outer, Dense = Dense()) const
    {
      return rhs.coeff(outer);
    }

    // sparse rhs: factor is nonzero only if entry 0 of inner vector `outer`
    // exists and is nonzero; otherwise flag the whole column/row as empty
    Scalar get(const RhsEval &rhs, Index outer, Sparse = Sparse())
    {
      typename RhsEval::InnerIterator it(rhs, outer);
      if (it && it.index()==0 && it.value()!=Scalar(0))
        return it.value();
      m_empty = true;
      return Scalar(0);
    }

    Index m_outer;
    bool m_empty;
    Scalar m_factor;
  };

  sparse_dense_outer_product_evaluator(const Lhs1 &lhs, const ActualRhs &rhs)
     : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

  // transpose case
  sparse_dense_outer_product_evaluator(const ActualRhs &rhs, const Lhs1 &lhs)
     : m_lhs(lhs), m_lhsXprImpl(m_lhs), m_rhsXprImpl(rhs)
  {
    EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
  }

protected:
  const LhsArg m_lhs;
  evaluator<ActualLhs> m_lhsXprImpl;
  evaluator<ActualRhs> m_rhsXprImpl;
};

// sparse * dense outer product
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, SparseShape, DenseShape>
  : sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor>
{
  typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Lhs::IsRowMajor> Base;

  typedef Product<Lhs, Rhs> XprType;
  typedef typename XprType::PlainObject PlainObject;

  explicit product_evaluator(const XprType& xpr)
    : Base(xpr.lhs(), xpr.rhs())
  {}

};

// dense * sparse outer product
template<typename Lhs, typename Rhs>
struct product_evaluator<Product<Lhs, Rhs, DefaultProduct>, OuterProduct, DenseShape, SparseShape>
  : sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor>
{
  typedef sparse_dense_outer_product_evaluator<Lhs,Rhs, Rhs::IsRowMajor> Base;

  typedef Product<Lhs, Rhs> XprType;
  typedef typename XprType::PlainObject PlainObject;

  explicit product_evaluator(const XprType& xpr)
    : Base(xpr.lhs(), xpr.rhs())
  {}

};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSEDENSEPRODUCT_H
5468.c
/* POLYBENCH/GPU-OPENMP * * This file is a part of the Polybench/GPU-OpenMP suite * * Contact: * William Killian <killian@udel.edu> * * Copyright 2013, The University of Delaware */ #include <stdio.h> #include <unistd.h> #include <string.h> #include <math.h> /* Include polybench common header. */ #include <polybench.h> /* Include benchmark-specific header. */ /* Default data type is double, default size is 4000. */ #include "correlation.h" /* Array initialization. */ static void init_array (int m, int n, DATA_TYPE *float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n)) { int i, j; *float_n = 1.2; for (i = 0; i < m; i++) for (j = 0; j < n; j++) data[i][j] = ((DATA_TYPE) i*j) / M; } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. */ static void print_array(int m, DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m)) { int i, j; for (i = 0; i < m; i++) for (j = 0; j < m; j++) { fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[i][j]); if ((i * m + j) % 20 == 0) fprintf (stderr, "\n"); } fprintf (stderr, "\n"); } /* Main computational kernel. The whole function will be timed, including the call and return. */ static void kernel_correlation(int m, int n, DATA_TYPE float_n, DATA_TYPE POLYBENCH_2D(data,M,N,m,n), DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m), DATA_TYPE POLYBENCH_1D(mean,M,m), DATA_TYPE POLYBENCH_1D(stddev,M,m)) { int i, j, j1, j2; DATA_TYPE eps = 0.1f; #define sqrt_of_array_cell(x,j) sqrt(x[j]) #pragma scop /* Determine mean of column vectors of input data matrix */ #pragma omp parallel private(i, j, j2) num_threads(#P11) { #pragma omp for (j = 0; j < _PB_M; j++) { mean[j] = 0.0; for (i = 0; i < _PB_N; i++) mean[j] += data[i][j]; mean[j] /= float_n; } /* Determine standard deviations of column vectors of data matrix. 
*/ #pragma omp for (j = 0; j < _PB_M; j++) { stddev[j] = 0.0; for (i = 0; i < _PB_N; i++) stddev[j] += (data[i][j] - mean[j]) * (data[i][j] - mean[j]); stddev[j] /= float_n; stddev[j] = sqrt_of_array_cell(stddev, j); /* The following in an inelegant but usual way to handle near-zero std. dev. values, which below would cause a zero- divide. */ stddev[j] = stddev[j] <= eps ? 1.0 : stddev[j]; } /* Center and reduce the column vectors. */ #pragma omp for (i = 0; i < _PB_N; i++) { #pragma omp for (j = 0; j < _PB_M; j++) { data[i][j] -= mean[j]; data[i][j] /= sqrt(float_n) * stddev[j]; } } /* Calculate the m * m correlation matrix. */ #pragma omp for (j1 = 0; j1 < _PB_M-1; j1++) { symmat[j1][j1] = 1.0; for (j2 = j1+1; j2 < _PB_M; j2++) { symmat[j1][j2] = 0.0; for (i = 0; i < _PB_N; i++) symmat[j1][j2] += (data[i][j1] * data[i][j2]); symmat[j2][j1] = symmat[j1][j2]; } } } #pragma endscop symmat[_PB_M-1][_PB_M-1] = 1.0; } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int m = M; /* Variable declaration/allocation. */ DATA_TYPE float_n; POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n); POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m); POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m); POLYBENCH_1D_ARRAY_DECL(stddev,DATA_TYPE,M,m); /* Initialize array(s). */ init_array (m, n, &float_n, POLYBENCH_ARRAY(data)); /* Start timer. */ polybench_start_instruments; /* Run kernel. */ kernel_correlation (m, n, float_n, POLYBENCH_ARRAY(data), POLYBENCH_ARRAY(symmat), POLYBENCH_ARRAY(mean), POLYBENCH_ARRAY(stddev)); /* Stop and print timer. */ polybench_stop_instruments; polybench_print_instruments; /* Prevent dead-code elimination. All live-out data must be printed by the function call in argument. */ polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat))); /* Be clean. */ POLYBENCH_FREE_ARRAY(data); POLYBENCH_FREE_ARRAY(symmat); POLYBENCH_FREE_ARRAY(mean); POLYBENCH_FREE_ARRAY(stddev); return 0; }
elemwise_binary_op.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 *  Copyright (c) 2016 by Contributors
 * \file elemwise_binary_op.h
 * \brief Function definition of elementwise binary operators
 */
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_

#include <mxnet/operator_util.h>
#include <mxnet/op_attr_types.h>
#include <vector>
#include <string>
#include <utility>
#include <typeinfo>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../../engine/openmp.h"
#include "elemwise_unary_op.h"
#include "../../common/utils.h"
#include "./init_op.h"

namespace mxnet {
namespace op {

/*! Gather binary operator functions into ElemwiseBinaryOp class */
class ElemwiseBinaryOp : public OpBase {
 public:
  /*! \brief For sparse, assume missing rvalue is 0.
   *  Applies OP(lhs[i], 0) for every element -- used when the right operand
   *  has no stored value at a position. */
  template<typename OP, int Req>
  struct MissingRValueOp {
    typedef OP Operation;
    template<typename DType>
    MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) {
      KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0)));
    }
  };

  /*! \brief For sparse, assume missing lvalue is 0.
   *  Applies OP(0, rhs[i]) for every element -- used when the left operand
   *  has no stored value at a position. */
  template<typename OP, int Req>
  struct MissingLValueOp {
    typedef OP Operation;
    template<typename DType>
    MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) {
      KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i]));
    }
  };

 private:
  /*!
   * \brief CSR operation requires temp space
   */
  enum ResourceRequestType {
    kTempSpace
  };

  /*!
   * \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input
   *        CPU-Only version
   *
   * Rows [iter_out, min(idx_l, idx_r)) are filled with OP(0, 0).
   * Returns the row index up to which the output is now filled.
   */
  template<typename DType, typename OP, typename xpu>
  static inline size_t FillDense(mshadow::Stream<xpu> *s,
                                 const size_t idx_l,
                                 const size_t idx_r,
                                 const OpReqType req,
                                 mshadow::Tensor<xpu, 2, DType> *out,
                                 const size_t iter_out) {
    const int index_out_min = static_cast<int>(std::min(idx_l, idx_r));
    if (static_cast<size_t>(index_out_min) > iter_out) {
      const DType zero_input_val = OP::Map(DType(0), DType(0));
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
      for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) {
        Fill<false>(s, (*out)[i], req, zero_input_val);
      }
    }
    return static_cast<size_t>(index_out_min);  // MSVC wants OMP loops to always use 'int'
  }

  /*! \brief True when both NDArrays share the same underlying variable (alias). */
  static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) {
    return a1.var() == a2.var();
  }

 public:
  /*! \brief Minimum of three */
  static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) {
    return a < b ? (a < c ? a : c) : (b < c ? b : c);
  }

 private:
  /*! \brief Dense backward when gradients depend only on the output gradient:
   *  lgrad = LOP(ograd), rgrad = ROP(ograd).  When LOP/ROP is identity and the
   *  request is kWriteInplace, the gradient already aliases ograd, so only an
   *  aliasing check is performed. */
  template<typename xpu, typename LOP, typename ROP, typename DType>
  static void BackwardUseNone_(const nnvm::NodeAttrs &attrs,
                               const OpContext &ctx,
                               const std::vector<TBlob> &inputs,
                               const std::vector<OpReqType> &req,
                               const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    // Round element count up to whole SIMD lanes.
    const int size = static_cast<int>(
      (outputs[0].Size() + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes);
    const DType *ograd_dptr = inputs[0].dptr<DType>();
    if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) {
      CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>());
    } else if (req[0] != kNullOp) {
      DType *lgrad_dptr = outputs[0].dptr<DType>();
      MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
        Kernel<mxnet_op::op_with_req<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr);
      });
    }
    if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) {
      CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>());
    } else if (req[1] != kNullOp) {
      DType *rgrad_dptr = outputs[1].dptr<DType>();
      MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
        Kernel<mxnet_op::op_with_req<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr);
      });
    }
  }

  /*! \brief Dense backward when gradients depend on the forward inputs:
   *  lgrad = ograd * LOP(lhs, rhs), rgrad = ograd * ROP(lhs, rhs)
   *  (combined via backward_grad_tuned).  inputs = {ograd, lhs, rhs}. */
  template<typename xpu, typename LOP, typename ROP, typename DType>
  static void BackwardUseIn_(const nnvm::NodeAttrs &attrs,
                             const OpContext &ctx,
                             const std::vector<TBlob> &inputs,
                             const std::vector<OpReqType> &req,
                             const std::vector<TBlob> &outputs) {
    DCHECK_EQ(outputs.size(), 2U);
    DCHECK_EQ(inputs.size(), 3U);
    mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>();
    const DType *ograd_dptr = inputs[0].dptr<DType>();
    const DType *lhs_dptr = inputs[1].dptr<DType>();
    const DType *rhs_dptr = inputs[2].dptr<DType>();
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      const int size = static_cast<int>(
        (outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1)
        / mxnet_op::DataType<DType>::kLanes);
      DType * lgrad_dptr = outputs[0].dptr<DType>();
      mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<LOP>, Req>,
                       xpu>::Launch(
        s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
    MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
      const int size = static_cast<int>(
        (outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1)
        / mxnet_op::DataType<DType>::kLanes);
      DType * rgrad_dptr = outputs[1].dptr<DType>();
      mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<ROP>, Req>,
                       xpu>::Launch(
        s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
  }

  /*! \brief Row-sparse backward using forward inputs:
   *  grad = OP(lhs, rhs) then multiplied in-place by ograd. */
  template<
    typename xpu, typename LOP, typename ROP,
    bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false,
    typename BackupCompute>
  static inline void RspRspOpBackward(const nnvm::NodeAttrs &attrs,
                                      const OpContext &ctx,
                                      const std::vector<NDArray> &inputs,
                                      const std::vector<OpReqType> &req,
                                      const std::vector<NDArray> &outputs,
                                      BackupCompute backup_compute) {
    mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
    // lhs grad
    if (req[0] != kNullOp) {
      // RspRspOp can handle dense outputs so long as OP(0, 0) == 0
      RspRspOp<LOP>(
        s, attrs, ctx, inputs[1], inputs[2],
        req[0], outputs[0], false, false, false, false);
      // lhs in-place
      RspRspOp<op::mshadow_op::mul>(
        s, attrs, ctx, outputs[0], inputs[0],
        req[0], outputs[0], false, false, true, false);
    }
    // rhs grad
    if (req[1] != kNullOp) {
      RspRspOp<ROP>(
        s, attrs, ctx, inputs[1], inputs[2],
        req[1], outputs[1], false, false, false, false);
      // rhs in-place
      RspRspOp<op::mshadow_op::mul>(
        s, attrs, ctx, inputs[0], outputs[1],
        req[1], outputs[1], false, false, true, false);
    }
  }

  /*! \brief Backward for dense/CSR multiply: only supported for mul
   *  (LOP == right, ROP == left); dispatches on which gradient is CSR. */
  template<typename xpu, typename LOP, typename ROP>
  static inline void DnsCsrCsrOpBackward(const nnvm::NodeAttrs &attrs,
                                         const OpContext &ctx,
                                         const std::vector<NDArray> &inputs,
                                         const std::vector<OpReqType> &req,
                                         const std::vector<NDArray> &outputs) {
    const bool supported_ops = std::is_same<mshadow_op::right, LOP>::value &&
                               std::is_same<mshadow_op::left, ROP>::value;
    CHECK(supported_ops)
      << "Only backward for mul is supported (LOP should be right, ROP should be left)";
    const NDArray& out_grad = inputs[0];
    const NDArray& lhs_in = inputs[1];
    const NDArray& rhs_in = inputs[2];
    const NDArray& lhs_grad = outputs[0];
    const NDArray& rhs_grad = outputs[1];
    const bool reverse = (outputs[0].storage_type() == kCSRStorage);
    if (reverse) {
      DnsCsrCsrOp<xpu, mshadow_op::mul>(attrs, ctx, out_grad, rhs_in, req[0], lhs_grad, false);
      Compute<xpu, mshadow_op::mul>(attrs, ctx, {out_grad.data(), lhs_in.data()},
                                    {req[1]}, {rhs_grad.data()});
    } else {
      DnsCsrCsrOp<xpu, mshadow_op::mul>(attrs, ctx, out_grad, lhs_in, req[1], rhs_grad, false);
      Compute<xpu, mshadow_op::mul>(attrs, ctx, {out_grad.data(), rhs_in.data()},
                                    {req[0]}, {lhs_grad.data()});
    }
  }

 public:
  /*! \brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result
   *  (CPU specialization; implementation lives outside this header). */
  template<typename OP>
  static void RspRspOp(mshadow::Stream<cpu> *s,
                       const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const NDArray &lhs,
                       const NDArray &rhs,
                       OpReqType req,
                       const NDArray &output,
                       bool lhs_may_be_dense,
                       bool rhs_may_be_dense,
                       bool allow_inplace,
                       bool scatter);

  /*! \brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result
   *  (GPU specialization). */
  template<typename OP>
  static void RspRspOp(mshadow::Stream<gpu> *s,
                       const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const NDArray &lhs,
                       const NDArray &rhs,
                       OpReqType req,
                       const NDArray &output,
                       bool lhs_may_be_dense,
                       bool rhs_may_be_dense,
                       bool allow_inplace,
                       bool scatter);

  /*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
  template<typename OP>
  static void CsrCsrOp(mshadow::Stream<cpu> *s,
                       const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const NDArray &lhs,
                       const NDArray &rhs,
                       OpReqType req,
                       const NDArray &output);

  /*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
  template<typename OP>
  static void CsrCsrOp(mshadow::Stream<gpu> *s,
                       const nnvm::NodeAttrs &attrs,
                       const OpContext &ctx,
                       const NDArray &lhs,
                       const NDArray &rhs,
                       OpReqType req,
                       const NDArray &output);

  /*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
  template<typename OP>
  static void DnsCsrDnsOp(mshadow::Stream<cpu> *s,
                          const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const NDArray &lhs,
                          const NDArray &rhs,
                          OpReqType req,
                          const NDArray &output,
                          const bool reverse);

  /*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
  template<typename OP>
  static void DnsCsrDnsOp(mshadow::Stream<gpu> *s,
                          const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const NDArray &lhs,
                          const NDArray &rhs,
                          OpReqType req,
                          const NDArray &output,
                          const bool reverse);

  /*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
  template<typename xpu, typename OP>
  static void DnsCsrCsrOp(const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const NDArray &lhs,
                          const NDArray &rhs,
                          OpReqType req,
                          const NDArray &output,
                          const bool reverse);

  /*! \brief DNS -op- RSP binary operator for non-canonical NDArray */
  template<typename xpu, typename OP>
  static void DnsRspDnsOp(mshadow::Stream<xpu> *s,
                          const nnvm::NodeAttrs &attrs,
                          const OpContext &ctx,
                          const NDArray &lhs,
                          const NDArray &rhs,
                          OpReqType req,
                          const NDArray &output,
                          const bool reverse);

 public:
  /*!
   * \brief Rsp-op-Rsp operation which produces a dense result
   * \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs,
                                          int dev_mask,
                                          DispatchMode* dispatch_mode,
                                          std::vector<int> *in_attrs,
                                          std::vector<int> *out_attrs);

  /*!
   * \brief Allow one of the binary inputs to be dense and still produce a sparse output.
   *        Typically used for sparse * dense = sparse.
   *        Note: for csr, it dispatches to fallback other than csr, csr -> csr
   * \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  static bool PreferSparseStorageType(const nnvm::NodeAttrs& attrs,
                                      int dev_mask,
                                      DispatchMode* dispatch_mode,
                                      std::vector<int> *in_attrs,
                                      std::vector<int> *out_attrs) {
    using namespace common;
    CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name;
    CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name;
    const auto& lhs_stype = in_attrs->at(0);
    const auto& rhs_stype = in_attrs->at(1);
    auto& out_stype = out_attrs->at(0);
    bool dispatched = false;
    // Sparse FComputeEx paths here are CPU-only; fall back on other devices.
    const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask;
    const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
                             DispatchMode::kFComputeEx;
    if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
      // dns, dns -> dns
      dispatched = storage_type_assign(&out_stype, kDefaultStorage,
                                       dispatch_mode, DispatchMode::kFCompute);
    }
    if (!dispatched && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
      // rsp, rsp -> rsp
      dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
                                       dispatch_mode, dispatch_ex);
    }
    if (!dispatched && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
      // csr, csr -> csr
      dispatched = storage_type_assign(&out_stype, kCSRStorage,
                                       dispatch_mode, dispatch_ex);
    }
    if (!dispatched &&
        ((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
         (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage))) {
      // rsp, dns -> rsp
      // dns, rsp -> rsp
      dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
                                       dispatch_mode, dispatch_ex);
    }
    if (!dispatched &&
        ((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
         (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage))) {
      // csr, dns -> csr
      // dns, csr -> csr
      dispatched = storage_type_assign(&out_stype, kCSRStorage,
                                       dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched) {
      dispatched = dispatch_fallback(out_attrs, dispatch_mode);
    }
    return dispatched;
  }

  /*!
   * \brief Allow one of the inputs to be dense and produce a dense output,
   *        for rsp inputs only support when both inputs are rsp type.
   * \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  template<bool cpu_only, bool rsp, bool csr>
  static bool PreferDenseStorageType(const nnvm::NodeAttrs& attrs,
                                     const int dev_mask,
                                     DispatchMode* dispatch_mode,
                                     std::vector<int> *in_attrs,
                                     std::vector<int> *out_attrs) {
    using namespace common;
    CHECK_EQ(in_attrs->size(), 2);
    CHECK_EQ(out_attrs->size(), 1);
    const auto lhs_stype = (*in_attrs)[0];
    const auto rhs_stype = (*in_attrs)[1];
    bool dispatched = false;
    const bool invalid_ctx = cpu_only && dev_mask != mshadow::cpu::kDevMask;
    const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
                             DispatchMode::kFComputeEx;
    if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
      // dns, dns ... -> dns
      dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                       dispatch_mode, DispatchMode::kFCompute);
    }
    if (!dispatched && rsp && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
      // rsp, rsp, ... -> rsp
      dispatched = storage_type_assign(out_attrs, kRowSparseStorage,
                                       dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched && csr && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
      // csr, csr, ... -> csr
      dispatched = storage_type_assign(out_attrs, kCSRStorage,
                                       dispatch_mode, dispatch_ex);
    }
    if (!dispatched &&
        ((lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage) ||
         (lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage))) {
      // dense, csr -> dense / csr, dense -> dense
      dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                       dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched &&
        ((lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage) ||
         (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage))) {
      // dense, rsp -> dense / rsp, dense -> dense
      dispatched = storage_type_assign(out_attrs, kDefaultStorage,
                                       dispatch_mode, DispatchMode::kFComputeEx);
    }
    if (!dispatched) {
      dispatch_fallback(out_attrs, dispatch_mode);
    }
    // NOTE(review): unlike PreferSparseStorageType above, this ignores the
    // result of dispatch_fallback() and always returns true -- presumably
    // intentional (fallback always succeeds here), but worth confirming.
    return true;
  }

  /*!
   * \brief Backward pass computing input gradient using forward inputs
   * \param attrs Attributes
   * \param dev_mask Device mask
   * \param dispatch_mode Dispatch Mode
   * \param in_attrs Input storage attributes
   * \param out_attrs Output storage attributes
   * \return true if handled
   */
  static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs,
                                       int dev_mask,
                                       DispatchMode* dispatch_mode,
                                       std::vector<int> *in_attrs,
                                       std::vector<int> *out_attrs);

  /*! \brief Dense elementwise forward restricted to integer dtypes. */
  template<typename xpu, typename OP>
  static void ComputeInt(const nnvm::NodeAttrs &attrs,
                         const OpContext &ctx,
                         const std::vector<TBlob> &inputs,
                         const std::vector<OpReqType> &req,
                         const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp) return;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MXNET_INT_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        // Round element count up to whole SIMD lanes.
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
                             + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }

  /*! \brief Dense elementwise forward: out = OP(lhs, rhs); boolean dtype rejected. */
  template<typename xpu, typename OP>
  static void Compute(const nnvm::NodeAttrs &attrs,
                      const OpContext &ctx,
                      const std::vector<TBlob> &inputs,
                      const std::vector<OpReqType> &req,
                      const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp) return;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    if (outputs[0].type_flag_ == mshadow::kBool) {
      LOG(FATAL) << "Operator " << attrs.op->name << " does not support boolean type";
    }
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
                             + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }

  /*! \brief Backward for mixed-type unary use-in case: inputs = {ograd, in};
   *  only real (floating-point) output dtypes are supported. */
  template<typename xpu, typename OP>
  static void MixedUnaryBackwardUseInCompute(const nnvm::NodeAttrs &attrs,
                                             const OpContext &ctx,
                                             const std::vector<TBlob> &inputs,
                                             const std::vector<OpReqType> &req,
                                             const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp) return;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    if (mxnet::common::is_int(outputs[0].type_flag_) || outputs[0].type_flag_ == mshadow::kBool) {
      LOG(FATAL) << "gradient computation of operator " << attrs.op->name << " for "
                 << mshadow::dtype_string(outputs[0].type_flag_) << " type is not supported";
    }
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
                             + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }

  /*! \brief Backward for mixed-type unary use-in-out case:
   *  inputs = {ograd, in, out}; kernel consumes inputs[0] and inputs[2]. */
  template<typename xpu, typename OP>
  static void MixedUnaryBackwardUseInOutCompute(const nnvm::NodeAttrs &attrs,
                                                const OpContext &ctx,
                                                const std::vector<TBlob> &inputs,
                                                const std::vector<OpReqType> &req,
                                                const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp) return;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 3U);
    CHECK_EQ(outputs.size(), 1U);
    if (mxnet::common::is_int(outputs[0].type_flag_) || outputs[0].type_flag_ == mshadow::kBool) {
      LOG(FATAL) << "gradient computation of operator " << attrs.op->name << " for "
                 << mshadow::dtype_string(outputs[0].type_flag_) << " type is not supported";
    }
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, {
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[2].Size())
                             + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[2].dptr<DType>());
        }
      });
    });
  }

  /*! \brief Dense elementwise forward that additionally accepts boolean dtype. */
  template<typename xpu, typename OP>
  static void ComputeWithBool(const nnvm::NodeAttrs &attrs,
                              const OpContext &ctx,
                              const std::vector<TBlob> &inputs,
                              const std::vector<OpReqType> &req,
                              const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp) return;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_TYPE_SWITCH_WITH_BOOL(outputs[0].type_flag_, DType, {
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
                             + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }

  /*! \brief Logical/comparison forward: inputs may have different dtypes
   *  (DType vs EType); output is always boolean. */
  template<typename xpu, typename OP>
  static void ComputeLogic(const nnvm::NodeAttrs &attrs,
                           const OpContext &ctx,
                           const std::vector<TBlob> &inputs,
                           const std::vector<OpReqType> &req,
                           const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp) return;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[0].type_flag_, DType, {
        MSHADOW_TYPE_SWITCH_WITH_BOOL(inputs[1].type_flag_, EType, {
          const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
                               + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
          if (size != 0) {
            Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
            outputs[0].dptr<bool>(),
            inputs[0].dptr<DType>(), inputs[1].dptr<EType>());
          }
        });
      });
    });
  }

  /*! \brief Dense elementwise forward using the half2 (paired fp16) type switch. */
  template<typename xpu, typename OP>
  static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs,
                               const OpContext &ctx,
                               const std::vector<TBlob> &inputs,
                               const std::vector<OpReqType> &req,
                               const std::vector<TBlob> &outputs) {
    using namespace mxnet_op;
    if (req[0] == kNullOp) return;
    Stream<xpu> *s = ctx.get_stream<xpu>();
    CHECK_EQ(inputs.size(), 2U);
    CHECK_EQ(outputs.size(), 1U);
    MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
      MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
        const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
                             + DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
        if (size != 0) {
          Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
          outputs[0].dptr<DType>(),
          inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
        }
      });
    });
  }

  /*! \brief Sparse forward: dispatches on the (lhs, rhs, out) storage-type
   *  combination to the matching sparse kernel, or logs "unimplemented". */
  template<typename xpu, typename OP>
  static void ComputeEx(const nnvm::NodeAttrs &attrs,
                        const OpContext &ctx,
                        const std::vector<NDArray> &inputs,
                        const std::vector<OpReqType> &req,
                        const std::vector<NDArray> &outputs) {
    using namespace common;
    CHECK_EQ(inputs.size(), 2);
    CHECK_EQ(outputs.size(), 1);
    if (req[0] == kNullOp) return;
    const auto lhs_stype = inputs[0].storage_type();
    const auto rhs_stype = inputs[1].storage_type();
    const auto out_stype = outputs[0].storage_type();
    mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
    if ((ContainsOnlyStorage(inputs, kRowSparseStorage)) &&
        (out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) {
      // rsp, rsp -> rsp
      // rsp, rsp -> dns
      RspRspOp<OP>(
        s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0],
        false, false, false, false);
    } else if (ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) {
      // csr, csr -> csr
      CsrCsrOp<OP>(s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]);
    } else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
                (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
               out_stype == kDefaultStorage) {
      const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
      const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1];
      const bool reverse = (lhs_stype == kCSRStorage);
      DnsCsrDnsOp<OP>(s, attrs, ctx, dns, csr, req[0], outputs[0], reverse);
    } else if (((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
                (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
               out_stype == kDefaultStorage) {
      const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
      const bool reverse = (lhs_stype == kRowSparseStorage);
      const NDArray& rsp = (reverse)? inputs[0] : inputs[1];
      DnsRspDnsOp<xpu, OP>(s, attrs, ctx, dns, rsp, req[0], outputs[0], reverse);
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }

  /*! \brief ComputeEx allowing dense lvalue and/or rvalue */
  template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense>
  static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs,
                                  const OpContext &ctx,
                                  const std::vector<NDArray> &inputs,
                                  const std::vector<OpReqType> &req,
                                  const std::vector<NDArray> &outputs) {
    using namespace mshadow;
    using namespace mshadow::expr;
    CHECK_EQ(inputs.size(), 2);
    CHECK_EQ(outputs.size(), 1);
    if (req[0] == kNullOp) return;
    const auto lhs_stype = inputs[0].storage_type();
    const auto rhs_stype = inputs[1].storage_type();
    const auto out_stype = outputs[0].storage_type();
    if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) &&
        ((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) ||
         (lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
         (lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
        lhs_may_be_dense && rhs_may_be_dense) {
      // rsp, rsp -> rsp
      // rsp, rsp -> dns
      // rsp, dns -> rsp
      // dns, rsp -> rsp
      // More than once dense not allowed (this will be checked in RspRspOp):
      // rsp, dns -> dns  <-- NOT ALLOWED
      // dns, rsp -> dns  <-- NOT ALLOWED
      mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
      RspRspOp<OP>(
        s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0],
        lhs_may_be_dense, rhs_may_be_dense, false, false);
    } else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
      ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs);
    } else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
                (lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
               out_stype == kCSRStorage) {
      const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
      const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1];
      const bool reverse = (lhs_stype == kCSRStorage);
      DnsCsrCsrOp<xpu, OP>(attrs, ctx, dns, csr, req[0], outputs[0], reverse);
    } else {
      LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
    }
  }

  /*! \brief Dense backward (gradients from output gradient only); see BackwardUseNone_. */
  template<typename xpu, typename LOP, typename ROP>
  static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs,
                                     const OpContext &ctx,
                                     const std::vector<TBlob> &inputs,
                                     const std::vector<OpReqType> &req,
                                     const std::vector<TBlob> &outputs) {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
    });
  }

  /*! \brief Same as BackwardUseNone, but with the half2 type switch. */
  template<typename xpu, typename LOP, typename ROP>
  static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs,
                                              const OpContext &ctx,
                                              const std::vector<TBlob> &inputs,
                                              const std::vector<OpReqType> &req,
                                              const std::vector<TBlob> &outputs) {
    MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
      BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
    });
  }

  /*! \brief Sparse backward (gradients from output gradient only);
   *  requires the op to map 0 -> 0, checked via DCHECK_LT below. */
  template<typename xpu, typename LOP, typename ROP>
  static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs,
                                       const OpContext &ctx,
                                       const std::vector<NDArray> &inputs,
                                       const std::vector<OpReqType> &req,
                                       const std::vector<NDArray> &outputs) {
    CHECK_EQ(inputs.size(), 1U);   // output grad
    CHECK_EQ(outputs.size(), 2U);  // lhs input grad, rhs input grad
    const auto in_stype = inputs[0].storage_type();
    const auto lhs_stype = outputs[0].storage_type();
    const auto rhs_stype = outputs[1].storage_type();
    // lhs grad
    if (req[0] != kNullOp) {
      if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
        CHECK_EQ(outputs[0].storage_type(), in_stype);
        // rsp -> rsp, _. op requires 0-input returns 0-output
        DCHECK_LT(std::fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
        UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]});
      } else {
        LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
      }
    }
    // rhs grad
    if (req[1] != kNullOp) {
      if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
        CHECK_EQ(outputs[0].storage_type(), in_stype);
        // rsp -> _, rsp. op requires 0-input returns 0-output
        DCHECK_LT(std::fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
        UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]});
      } else {
        LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
      }
    }
  }

  /*! \brief Dense backward using forward inputs; see BackwardUseIn_. */
  template<typename xpu, typename LOP, typename ROP>
  static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs,
                                   const OpContext &ctx,
                                   const std::vector<TBlob> &inputs,
                                   const std::vector<OpReqType> &req,
                                   const std::vector<TBlob> &outputs) {
    MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
      BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
    });
  }

  /*! \brief Same as BackwardUseIn, but with the half2 type switch. */
  template<typename xpu, typename LOP, typename ROP>
  static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs,
                                            const OpContext &ctx,
                                            const std::vector<TBlob> &inputs,
                                            const std::vector<OpReqType> &req,
                                            const std::vector<TBlob> &outputs) {
    MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
      BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
    });
  }

  /*! \brief Sparse backward using forward inputs: dispatches on the gradient
   *  storage types to the rsp or dns/csr backward helpers. */
  template<
    typename xpu, typename LOP, typename ROP,
    bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false>
  static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs,
                                     const OpContext &ctx,
                                     const std::vector<NDArray> &inputs,
                                     const std::vector<OpReqType> &req,
                                     const std::vector<NDArray> &outputs) {
    using namespace common;
    CHECK_EQ(inputs.size(), 3U);
    CHECK_EQ(outputs.size(), 2U);  // lhs input grad, rhs input grad
    const auto out_grad_stype = inputs[0].storage_type();
    const auto lhs_grad_stype = outputs[0].storage_type();
    const auto rhs_grad_stype = outputs[1].storage_type();
    if (ContainsOnlyStorage(inputs, kRowSparseStorage) &&
        (lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) &&
        (rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) {
      // rsp, rsp, rsp -> [dns, rsp], [dns, rsp]
      RspRspOpBackward<xpu, LOP, ROP, in0_ok_dense, in1_ok_dense, in2_ok_dense>(
        attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>);
    }
    if (((lhs_grad_stype == kDefaultStorage && rhs_grad_stype == kCSRStorage) ||
         (lhs_grad_stype == kCSRStorage && rhs_grad_stype == kDefaultStorage)) &&
        out_grad_stype == kDefaultStorage) {
      // dns, csr, dns -> [csr, dns] / csr, dns, dns -> [dns, csr]
      DnsCsrCsrOpBackward<xpu, LOP, ROP>(attrs, ctx, inputs, req, outputs);
    }
  }
};  // class ElemwiseBinaryOp

/*! \brief Binary launch */
#define MXNET_OPERATOR_REGISTER_BINARY(name)                        \
  NNVM_REGISTER_OP(name)                                            \
  .set_num_inputs(2)                                                \
  .set_num_outputs(1)                                               \
  .set_attr<nnvm::FListInputNames>("FListInputNames",               \
    [](const NodeAttrs& attrs) {                                    \
      return std::vector<std::string>{"lhs", "rhs"};                \
    })                                                              \
  .set_attr<mxnet::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \
  .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>)     \
  .set_attr<nnvm::FInplaceOption>("FInplaceOption",                 \
    [](const NodeAttrs& attrs){                                     \
      return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}};     \
    })                                                              \
  .add_argument("lhs", "NDArray-or-Symbol", "first input")          \
  .add_argument("rhs", "NDArray-or-Symbol", "second input")

/*! \brief Binary launch, with FComputeEx for csr and rsp available */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$)              \
  MXNET_OPERATOR_REGISTER_BINARY(__name$)                                               \
  .set_attr<FInferStorageType>("FInferStorageType",                                     \
                               ElemwiseStorageType<2, 1, true, true, true>)             \
  .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>)       \
  .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
  .set_attr<FResourceRequest>("FResourceRequest",  /* For Sparse CSR */                 \
    [](const NodeAttrs& attrs) {                                                        \
      return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})

/*! \brief Binary launch, with FComputeEx for csr and rsp available.
    when inputs contain both sparse and dense, sparse output is preferred. */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PS(__name$, __kernel$)           \
  MXNET_OPERATOR_REGISTER_BINARY(__name$)                                               \
  .set_attr<FInferStorageType>("FInferStorageType",                                     \
                               ElemwiseBinaryOp::PreferSparseStorageType)               \
  .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>)       \
  .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
  .set_attr<FResourceRequest>("FResourceRequest",  /* For Sparse CSR */                 \
    [](const NodeAttrs& attrs) {                                                        \
      return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})

/*! \brief Binary launch, dense result
 *         FInferStorageType attr is not set using this macro.
 *         By default DefaultStorageType is used.
 */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$)           \
  MXNET_OPERATOR_REGISTER_BINARY(__name$)                                               \
  .set_attr<FInferStorageType>("FInferStorageType",                                     \
                               ElemwiseBinaryOp::SparseSparseWithDenseResult)           \
  .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>)       \
  .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)

/*! \brief Binary launch, with FComputeEx for prefer dense */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PD(__name$, __kernel$)           \
  MXNET_OPERATOR_REGISTER_BINARY(__name$)                                               \
  .set_attr<FInferStorageType>("FInferStorageType",                                     \
                               ElemwiseBinaryOp::PreferDenseStorageType<true, true, true>) \
  .set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>)       \
  .set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
  .set_attr<FResourceRequest>("FResourceRequest",  /* For Sparse CSR */                 \
    [](const NodeAttrs& attrs) {                                                        \
      return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})

}  // namespace op
}  // namespace mxnet

#endif  // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
NM_chi2dist.c
#include "mex.h"

#include <stdio.h>
#include <float.h>

/*
 * chi2 distance MEX kernel.
 *
 * We calculate chi2 = (a-b)^2 / (a+b+DBL_MIN) to avoid division-by-zero:
 * if a+b != 0, then (a+b+DBL_MIN) == (a+b) and nothing changed;
 * if a+b == 0, then the numerator is 0 as well, and we don't divide by 0.
 */

/* Using compiler intrinsics (for SSE >= 2) can have a huge speedup effect:
 * 8x for float and 3.5x for double on Intel Core2.
 * You have to compile with the right CPU setting, e.g. gcc -march=k8 or -march=nocona. */
#ifdef __SSE2__
#include <emmintrin.h>  // for double SSE2 intrinsics
#endif

/* OpenMP allows to achieve almost linear speedup on multi-core CPUs: use gcc >= 4.2 with -fopenmp */
#ifdef _OPENMP
#include <omp.h>
#endif

/* chi2-distance between two vectors/histograms of unknown alignment/size */
double chi2_double(const int dim, const double* const x, const double* const y);
/* symmetric chi2-distance matrix of a single set of vectors/histograms */
double chi2sym_distance_double(const int dim, const int nx, const double* const x,
                               double* const K);
/* chi2-distance matrix between two sets of vectors/histograms */
double chi2_distance_double(const int dim, const int nx, const double* const x,
                            const int ny, const double* const y, double* const K);

/*
 * MEX gateway: computes the chi square distance between the input arguments
 *   d(X,Y) = sum ((X(i)-Y(i))^2/(X(i)+Y(i))) / 2
 * where X and Y are matrices of dimension [dim, npts].
 */
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
  double *vecA, *vecB;
  double *dist;
  unsigned int ptsA, ptsB, dim;

  if (nrhs == 0) {
    mexPrintf("Usage: d = chi2_mex(X,Y);\n");
    mexPrintf("where X and Y are matrices of dimension [dim,npts]\n");
    mexPrintf("\nExample\n a = rand(2,10);\n b = rand(2,20);\n d = chi2_mex(a,b);\n");
    return;
  }
  if (nrhs != 2) {
    mexPrintf("at least two input arguments expected.");
    return;
  }
  if (mxGetNumberOfDimensions(prhs[0]) != 2 || mxGetNumberOfDimensions(prhs[1]) != 2) {
    mexPrintf("inputs must be two dimensional");
    return;
  }

  vecA = (double *)mxGetPr(prhs[0]);
  vecB = (double *)mxGetPr(prhs[1]);
  ptsA = mxGetN(prhs[0]);
  ptsB = mxGetN(prhs[1]);
  dim  = mxGetM(prhs[0]);
  if (dim != mxGetM(prhs[1])) {
    mexPrintf("Dimension mismatch");
    return;
  }

  /* Output is [ptsA, ptsB]; swapped argument order below matches the
     column-major layout MATLAB expects in the result matrix. */
  plhs[0] = mxCreateDoubleMatrix(ptsA, ptsB, mxREAL);
  dist = (double *)mxGetPr(plhs[0]);
  chi2_distance_double(dim, ptsB, vecB, ptsA, vecA, dist);
}

/* Scalar reference implementation; also used for the tail elements the
 * vectorized version cannot process. ('_inline' is non-standard; C99
 * 'static inline' is portable across gcc/clang/MSVC mex builds.) */
static inline double chi2_baseline_double(const int n, const double* const x,
                                          const double* const y)
{
  double result = 0.0;   /* double accumulator (was the float literal 0.f) */
  int i;
  for (i = 0; i < n; i++) {
    const double num = x[i] - y[i];
    const double denom = 1. / (x[i] + y[i] + DBL_MIN);
    result += num * num * denom;
  }
  return result;
}

#ifdef __SSE2__
/* SSE2 version: processes 2 doubles per iteration. Only compiled when SSE2
 * is available, since __m128d does not exist otherwise. */
static inline double chi2_intrinsic_double(int n, const double* x, const double* y)
{
  double result = 0;
  const __m128d eps = _mm_set1_pd(DBL_MIN);
  __m128d chi2 = _mm_setzero_pd();

  for ( ; n > 1; n -= 2) {
    const __m128d a = _mm_loadu_pd(x);
    const __m128d b = _mm_loadu_pd(y);
    x += 2;
    y += 2;
    const __m128d a_plus_b          = _mm_add_pd(a, b);
    const __m128d a_plus_b_plus_eps = _mm_add_pd(a_plus_b, eps);
    const __m128d a_minus_b         = _mm_sub_pd(a, b);
    const __m128d a_minus_b_sq      = _mm_mul_pd(a_minus_b, a_minus_b);
    const __m128d quotient          = _mm_div_pd(a_minus_b_sq, a_plus_b_plus_eps);
    chi2 = _mm_add_pd(chi2, quotient);
  }
  /* horizontal add of the two lanes; with SSE3 we could use hadd_pd,
     but the difference is negligible */
  const __m128d shuffle = _mm_shuffle_pd(chi2, chi2, _MM_SHUFFLE2(0, 1));
  const __m128d sum = _mm_add_pd(chi2, shuffle);
  _mm_store_sd(&result, sum);
  /* note: no _mm_empty() needed — that is an MMX 'emms', and this code
     uses only XMM registers */
  if (n)  /* odd length: one remaining entry */
    result += chi2_baseline_double(n, x, y);
  return result;
}
#endif  /* __SSE2__ */

/* chi2-distance between two vectors/histograms; dispatches to the fastest
 * implementation available at compile time. */
double chi2_double(const int dim, const double* const x, const double* const y)
{
  double (*chi2_impl)(const int, const double*, const double*) = chi2_baseline_double;
#ifdef __SSE2__
  chi2_impl = chi2_intrinsic_double;
#endif
  return chi2_impl(dim, x, y);
}

/* Symmetric chi2-distance matrix K[nx, nx] of one set of histograms.
 * Returns the mean off-diagonal distance.
 *
 * BUGFIX: the row loop was missing braces, so the inner j-loop ran AFTER
 * the i-loop finished, with i == nx, reading x[nx*dim] out of bounds. */
double chi2sym_distance_double(const int dim, const int nx, const double* const x,
                               double* const K)
{
  double (*chi2_impl)(const int, const double*, const double*) = chi2_baseline_double;
#ifdef __SSE2__
  chi2_impl = chi2_intrinsic_double;
#endif

  double sumK = 0.;
#pragma omp parallel
  {
    int i, j;
#pragma omp for reduction (+:sumK) schedule (dynamic, 2)
    for (i = 0; i < nx; i++) {
      K[i*nx + i] = 0.;           /* diagonal: distance to self is zero */
      for (j = 0; j < i; j++) {   /* lower triangle; mirror into upper */
        const double chi2 = chi2_impl(dim, &x[i*dim], &x[j*dim]);
        K[i*nx + j] = chi2;
        K[j*nx + i] = chi2;
        sumK += 2*chi2;
      }
    }
  }
  /* double arithmetic avoids int overflow of nx*nx and float rounding */
  return sumK / ((double)nx * (double)nx);
}

/* chi2-distance matrix K[nx, ny] between two sets of histograms.
 * K stores half-distances (the conventional 1/2 factor); returns the
 * mean full distance. */
double chi2_distance_double(const int dim, const int nx, const double* const x,
                            const int ny, const double* const y, double* const K)
{
  double (*chi2_impl)(const int, const double*, const double*) = chi2_baseline_double;
#ifdef __SSE2__
  chi2_impl = chi2_intrinsic_double;
#endif

  double sumK = 0.;
#pragma omp parallel
  {
    int i, j;
#pragma omp for reduction (+:sumK)
    for (i = 0; i < nx; i++) {
      for (j = 0; j < ny; j++) {
        const double chi2 = chi2_impl(dim, &x[i*dim], &y[j*dim]);
        K[i*ny + j] = chi2 * 0.5;
        sumK += chi2;
      }
    }
  }
  return sumK / ((double)nx * (double)ny);
}
depthwise_conv2d.h
// Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Depthwise 2-D convolution functors for CPU (NCHW, float) and GPU (OpenCL).
// The CPU path dispatches to NEON-optimized 3x3 kernels when applicable and
// falls back to a general scalar implementation otherwise.

#ifndef MACE_KERNELS_DEPTHWISE_CONV2D_H_
#define MACE_KERNELS_DEPTHWISE_CONV2D_H_

#if defined(MACE_ENABLE_NEON) && defined(__aarch64__)
#include <arm_neon.h>
#endif
#include <algorithm>
#include <memory>
#include <vector>

#include "mace/core/future.h"
#include "mace/kernels/conv_pool_2d_util.h"
#include "mace/kernels/activation.h"
#include "mace/kernels/arm/depthwise_conv2d_neon.h"
#include "mace/public/mace.h"

#ifdef MACE_ENABLE_OPENCL
#include "mace/core/runtime/opencl/cl2_header.h"
#endif  // MACE_ENABLE_OPENCL

namespace mace {
namespace kernels {

// Shared configuration for all depthwise-conv2d functors: strides, padding,
// dilations, and the fused activation applied after the convolution.
struct DepthwiseConv2dFunctorBase {
  DepthwiseConv2dFunctorBase(const int *strides,
                             const Padding padding_type,
                             const std::vector<int> &paddings,
                             const int *dilations,
                             const ActivationType activation,
                             const float relux_max_limit)
      : strides_(strides),
        padding_type_(padding_type),
        paddings_(paddings),
        dilations_(dilations),
        activation_(activation),
        relux_max_limit_(relux_max_limit) {}

  const int *strides_;         // [stride_h, stride_w]
  const Padding padding_type_;
  std::vector<int> paddings_;  // explicit [pad_h, pad_w]; empty means "compute from padding_type_"
  const int *dilations_;       // [dilation_h, dilation_w]
  const ActivationType activation_;
  const float relux_max_limit_;
};

template<DeviceType D, typename T>
struct DepthwiseConv2dFunctor;

// CPU float specialization (NCHW layout).
template<>
struct DepthwiseConv2dFunctor<DeviceType::CPU, float>
    : public DepthwiseConv2dFunctorBase {
  DepthwiseConv2dFunctor(const int *strides,
                         const Padding padding_type,
                         const std::vector<int> &paddings,
                         const int *dilations,
                         const ActivationType activation,
                         const float relux_max_limit)
      : DepthwiseConv2dFunctorBase(strides,
                                   padding_type,
                                   paddings,
                                   dilations,
                                   activation,
                                   relux_max_limit) {}

  // General (scalar) depthwise convolution fallback.
  // filter_shape is [out_channels, in_channels, kh, kw]; each output channel m
  // reads a single input channel c = m / multiplier (depthwise semantics).
  void DepthwiseConv2dGeneral(const float *input,
                              const float *filter,
                              const index_t *in_shape,
                              const index_t *out_shape,
                              const index_t *filter_shape,
                              const int *stride_hw,
                              const int *dilation_hw,
                              const int *pad_hw,
                              float *output) {
    const index_t multiplier = filter_shape[0] / filter_shape[1];
#pragma omp parallel for collapse(2)
    for (index_t b = 0; b < in_shape[0]; ++b) {
      for (index_t m = 0; m < filter_shape[0]; ++m) {
        for (index_t h = 0; h < out_shape[2]; ++h) {
          for (index_t w = 0; w < out_shape[3]; ++w) {
            const index_t out_channels = filter_shape[0];
            const index_t in_channels = filter_shape[1];
            const index_t filter_height = filter_shape[2];
            const index_t filter_width = filter_shape[3];
            const index_t in_height = in_shape[2];
            const index_t in_width = in_shape[3];
            const index_t out_height = out_shape[2];
            const index_t out_width = out_shape[3];
            index_t out_offset =
                ((b * out_channels + m) * out_height + h) * out_width + w;
            index_t c = m / multiplier;  // source input channel
            index_t o = m % multiplier;  // filter index within the channel
            float sum = 0;
            for (index_t kh = 0; kh < filter_height; ++kh) {
              for (index_t kw = 0; kw < filter_width; ++kw) {
                index_t ih = h * stride_hw[0] + kh * dilation_hw[0] - pad_hw[0];
                index_t iw = w * stride_hw[1] + kw * dilation_hw[1] - pad_hw[1];
                // out-of-bounds taps contribute zero (implicit zero padding)
                if (ih >= 0 && ih < in_height && iw >= 0 && iw < in_width) {
                  index_t in_offset =
                      ((b * in_channels + c) * in_height + ih) * in_width + iw;
                  index_t filter_offset =
                      (((o * in_channels) + c) * filter_height + kh)
                          * filter_width + kw;
                  sum += input[in_offset] * filter[filter_offset];
                }
              }
            }
            output[out_offset] = sum;
          }
        }
      }
    }
  }

  // Runs the depthwise convolution, adds bias, applies the fused activation.
  MaceStatus operator()(const Tensor *input,
                        const Tensor *filter,
                        const Tensor *bias,
                        Tensor *output,
                        StatsFuture *future) {
    MACE_UNUSED(future);
    MACE_CHECK_NOTNULL(input);
    MACE_CHECK_NOTNULL(filter);
    MACE_CHECK_NOTNULL(output);

    std::vector<index_t> output_shape(4);
    std::vector<int> paddings(2);
    // Flatten filter dims to [out_channels, in_channels, kh, kw] with
    // out_channels = dim(0) * dim(1) (channel multiplier * channels).
    std::vector<index_t> filter_shape
        {filter->dim(0) * filter->dim(1), filter->dim(1),
         filter->dim(2), filter->dim(3)};

    if (paddings_.empty()) {
      CalcNCHWPaddingAndOutputSize(input->shape().data(),
                                   filter_shape.data(),
                                   dilations_,
                                   strides_,
                                   padding_type_,
                                   output_shape.data(),
                                   paddings.data());
    } else {
      paddings = paddings_;
      CalcNCHWOutputSize(input->shape().data(),
                         filter_shape.data(),
                         paddings_.data(),
                         dilations_,
                         strides_,
                         RoundType::FLOOR,
                         output_shape.data());
    }
    MACE_RETURN_IF_ERROR(output->Resize(output_shape));
    output->Clear();

    index_t batch = output->dim(0);
    index_t channels = output->dim(1);
    index_t height = output->dim(2);
    index_t width = output->dim(3);

    index_t input_batch = input->dim(0);
    index_t input_channels = input->dim(1);
    index_t input_height = input->dim(2);
    index_t input_width = input->dim(3);

    index_t filter_h = filter_shape[2];
    index_t filter_w = filter_shape[3];
    MACE_CHECK(filter_shape[0] == channels, filter_shape[0], " != ", channels);
    MACE_CHECK(filter_shape[1] == input_channels, filter_shape[1],
               " != ", input_channels);

    index_t stride_h = strides_[0];
    index_t stride_w = strides_[1];

    index_t dilation_h = dilations_[0];
    index_t dilation_w = dilations_[1];

    MACE_CHECK(batch == input_batch, "Input/Output batch size mismatch");

    // Split total padding into top/bottom and left/right halves.
    int pad_top = paddings[0] >> 1;
    int pad_bottom = paddings[0] - pad_top;
    int pad_left = paddings[1] >> 1;
    int pad_right = paddings[1] - pad_left;

    // Output region whose receptive field lies fully inside the input; the
    // NEON kernels use this to skip per-tap bounds checks in the interior.
    index_t valid_h_start = pad_top == 0 ? 0 : (pad_top - 1) / stride_h + 1;
    index_t valid_h_stop = pad_bottom == 0
                           ? height
                           : height - ((pad_bottom - 1) / stride_h + 1);
    index_t valid_w_start = pad_left == 0 ? 0 : (pad_left - 1) / stride_w + 1;
    index_t valid_w_stop = pad_right == 0
                           ? width
                           : width - ((pad_right - 1) / stride_w + 1);

    std::function<void(const float *input, float *output)> conv_func;

    Tensor::MappingGuard input_guard(input);
    Tensor::MappingGuard filter_guard(filter);
    Tensor::MappingGuard bias_guard(bias);
    Tensor::MappingGuard output_guard(output);

    auto input_data = input->data<float>();
    auto filter_data = filter->data<float>();
    auto bias_data = bias == nullptr ? nullptr : bias->data<float>();
    auto output_data = output->mutable_data<float>();

    const int pad_hw[2] = {pad_top, pad_left};
    const index_t input_shape[4] =
        {batch, input_channels, input_height, input_width};

    // Choose the fastest kernel: NEON 3x3 stride-1/stride-2 fast paths,
    // otherwise the general scalar implementation.
    if (filter_h == 3 && filter_w == 3 && stride_h == 1 && stride_w == 1
        && dilation_h == 1 && dilation_w == 1) {
      conv_func = [=](const float *input, float *output) {
        DepthwiseConv2dNeonK3x3S1(input,
                                  filter_data,
                                  input_shape,
                                  output_shape.data(),
                                  pad_hw,
                                  valid_h_start,
                                  valid_h_stop,
                                  valid_w_start,
                                  valid_w_stop,
                                  output);
      };
    } else if (filter_h == 3 && filter_w == 3 && stride_h == 2 && stride_w == 2
        && dilation_h == 1 && dilation_w == 1) {
      conv_func = [=](const float *input, float *output) {
        DepthwiseConv2dNeonK3x3S2(input,
                                  filter_data,
                                  input_shape,
                                  output_shape.data(),
                                  pad_hw,
                                  valid_h_start,
                                  valid_h_stop,
                                  valid_w_start,
                                  valid_w_stop,
                                  output);
      };
    } else {
      conv_func = [=](const float *input, float *output) {
        DepthwiseConv2dGeneral(input,
                               filter_data,
                               input_shape,
                               output_shape.data(),
                               filter_shape.data(),
                               strides_,
                               dilations_,
                               pad_hw,
                               output);
      };
    }

    conv_func(input_data, output_data);

    // Broadcast per-channel bias over each HxW plane.
    if (bias_data != nullptr) {
#pragma omp parallel for collapse(2)
      for (index_t b = 0; b < batch; ++b) {
        for (index_t c = 0; c < channels; ++c) {
          for (index_t i = 0; i < height * width; ++i) {
            output_data[(b * channels + c) * height * width + i] +=
                bias_data[c];
          }
        }
      }
    }

    // Fused activation (in place).
    DoActivation(output_data, output_data, output->size(), activation_,
                 relux_max_limit_);

    return MACE_SUCCESS;
  }
};

#ifdef MACE_ENABLE_OPENCL
// GPU specialization; operator() is defined in the OpenCL implementation file.
template<typename T>
struct DepthwiseConv2dFunctor<DeviceType::GPU, T>
    : DepthwiseConv2dFunctorBase {
  DepthwiseConv2dFunctor(const int *strides,
                         const Padding padding_type,
                         const std::vector<int> &paddings,
                         const int *dilations,
                         const ActivationType activation,
                         const float relux_max_limit)
      : DepthwiseConv2dFunctorBase(strides,
                                   padding_type,
                                   paddings,
                                   dilations,
                                   activation,
                                   relux_max_limit) {}

  MaceStatus operator()(const Tensor *input,
                        const Tensor *filter,
                        const Tensor *bias,
                        Tensor *output,
                        StatsFuture *future);

  cl::Kernel kernel_;                        // cached compiled kernel
  uint32_t kwg_size_;                        // max workgroup size for kernel_
  std::unique_ptr<BufferBase> kernel_error_; // device-side error flag buffer
  std::vector<index_t> input_shape_;         // shape the kernel was tuned for
};
#endif  // MACE_ENABLE_OPENCL

}  // namespace kernels
}  // namespace mace

#endif  // MACE_KERNELS_DEPTHWISE_CONV2D_H_
ccl_core.c
#include <stdio.h> #include <stdlib.h> #include <stdarg.h> #include <math.h> #include <string.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_odeiv.h> #include <gsl/gsl_spline.h> #include <gsl/gsl_interp2d.h> #include <gsl/gsl_spline2d.h> #include <gsl/gsl_integration.h> #include "ccl.h" // // Macros for replacing relative paths #define EXPAND_STR(s) STRING(s) #define STRING(s) #s const ccl_configuration default_config = { ccl_boltzmann_class, ccl_halofit, ccl_nobaryons, ccl_tinker10, ccl_duffy2008, ccl_emu_strict}; //Precision parameters /** * Default relative precision if not otherwise specified */ #define GSL_EPSREL 1E-4 /** * Default number of iterations for integration and root-finding if not otherwise * specified */ #define GSL_N_ITERATION 1000 /** * Default number of Gauss-Kronrod points in QAG integration if not otherwise * specified */ #define GSL_INTEGRATION_GAUSS_KRONROD_POINTS GSL_INTEG_GAUSS41 /** * Relative precision in sigma_R calculations */ #define GSL_EPSREL_SIGMAR 1E-5 /** * Relative precision in distance calculations */ #define GSL_EPSREL_DIST 1E-6 /** * Relative precision in growth calculations */ #define GSL_EPSREL_GROWTH 1E-6 /** * Relative precision in dNdz calculations */ #define GSL_EPSREL_DNDZ 1E-6 const ccl_gsl_params default_gsl_params = { GSL_N_ITERATION, // N_ITERATION GSL_INTEGRATION_GAUSS_KRONROD_POINTS,// INTEGRATION_GAUSS_KRONROD_POINTS GSL_EPSREL, // INTEGRATION_EPSREL GSL_INTEGRATION_GAUSS_KRONROD_POINTS,// INTEGRATION_LIMBER_GAUSS_KRONROD_POINTS GSL_EPSREL, // INTEGRATION_LIMBER_EPSREL GSL_EPSREL_DIST, // INTEGRATION_DISTANCE_EPSREL GSL_EPSREL_SIGMAR, // INTEGRATION_SIGMAR_EPSREL GSL_EPSREL, // ROOT_EPSREL GSL_N_ITERATION, // ROOT_N_ITERATION GSL_EPSREL_GROWTH, // ODE_GROWTH_EPSREL 1E-6, // EPS_SCALEFAC_GROWTH 1E7, // HM_MMIN 1E17, // HM_MMAX 0.0, // HM_EPSABS 1E-4, // HM_EPSREL 1000, // HM_LIMIT GSL_INTEG_GAUSS41 // HM_INT_METHOD }; #undef GSL_EPSREL #undef GSL_N_ITERATION #undef GSL_INTEGRATION_GAUSS_KRONROD_POINTS #undef 
GSL_EPSREL_SIGMAR #undef GSL_EPSREL_DIST #undef GSL_EPSREL_GROWTH #undef GSL_EPSREL_DNDZ const ccl_spline_params default_spline_params = { // scale factor spline params 250, // A_SPLINE_NA 0.1, // A_SPLINE_MIN 0.01, // A_SPLINE_MINLOG_PK 0.1, // A_SPLINE_MIN_PK, 1.0, // A_SPLINE_MAX, 0.0001, // A_SPLINE_MINLOG, 250, // A_SPLINE_NLOG, // mass splines 0.025, // LOGM_SPLINE_DELTA 440, // LOGM_SPLINE_NM 6, // LOGM_SPLINE_MIN 17, // LOGM_SPLINE_MAX // PS a and k spline 40, // A_SPLINE_NA_PK 11, // A_SPLINE_NLOG_PK // k-splines and integrals 50, // K_MAX_SPLINE 1E3, // K_MAX 5E-5, // K_MIN 0.025, // DLOGK_INTEGRATION 167, // N_K 100000, // N_K_3DCOR // correlation function parameters 0.01, // ELL_MIN_CORR 60000, // ELL_MAX_CORR 5000, // N_ELL_CORR //Spline types NULL, NULL, NULL, NULL, NULL, NULL, NULL }; ccl_physical_constants ccl_constants = { /** * Lightspeed / H0 in units of Mpc/h (from CODATA 2014) */ 2997.92458, /** * Newton's gravitational constant in units of m^3/Kg/s^2 */ //6.6738e-11, /(from PDG 2013) in m^3/Kg/s^2 //6.67428e-11, // CLASS VALUE 6.67408e-11, // from CODATA 2014 /** * Solar mass in units of kg (from GSL) */ //GSL_CONST_MKSA_SOLAR_MASS, //1.9885e30, //(from PDG 2015) in Kg 1.9884754153381438E+30, //from IAU 2015 /** * Mpc to meters (from PDG 2016 and using M_PI) */ 3.085677581491367399198952281E+22, /** * pc to meters (from PDG 2016 and using M_PI) */ 3.085677581491367399198952281E+16, /** * Rho critical in units of M_sun/h / (Mpc/h)^3 */ ((3*100*100)/(8*M_PI*6.67408e-11)) * (1000*1000*3.085677581491367399198952281E+22/1.9884754153381438E+30), /** * Boltzmann constant in units of J/K */ //GSL_CONST_MKSA_BOLTZMANN, 1.38064852E-23, //from CODATA 2014 /** * Stefan-Boltzmann constant in units of kg/s^3 / K^4 */ //GSL_CONST_MKSA_STEFAN_BOLTZMANN_CONSTANT, 5.670367E-8, //from CODATA 2014 /** * Planck's constant in units kg m^2 / s */ //GSL_CONST_MKSA_PLANCKS_CONSTANT_H, 6.626070040E-34, //from CODATA 2014 /** * The speed of light in m/s */ 
//GSL_CONST_MKSA_SPEED_OF_LIGHT, 299792458.0, //from CODATA 2014 /** * Electron volt to Joules convestion */ //GSL_CONST_MKSA_ELECTRON_VOLT, 1.6021766208e-19, //from CODATA 2014 /** * Temperature of the CMB in K */ 2.725, //2.7255, // CLASS value /** * T_ncdm, as taken from CLASS, explanatory.ini */ 0.71611, /** * neutrino mass splitting differences * See Lesgourgues and Pastor, 2012 for these values. * Adv. High Energy Phys. 2012 (2012) 608515, * arXiv:1212.6154, page 13 */ 7.62E-5, 2.55E-3, -2.43E-3 }; /* ------- ROUTINE: ccl_cosmology_create ------ INPUTS: ccl_parameters params ccl_configuration config TASK: creates the ccl_cosmology struct and passes some values to it DEFINITIONS: chi: comoving distance [Mpc] growth: growth function (density) fgrowth: logarithmic derivative of the growth (density) (dlnD/da?) E: E(a)=H(a)/H0 growth0: growth at z=0, defined to be 1 sigma: ? p_lin: linear matter power spectrum at z=0? p_lnl: nonlinear matter power spectrum at z=0? computed_distances, computed_growth, computed_power, computed_sigma: store status of the computations */ ccl_cosmology * ccl_cosmology_create(ccl_parameters params, ccl_configuration config) { ccl_cosmology * cosmo = malloc(sizeof(ccl_cosmology)); cosmo->params = params; cosmo->config = config; cosmo->gsl_params = default_gsl_params; cosmo->spline_params = default_spline_params; cosmo->spline_params.A_SPLINE_TYPE = gsl_interp_akima; cosmo->spline_params.K_SPLINE_TYPE = gsl_interp_akima; cosmo->spline_params.M_SPLINE_TYPE = gsl_interp_akima; cosmo->spline_params.D_SPLINE_TYPE = gsl_interp_akima; cosmo->spline_params.PNL_SPLINE_TYPE = gsl_interp2d_bicubic; cosmo->spline_params.PLIN_SPLINE_TYPE = gsl_interp2d_bicubic; cosmo->spline_params.CORR_SPLINE_TYPE = gsl_interp_akima; cosmo->data.chi = NULL; cosmo->data.growth = NULL; cosmo->data.fgrowth = NULL; cosmo->data.E = NULL; cosmo->data.growth0 = 1.; cosmo->data.achi = NULL; cosmo->data.logsigma = NULL; cosmo->data.dlnsigma_dlogm = NULL; 
cosmo->data.rsd_splines[0] = NULL; cosmo->data.rsd_splines[1] = NULL; cosmo->data.rsd_splines[2] = NULL; cosmo->data.p_lin = NULL; cosmo->data.p_nl = NULL; cosmo->computed_distances = false; cosmo->computed_growth = false; cosmo->computed_linear_power = false; cosmo->computed_nonlin_power = false; cosmo->computed_sigma = false; cosmo->status = 0; ccl_cosmology_set_status_message(cosmo, ""); if(cosmo->spline_params.A_SPLINE_MAX !=1.) { cosmo->status = CCL_ERROR_SPLINE; ccl_cosmology_set_status_message(cosmo, "ccl_core.c: A_SPLINE_MAX needs to be 1.\n"); } return cosmo; } /* ------ ROUTINE: ccl_parameters_fill_initial ------- INPUT: ccl_parameters: params TASK: fill parameters not set by ccl_parameters_create with some initial values DEFINITIONS: Omega_g = (Omega_g*h^2)/h^2 is the radiation parameter; "g" is for photons, as in CLASS T_CMB: CMB temperature in Kelvin Omega_l: Lambda A_s: amplitude of the primordial PS, enforced here to initially set to NaN sigma8: variance in 8 Mpc/h spheres for normalization of matter PS, enforced here to initially set to NaN z_star: recombination redshift */ void ccl_parameters_fill_initial(ccl_parameters * params, int *status) { // Fixed radiation parameters // Omega_g * h**2 is known from T_CMB params->T_CMB = ccl_constants.T_CMB; // kg / m^3 double rho_g = 4. * ccl_constants.STBOLTZ / pow(ccl_constants.CLIGHT, 3) * pow(params->T_CMB, 4); // kg / m^3 double rho_crit = ccl_constants.RHO_CRITICAL * ccl_constants.SOLAR_MASS/pow(ccl_constants.MPC_TO_METER, 3) * pow(params->h, 2); params->Omega_g = rho_g/rho_crit; // Get the N_nu_rel from Neff and N_nu_mass params->N_nu_rel = params->Neff - params->N_nu_mass * pow(ccl_constants.TNCDM, 4) / pow(4./11.,4./3.); // Temperature of the relativistic neutrinos in K double T_nu= (params->T_CMB) * pow(4./11.,1./3.); // in kg / m^3 double rho_nu_rel = params->N_nu_rel* 7.0/8.0 * 4. 
* ccl_constants.STBOLTZ / pow(ccl_constants.CLIGHT, 3) * pow(T_nu, 4); params-> Omega_nu_rel = rho_nu_rel/rho_crit; // If non-relativistic neutrinos are present, calculate the phase_space integral. if((params->N_nu_mass)>0) { params->Omega_nu_mass = ccl_Omeganuh2( 1.0, params->N_nu_mass, params->m_nu, params->T_CMB, status) / ((params->h)*(params->h)); } else{ params->Omega_nu_mass = 0.; } params->Omega_m = params->Omega_b + params-> Omega_c + params->Omega_nu_mass; params->Omega_l = 1.0 - params->Omega_m - params->Omega_g - params->Omega_nu_rel - params->Omega_k; // Initially undetermined parameters - set to nan to trigger // problems if they are mistakenly used. if (isfinite(params->A_s)) {params->sigma8 = NAN;} if (isfinite(params->sigma8)) {params->A_s = NAN;} params->z_star = NAN; if(fabs(params->Omega_k)<1E-6) params->k_sign=0; else if(params->Omega_k>0) params->k_sign=-1; else params->k_sign=1; params->sqrtk=sqrt(fabs(params->Omega_k))*params->h/ccl_constants.CLIGHT_HMPC; } /* ------ ROUTINE: ccl_parameters_create ------- INPUT: numbers for the basic cosmological parameters needed by CCL TASK: fill params with some initial values provided by the user DEFINITIONS: Omega_c: cold dark matter Omega_b: baryons Omega_m: matter Omega_k: curvature little omega_x means Omega_x*h^2 Neff : Effective number of neutrino speces mnu : Pointer to either sum of neutrino masses or list of three masses. mnu_type : how the neutrino mass(es) should be treated w0: Dark energy eq of state parameter wa: Dark energy eq of state parameter, time variation H0: Hubble's constant in km/s/Mpc. h: Hubble's constant divided by (100 km/s/Mpc). 
A_s: amplitude of the primordial PS n_s: index of the primordial PS */ ccl_parameters ccl_parameters_create( double Omega_c, double Omega_b, double Omega_k, double Neff, double* mnu, int n_mnu, double w0, double wa, double h, double norm_pk, double n_s, double bcm_log10Mc, double bcm_etab, double bcm_ks, double mu_0, double sigma_0, int nz_mgrowth, double *zarr_mgrowth, double *dfarr_mgrowth, int *status) { #ifndef USE_GSL_ERROR gsl_set_error_handler_off (); #endif ccl_parameters params; // Initialize params params.m_nu = NULL; params.z_mgrowth=NULL; params.df_mgrowth=NULL; params.sigma8 = NAN; params.A_s = NAN; params.Omega_c = Omega_c; params.Omega_b = Omega_b; params.Omega_k = Omega_k; params.Neff = Neff; params.m_nu = malloc(n_mnu*sizeof(double)); params.sum_nu_masses = 0.; for(int i = 0; i<n_mnu; i=i+1){ params.m_nu[i] = mnu[i]; params.sum_nu_masses = params.sum_nu_masses + mnu[i]; } if(params.sum_nu_masses<1e-15){ params.N_nu_mass = 0; }else{ params.N_nu_mass = n_mnu; } // Dark Energy params.w0 = w0; params.wa = wa; // Hubble parameters params.h = h; params.H0 = h*100; // Primordial power spectra if(norm_pk<1E-5) params.A_s=norm_pk; else params.sigma8=norm_pk; params.n_s = n_s; //Baryonic params if(bcm_log10Mc<0) params.bcm_log10Mc=log10(1.2e14); else params.bcm_log10Mc=bcm_log10Mc; if(bcm_etab<0) params.bcm_etab=0.5; else params.bcm_etab=bcm_etab; if(bcm_ks<0) params.bcm_ks=55.0; else params.bcm_ks=bcm_ks; // Params of the mu / Sigma parameterisation of MG params.mu_0 = mu_0; params.sigma_0 = sigma_0; // Set remaining standard and easily derived parameters ccl_parameters_fill_initial(&params, status); //Trigger modified growth function if nz>0 if(nz_mgrowth>0) { params.has_mgrowth=true; params.nz_mgrowth=nz_mgrowth; params.z_mgrowth=malloc(params.nz_mgrowth*sizeof(double)); params.df_mgrowth=malloc(params.nz_mgrowth*sizeof(double)); memcpy(params.z_mgrowth,zarr_mgrowth,params.nz_mgrowth*sizeof(double)); 
memcpy(params.df_mgrowth,dfarr_mgrowth,params.nz_mgrowth*sizeof(double)); } else { params.has_mgrowth=false; params.nz_mgrowth=0; params.z_mgrowth=NULL; params.df_mgrowth=NULL; } return params; } /* ------- ROUTINE: ccl_parameters_create_flat_lcdm -------- INPUT: some cosmological parameters needed to create a flat LCDM model TASK: call ccl_parameters_create to produce an LCDM model */ ccl_parameters ccl_parameters_create_flat_lcdm(double Omega_c, double Omega_b, double h, double norm_pk, double n_s, int *status) { double Omega_k = 0.0; double Neff = 3.046; double w0 = -1.0; double wa = 0.0; double *mnu; double mnuval = 0.; // a pointer to the variable is not kept past the lifetime of this function mnu = &mnuval; double mu_0 = 0.; double sigma_0 = 0.; ccl_parameters params = ccl_parameters_create(Omega_c, Omega_b, Omega_k, Neff, mnu, 0, w0, wa, h, norm_pk, n_s, -1, -1, -1, mu_0, sigma_0, -1, NULL, NULL, status); return params; } /** * Write a cosmology parameters object to a file in yaml format. 
* @param cosmo Cosmological parameters * @param f FILE* pointer opened for reading * @return void */ void ccl_parameters_write_yaml(ccl_parameters * params, const char * filename, int *status) { FILE * f = fopen(filename, "w"); if (!f){ *status = CCL_ERROR_FILE_WRITE; return; } #define WRITE_DOUBLE(name) fprintf(f, #name ": %le\n",params->name) #define WRITE_INT(name) fprintf(f, #name ": %d\n",params->name) // Densities: CDM, baryons, total matter, curvature WRITE_DOUBLE(Omega_c); WRITE_DOUBLE(Omega_b); WRITE_DOUBLE(Omega_m); WRITE_DOUBLE(Omega_k); WRITE_INT(k_sign); // Dark Energy WRITE_DOUBLE(w0); WRITE_DOUBLE(wa); // Hubble parameters WRITE_DOUBLE(H0); WRITE_DOUBLE(h); // Neutrino properties WRITE_DOUBLE(Neff); WRITE_INT(N_nu_mass); WRITE_DOUBLE(N_nu_rel); if (params->N_nu_mass>0){ fprintf(f, "m_nu: ["); for (int i=0; i<params->N_nu_mass; i++){ fprintf(f, "%le, ", params->m_nu[i]); } fprintf(f, "]\n"); } WRITE_DOUBLE(sum_nu_masses); WRITE_DOUBLE(Omega_nu_mass); WRITE_DOUBLE(Omega_nu_rel); // Primordial power spectra WRITE_DOUBLE(A_s); WRITE_DOUBLE(n_s); // Radiation parameters WRITE_DOUBLE(Omega_g); WRITE_DOUBLE(T_CMB); // BCM baryonic model parameters WRITE_DOUBLE(bcm_log10Mc); WRITE_DOUBLE(bcm_etab); WRITE_DOUBLE(bcm_ks); // Modified gravity parameters WRITE_DOUBLE(mu_0); WRITE_DOUBLE(sigma_0); // Derived parameters WRITE_DOUBLE(sigma8); WRITE_DOUBLE(Omega_l); WRITE_DOUBLE(z_star); WRITE_INT(has_mgrowth); WRITE_INT(nz_mgrowth); if (params->has_mgrowth){ fprintf(f, "z_mgrowth: ["); for (int i=0; i<params->nz_mgrowth; i++){ fprintf(f, "%le, ", params->z_mgrowth[i]); } fprintf(f, "]\n"); fprintf(f, "df_mgrowth: ["); for (int i=0; i<params->nz_mgrowth; i++){ fprintf(f, "%le, ", params->df_mgrowth[i]); } fprintf(f, "]\n"); } #undef WRITE_DOUBLE #undef WRITE_INT fclose(f); } /** * Write a cosmology parameters object to a file in yaml format. 
* @param cosmo Cosmological parameters * @param f FILE* pointer opened for reading * @return void */ ccl_parameters ccl_parameters_read_yaml(const char * filename, int *status) { FILE * f = fopen(filename, "r"); if (!f) { *status = CCL_ERROR_FILE_READ; ccl_parameters bad_params; ccl_raise_warning(CCL_ERROR_FILE_READ, "ccl_core.c: Failed to read parameters from file."); return bad_params; } #define READ_DOUBLE(name) double name; *status |= (0==fscanf(f, #name ": %le\n",&name)); #define READ_INT(name) int name; *status |= (0==fscanf(f, #name ": %d\n",&name)) // Densities: CDM, baryons, total matter, curvature READ_DOUBLE(Omega_c); READ_DOUBLE(Omega_b); READ_DOUBLE(Omega_m); READ_DOUBLE(Omega_k); READ_INT(k_sign); // Dark Energy READ_DOUBLE(w0); READ_DOUBLE(wa); // Hubble parameters READ_DOUBLE(H0); READ_DOUBLE(h); // Neutrino properties READ_DOUBLE(Neff); READ_INT(N_nu_mass); READ_DOUBLE(N_nu_rel); double mnu[3] = {0.0, 0.0, 0.0}; if (N_nu_mass>0){ *status |= (0==fscanf(f, "m_nu: [")); for (int i=0; i<N_nu_mass; i++){ *status |= (0==fscanf(f, "%le, ", mnu+i)); } *status |= (0==fscanf(f, "]\n")); } READ_DOUBLE(sum_nu_masses); READ_DOUBLE(Omega_nu_mass); READ_DOUBLE(Omega_nu_rel); // Primordial power spectra READ_DOUBLE(A_s); READ_DOUBLE(n_s); // Radiation parameters READ_DOUBLE(Omega_g); READ_DOUBLE(T_CMB); // BCM baryonic model parameters READ_DOUBLE(bcm_log10Mc); READ_DOUBLE(bcm_etab); READ_DOUBLE(bcm_ks); // Modified gravity parameters READ_DOUBLE(mu_0); READ_DOUBLE(sigma_0); // Derived parameters READ_DOUBLE(sigma8); READ_DOUBLE(Omega_l); READ_DOUBLE(z_star); READ_INT(has_mgrowth); READ_INT(nz_mgrowth); double *z_mgrowth; double *df_mgrowth; if (has_mgrowth){ z_mgrowth = malloc(nz_mgrowth*sizeof(double)); df_mgrowth = malloc(nz_mgrowth*sizeof(double)); *status |= (0==fscanf(f, "z_mgrowth: [")); for (int i=0; i<nz_mgrowth; i++){ *status |= (0==fscanf(f, "%le, ", z_mgrowth+i)); } *status |= (0==fscanf(f, "]\n")); *status |= (0==fscanf(f, "df_mgrowth: [")); for (int 
i=0; i<nz_mgrowth; i++){ *status |= (0==fscanf(f, "%le, ", df_mgrowth+i)); } *status |= (0==fscanf(f, "]\n")); } else{ z_mgrowth = NULL; df_mgrowth = NULL; } #undef READ_DOUBLE #undef READ_INT fclose(f); if (*status) { ccl_raise_warning( *status, "ccl_core.c: Structure of YAML file incorrect: %s", filename); } double norm_pk; if (isnan(A_s)){ norm_pk = sigma8; } else{ norm_pk = A_s; } ccl_parameters params = ccl_parameters_create( Omega_c, Omega_b, Omega_k, Neff, mnu, N_nu_mass, w0, wa, h, norm_pk, n_s, bcm_log10Mc, bcm_etab, bcm_ks, mu_0, sigma_0, nz_mgrowth, z_mgrowth, df_mgrowth, status); if(z_mgrowth) free(z_mgrowth); if (df_mgrowth) free(df_mgrowth); return params; } /* ------- ROUTINE: ccl_data_free -------- INPUT: ccl_data TASK: free the input data */ void ccl_data_free(ccl_data * data) { //We cannot assume that all of these have been allocated //TODO: it would actually make more sense to do this within ccl_cosmology_free, //where we could make use of the flags "computed_distances" etc. to figure out //what to free up gsl_spline_free(data->chi); gsl_spline_free(data->growth); gsl_spline_free(data->fgrowth); gsl_spline_free(data->E); gsl_spline_free(data->achi); gsl_spline_free(data->logsigma); gsl_spline_free(data->dlnsigma_dlogm); ccl_f2d_t_free(data->p_lin); ccl_f2d_t_free(data->p_nl); ccl_f1d_t_free(data->rsd_splines[0]); ccl_f1d_t_free(data->rsd_splines[1]); ccl_f1d_t_free(data->rsd_splines[2]); } /* ------- ROUTINE: ccl_cosmology_set_status_message -------- INPUT: ccl_cosmology struct, status_string TASK: set the status message safely. */ void ccl_cosmology_set_status_message(ccl_cosmology * cosmo, const char * message, ...) { const int trunc = 480; /* must be < 500 - 4 */ va_list va; va_start(va, message); #pragma omp critical { vsnprintf(cosmo->status_message, trunc, message, va); /* if truncation happens, message[trunc - 1] is not NULL, ... will show up. 
*/ strcpy(&cosmo->status_message[trunc], "..."); } va_end(va); } /* ------- ROUTINE: ccl_parameters_free -------- INPUT: ccl_parameters struct TASK: free allocated quantities in the parameters struct */ void ccl_parameters_free(ccl_parameters * params) { if (params->m_nu != NULL){ free(params->m_nu); params->m_nu = NULL; } if (params->z_mgrowth != NULL){ free(params->z_mgrowth); params->z_mgrowth = NULL; } if (params->df_mgrowth != NULL){ free(params->df_mgrowth); params->df_mgrowth = NULL; } } /* ------- ROUTINE: ccl_cosmology_free -------- INPUT: ccl_cosmology struct TASK: free the input data and the cosmology struct */ void ccl_cosmology_free(ccl_cosmology * cosmo) { if (cosmo != NULL) ccl_data_free(&cosmo->data); free(cosmo); } int ccl_get_pk_spline_na(ccl_cosmology *cosmo) { return cosmo->spline_params.A_SPLINE_NA_PK + cosmo->spline_params.A_SPLINE_NLOG_PK - 1; } void ccl_get_pk_spline_a_array(ccl_cosmology *cosmo,int ndout,double* doutput,int *status) { double *d = NULL; if (ndout != ccl_get_pk_spline_na(cosmo)) *status = CCL_ERROR_INCONSISTENT; if (*status == 0) { d = ccl_linlog_spacing(cosmo->spline_params.A_SPLINE_MINLOG_PK, cosmo->spline_params.A_SPLINE_MIN_PK, cosmo->spline_params.A_SPLINE_MAX, cosmo->spline_params.A_SPLINE_NLOG_PK, cosmo->spline_params.A_SPLINE_NA_PK); if (d == NULL) *status = CCL_ERROR_MEMORY; } if(*status==0) memcpy(doutput, d, ndout*sizeof(double)); free(d); } int ccl_get_pk_spline_nk(ccl_cosmology *cosmo) { double ndecades = log10(cosmo->spline_params.K_MAX) - log10(cosmo->spline_params.K_MIN); return (int)ceil(ndecades*cosmo->spline_params.N_K); } void ccl_get_pk_spline_lk_array(ccl_cosmology *cosmo,int ndout,double* doutput,int *status) { double *d = NULL; if (ndout != ccl_get_pk_spline_nk(cosmo)) *status = CCL_ERROR_INCONSISTENT; if (*status == 0) { d = ccl_log_spacing(cosmo->spline_params.K_MIN, cosmo->spline_params.K_MAX, ndout); if (d == NULL) *status = CCL_ERROR_MEMORY; } if (*status == 0) { for(int ii=0; ii < ndout; ii++) 
doutput[ii] = log(d[ii]); } free(d); }
pcg.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> #include <math.h> #include "pcg.h" // Left preconditioned Conjugate Gradient for solving A * x = b void pcg( const int n, const DTYPE tol, const int max_iter, const matvec_fptr Ax, const void *Ax_param, const DTYPE *b, const matvec_fptr invMx, const void *invMx_param, DTYPE *x, int *flag_, DTYPE *relres_, int *iter_, DTYPE *res_vec ) { size_t vec_msize = sizeof(DTYPE) * n; DTYPE *r = (DTYPE*) malloc(vec_msize); DTYPE *z = (DTYPE*) malloc(vec_msize); DTYPE *p = (DTYPE*) malloc(vec_msize); DTYPE *s = (DTYPE*) malloc(vec_msize); assert(r != NULL && z != NULL && p != NULL && s != NULL); // r = b - A * x; Ax(Ax_param, x, r); #pragma omp simd for (int i = 0; i < n; i++) r[i] = b[i] - r[i]; // z = M \ r; if (invMx != NULL) invMx(invMx_param, r, z); else memcpy(z, r, vec_msize); // p = z; memcpy(p, z, vec_msize); // rho = r' * z; DTYPE rho = 0.0; #pragma omp simd for (int i = 0; i < n; i++) rho += r[i] * z[i]; // b_2norm = norm(b, 2); // r_2norm = norm(r, 2); // rn_stop = b_2norm * tol; DTYPE b_2norm = 0.0, r_2norm = 0.0, rn_stop; #pragma omp simd for (int i = 0; i < n; i++) { b_2norm += b[i] * b[i]; r_2norm += r[i] * r[i]; } b_2norm = DSQRT(b_2norm); r_2norm = DSQRT(r_2norm); rn_stop = b_2norm * tol; int iter = 0; DTYPE alpha, beta, rho0, tmp; while (iter < max_iter && r_2norm > rn_stop) { // s = A * p; Ax(Ax_param, p, s); // alpha = rho / (p' * s); tmp = 0.0; #pragma omp simd for (int i = 0; i < n; i++) tmp += p[i] * s[i]; alpha = rho / tmp; // x = x + alpha * p; // r = r - alpha * s; #pragma omp simd for (int i = 0; i < n; i++) { x[i] += alpha * p[i]; r[i] -= alpha * s[i]; } // z = M \ r; if (invMx != NULL) invMx(invMx_param, r, z); else memcpy(z, r, vec_msize); // rho0 = rho; // rho = r' * z; rho0 = rho; rho = 0.0; #pragma omp simd for (int i = 0; i < n; i++) rho += r[i] * z[i]; // beta = rho / rho0; // p = z + beta * p; beta = rho / rho0; #pragma omp simd for (int i = 0; i < n; i++) p[i] = 
z[i] + beta * p[i]; // r_2norm = norm(r, 2); // resvec(iter) = r_2norm; // iter = iter + 1; r_2norm = 0.0; #pragma omp simd for (int i = 0; i < n; i++) r_2norm += r[i] * r[i]; r_2norm = DSQRT(r_2norm); if (res_vec != NULL) res_vec[iter] = r_2norm; iter++; //printf("%e\n", r_2norm / b_2norm); } // End of while *flag_ = (r_2norm <= rn_stop) ? 0 : 1; *relres_ = r_2norm / b_2norm; *iter_ = iter; free(r); free(z); free(p); free(s); }
target_uses_allocators.c
// Test host codegen. // RUN: %clang_cc1 -verify -fopenmp -fopenmp-version=50 -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-llvm %s -o - | FileCheck %s --check-prefix CHECK --check-prefix CHECK-64 // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp -fopenmp-version=50 -x c++ -triple powerpc64le-unknown-unknown -fopenmp-targets=powerpc64le-ibm-linux-gnu // expected-no-diagnostics #ifndef HEADER #define HEADER enum omp_allocator_handle_t { omp_null_allocator = 0, omp_default_mem_alloc = 1, omp_large_cap_mem_alloc = 2, omp_const_mem_alloc = 3, omp_high_bw_mem_alloc = 4, omp_low_lat_mem_alloc = 5, omp_cgroup_mem_alloc = 6, omp_pteam_mem_alloc = 7, omp_thread_mem_alloc = 8, KMP_ALLOCATOR_MAX_HANDLE = __UINTPTR_MAX__ }; // CHECK: define {{.*}}[[FIE:@.+]]() void fie() { int x; #pragma omp target uses_allocators(omp_null_allocator) allocate(omp_null_allocator: x) firstprivate(x) {} #pragma omp target uses_allocators(omp_default_mem_alloc) allocate(omp_default_mem_alloc: x) firstprivate(x) {} #pragma omp target uses_allocators(omp_large_cap_mem_alloc) allocate(omp_large_cap_mem_alloc: x) firstprivate(x) {} #pragma omp target uses_allocators(omp_const_mem_alloc) allocate(omp_const_mem_alloc: x) firstprivate(x) {} #pragma omp target uses_allocators(omp_high_bw_mem_alloc) allocate(omp_high_bw_mem_alloc: x) firstprivate(x) {} #pragma omp target uses_allocators(omp_low_lat_mem_alloc) allocate(omp_low_lat_mem_alloc: x) firstprivate(x) {} #pragma omp target uses_allocators(omp_cgroup_mem_alloc) allocate(omp_cgroup_mem_alloc: x) firstprivate(x) {} #pragma omp target uses_allocators(omp_pteam_mem_alloc) allocate(omp_pteam_mem_alloc: x) firstprivate(x) {} } #endif
PagerankCpuCode.c
#include "PagerankCpuCode.h"

#define AVG_DEG 8 //4
//#define NUM_NODES (512*1024) //(128*1024)
//#define NUM_EDGES (NUM_NODES*AVG_DEG)
int NUM_NODES = (512*1024); //(128*1024)
int NUM_EDGES; // (NUM_NODES*AVG_DEG)

#include "bfs.h"
#include "graph_gen.h"
#include "edge_list_input.h"
#include <omp.h>
#include <string.h> // memcpy: safe type punning in the f48 helpers
#include <math.h>   // fabsf

// Globals: raw buffers exchanged with the DFE; allocated in main(), released in cleanup().
void *data_in, *data_out;

// --- Packed little-endian field helpers -------------------------------------
// The graph uses 24-bit / 48-bit packed integers and 48-bit records holding a
// 32-bit float (bytes 0-3) plus a 16-bit node degree (bytes 4-5).

// Store the low 24 bits of 'val' into a 3-byte little-endian field.
void assign24 (uint24_t* ptr, unsigned long long val)
{
  unsigned long long tmp = 0xff;
  ptr->c[0] = val & tmp;
  ptr->c[1] = (val & (tmp<< 8)) >> 8;
  ptr->c[2] = (val & (tmp<<16)) >> 16;
}

// Read a 3-byte little-endian field back into a 64-bit integer.
unsigned long long getval24(uint24_t* ptr)
{
  unsigned long long retval = 0;
  retval += (unsigned long long)(ptr->c[0]);
  retval += (unsigned long long)(ptr->c[1]) << 8;
  retval += (unsigned long long)(ptr->c[2]) << 16;
  return retval;
}

// Store the low 48 bits of 'val' into a 6-byte little-endian field.
void assign48 (uint48_t* ptr, unsigned long long val)
{
  unsigned long long tmp = 0xff;
  ptr->c[0] = val & tmp;
  ptr->c[1] = (val & (tmp<< 8)) >> 8;
  ptr->c[2] = (val & (tmp<<16)) >> 16;
  ptr->c[3] = (val & (tmp<<24)) >> 24;
  ptr->c[4] = (val & (tmp<<32)) >> 32;
  ptr->c[5] = (val & (tmp<<40)) >> 40;
}

// Read a 6-byte little-endian field back into a 64-bit integer.
unsigned long long getval48(uint48_t* ptr)
{
  unsigned long long retval = 0;
  retval += (unsigned long long)(ptr->c[0]);
  retval += (unsigned long long)(ptr->c[1]) << 8;
  retval += (unsigned long long)(ptr->c[2]) << 16;
  retval += (unsigned long long)(ptr->c[3]) << 24;
  retval += (unsigned long long)(ptr->c[4]) << 32;
  retval += (unsigned long long)(ptr->c[5]) << 40;
  return retval;
}

// Store a 32-bit float into bytes 0-3 of a 48-bit record.
// Bytes 4-5 (the degree, written by prepend_deg*) are deliberately untouched.
void assign_f48(float48_t* ptr, float val)
{
  uint32_t tmp = 0xff;
  uint32_t bits;
  // FIX: bit-copy instead of dereferencing (uint32_t*)&val, which violates
  // the strict-aliasing rule and is undefined behavior.
  memcpy(&bits, &val, sizeof bits);
  ptr->c[0] = bits & tmp;
  ptr->c[1] = (bits & (tmp<< 8)) >> 8;
  ptr->c[2] = (bits & (tmp<<16)) >> 16;
  ptr->c[3] = (bits & (tmp<<24)) >> 24;
  //ptr->c[4] = 0;
  //ptr->c[5] = 0;
}

// Read the 32-bit float stored in bytes 0-3 of a 48-bit record.
float getval_f48(float48_t* ptr)
{
  uint32_t bits = 0;
  bits |= (uint32_t)(ptr->c[0]);
  bits |= (uint32_t)(ptr->c[1]) << 8;
  bits |= (uint32_t)(ptr->c[2]) << 16;
  // FIX: cast before shifting -- (int)byte << 24 can overflow signed int
  // for byte values >= 128, which is undefined behavior.
  bits |= (uint32_t)(ptr->c[3]) << 24;
  float retval;
  memcpy(&retval, &bits, sizeof retval); // FIX: no pointer-cast type punning
  return retval;
}

/////////////////////////////////////
//
// Helper Functions
//

// Release the DFE I/O buffers.
void cleanup()
{
  free(data_in);
  free(data_out);
}

// Elementwise difference (end - start) of two timespecs; assumes end >= start.
struct timespec ts_diff(struct timespec start, struct timespec end)
{
  struct timespec temp;
  if ((end.tv_nsec-start.tv_nsec)<0) {
    temp.tv_sec = end.tv_sec-start.tv_sec-1;
    temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec;
  } else {
    temp.tv_sec = end.tv_sec-start.tv_sec;
    temp.tv_nsec = end.tv_nsec-start.tv_nsec;
  }
  return temp;
}

// Set every 24-bit property to init_val, except the root which gets 0.
void init_prop_uint24_root(prop_uint24_t* prop_ptr, uint64_t N, uint64_t root, unsigned long long init_val)
{
  for (uint64_t i = 0; i < N; i++) {
    if (i == root)
      assign24(&prop_ptr[i], 0);        //set root to zero
    else
      assign24(&prop_ptr[i], init_val); //set other nodes to 'init_val'
  }
}

// Set every 24-bit property to init_val.
void init_prop_uint24(prop_uint24_t* prop_ptr, uint64_t N, unsigned long long init_val)
{
  for (uint64_t i = 0; i < N; i++) {
    assign24(&prop_ptr[i], init_val);
  }
}

// Set every f48 property to init_val, except the root which gets 1.0.
void init_prop_f48_root(prop_f48_t* prop_ptr, uint64_t N, uint64_t root, float init_val)
{
  for (uint64_t i = 0; i < N; i++) {
    if (i == root)
      assign_f48(&prop_ptr[i], 1.0);      //set root to 1.0
    else
      assign_f48(&prop_ptr[i], init_val); //set other nodes to 'init_val'
  }
}

// Set every f48 property to init_val.
void init_prop_f48(prop_f48_t* prop_ptr, uint64_t N, float init_val)
{
  for (uint64_t i = 0; i < N; i++) {
    assign_f48(&prop_ptr[i], init_val);
  }
}

// Set every replicated (per-edge, length M) f48 entry to init_val.
void init_rep_f48(prop_f48_t* rep_ptr, uint64_t M, float init_val)
{
  for (uint64_t i = 0; i < M; i++) {
    assign_f48(&rep_ptr[i], init_val);
  }
}

// Put each node's degree into the most significant 16 bits of its f48 property.
// NOTE(review): the degree is truncated to 16 bits; nodes with degree > 65535
// would wrap -- confirm the graph generator bounds degrees.
void prepend_deg(prop_f48_t* prop_ptr, node_t* n_ptr)
{
  int i = 0;
  uint16_t tmp = 0xff;
  for (i = 0; i < NUM_NODES; i++) {
    uint16_t deg = (uint16_t)(getval48(&n_ptr[i+1]) - getval48(&n_ptr[i]));
    float48_t* ptr = (float48_t*)(&prop_ptr[i]);
    ptr->c[4] = (deg & tmp);
    ptr->c[5] = (deg & (tmp<<8)) >> 8;
  }
}

// For every edge slot j whose destination is node d, put d's degree into the
// most significant 16 bits of the replicated entry rep_ptr[j].
void prepend_deg_rep(prop_f48_t* rep_ptr, node_t* n_ptr, edge_t* e_ptr)
{
  int i = 0;
  uint16_t tmp = 0xff;
  for (i = 0; i < NUM_NODES; i++) {
    for (uint64_t j = getval48(&n_ptr[i]); j < getval48(&n_ptr[i+1]); j++) {
      uint64_t node = getval48(&e_ptr[j]);
      uint16_t deg = (uint16_t)(getval48(&n_ptr[node+1]) - getval48(&n_ptr[node]));
      float48_t* ptr = (float48_t*)(&rep_ptr[j]);
      ptr->c[4] = (deg & tmp);
      ptr->c[5] = (deg & (tmp<<8)) >> 8;
    }
  }
}

// Retrieve the 16-bit degree stored in bytes 4-5 of an f48 record.
uint16_t getdeg16(float48_t* ptr)
{
  uint16_t retval = 0;
  retval |= (ptr->c[4]);
  retval |= ( (ptr->c[5]) << 8);
  return retval;
}

// Divide every replicated float by its stored 16-bit degree (rep = pr/deg).
void div_by_deg_rep(prop_f48_t* rep_ptr, node_t* n_ptr)
{
  for (int i = 0; i < NUM_NODES; i++) {
    for (uint64_t j = getval48(&n_ptr[i]); j < getval48(&n_ptr[i+1]); j++) {
      float48_t* ptr = (float48_t*)(&rep_ptr[j]);
      float f = getval_f48(ptr);
      uint16_t deg = getdeg16(ptr);
      if (deg == 0) deg = (uint16_t)1; // prevent division by zero
      assign_f48(ptr, f/((float)deg) );
    }
  }
}

// pagerank: accumulate |prop[i] - deg*rep| over all nodes, where rep is any
// replicated entry for node i (they all hold the same pr/deg value).
float acc_diff(node_t *n_ptr, edge_t* e_ptr, prop_f48_t* prop_ptr, prop_f48_t* rep_ptr)
{
  (void)n_ptr; // kept for interface compatibility with the older implementation
  // O(M) lookup table: for each node, one edge-list slot holding a replicated
  // entry for it (-1 if the node has no incoming edge). This data structure
  // could be created off-line.
  int *pos_map = malloc(NUM_NODES*sizeof(int));
  for (int i = 0; i < NUM_NODES; i++)
    pos_map[i] = -1;
  for (int j = 0; j < NUM_EDGES; j++) { // populate the mappings
    int dst_node = getval48(&e_ptr[j]);
    if (pos_map[dst_node] == -1)
      pos_map[dst_node] = j;
  }
  float diff = 0.0;
  for (int i = 0; i < NUM_NODES; i++) {
    float48_t* ptr = (float48_t*)(&prop_ptr[i]);
    float prop_val = getval_f48(ptr);
    float rep_val = 0.0;
    uint16_t deg = 0;
    int rep_pos = pos_map[i];
    // BUG FIX: a node with no incoming edge has rep_pos == -1; the original
    // read rep_ptr[-1] (out of bounds). Such nodes now contribute |prop_val|,
    // matching the old brute-force fallback of rep_val = 0, deg = 0.
    if (rep_pos != -1) {
      float48_t* r_ptr = (float48_t*)(&rep_ptr[rep_pos]);
      rep_val = getval_f48(r_ptr);
      deg = getdeg16(r_ptr);
    }
    // rep entries store pr/deg, so scale back up before comparing.
    diff += fabsf(prop_val - ((float)deg*rep_val) );
  }
  free(pos_map);
  return diff;
}

// --- Optimized (OpenMP) scatter ---------------------------------------------
// Shadow copies of the graph in plain 4-byte-aligned types so the scatter loop
// can run in parallel without decoding packed fields on every access.
uint32_t* node_list;
uint32_t* edge_list;
float* prop_ar;
float* rep_ar;

// Build the 4-byte shadow structures from the packed arrays.
void prepare_scat(node_t *n_ptr, edge_t *e_ptr, prop_f48_t* prop_ptr, prop_f48_t* rep_ptr)
{
  (void)rep_ptr; // rep values are produced by the scatter itself
  // BUG FIX: node_list is a CSR offset array and is read at [NUM_NODES]
  // (the sentinel end entry) by scatter_opt/test_scat, so it needs
  // NUM_NODES+1 entries; the original allocated and filled only NUM_NODES.
  node_list = (uint32_t*)malloc((NUM_NODES+1)*sizeof(uint32_t));
  prop_ar = (float*)malloc(NUM_NODES*sizeof(float));
  edge_list = (uint32_t*)malloc(NUM_EDGES*sizeof(uint32_t));
  rep_ar = (float*)malloc(NUM_EDGES*sizeof(float));
  for (int i = 0; i <= NUM_NODES; i++) {
    node_list[i] = getval48(&n_ptr[i]);
  }
  for (int i = 0; i < NUM_NODES; i++) {
    prop_ar[i] = getval_f48((float48_t*)(&prop_ptr[i]));
  }
  for (int j = 0; j < NUM_EDGES; j++) {
    edge_list[j] = getval48(&e_ptr[j]);
    //rep_ar[j] = getval_f48(&rep_ptr[j]);
  }
}

// Debug: print a few nodes' shadow data to check the scatter. Call only after
// scatter_opt/scatter_opt2 -- before that rep_ar is uninitialized.
void test_scat()
{
  for (int i = 0; i < 32; i++) {
    printf("node %d: [ ", i);
    for (int j = node_list[i]; j < node_list[i+1]; j++) {
      printf("%u(%f) ", edge_list[j], rep_ar[j]); // %u: edge_list is uint32_t
    }
    printf("] pagerank=%f\n", prop_ar[i]);
  }
}

// Parallel scatter, one node's edge range per loop iteration.
void scatter_opt()
{
  int i=0, j=0;
  omp_set_num_threads(16);
  #pragma omp parallel private(i,j) shared(node_list,edge_list,rep_ar,prop_ar)
  {
    #pragma omp for nowait //schedule(static)
    for (i = 0; i < NUM_NODES; i++) {
      for (j = node_list[i]; j < node_list[i+1]; j++) {
        rep_ar[j] = prop_ar[edge_list[j]];
      }
    }
  }
}

// Parallel scatter, flat over the edge list (no per-node inner loop).
void scatter_opt2()
{
  int j=0;
  omp_set_num_threads(12);
  #pragma omp parallel private(j) shared(node_list,edge_list,rep_ar,prop_ar)
  {
    #pragma omp for nowait //schedule(static)
    for (j = 0; j < NUM_EDGES; j++) {
      rep_ar[j] = prop_ar[edge_list[j]];
    }
  }
}

// Free the shadow structures built by prepare_scat.
void end_scat()
{
  free(node_list);
  free(edge_list);
  free(prop_ar);
  free(rep_ar);
}
array void rep_scatter(node_t *n_ptr, edge_t *e_ptr, prop_f48_t* prop_ptr, prop_f48_t* rep_ptr) { for (int i = 0; i < NUM_NODES; i++) { for (uint64_t j = getval48(&n_ptr[i]); j < getval48(&n_ptr[i+1]); j++) { uint64_t dn = getval48(&e_ptr[j]); float48_t* p_ptr = (float48_t*)(&prop_ptr[dn]); float prop_val = getval_f48(p_ptr); float48_t* r_ptr = (float48_t*)(&rep_ptr[j]); assign_f48(r_ptr, prop_val); } } } void print_graph(node_t *n_ptr, int nb, int eb, int p24b, int p48b, int b_len, const char *title_str) { FILE *f; int len = strlen(title_str); char *newstr = malloc(len+5); strcpy(newstr, title_str); strcat(newstr, ".txt"); if (NULL == (f = fopen(newstr, "w"))) { fprintf(stderr, "Error opening file %s for writing.\n", newstr); cleanup(); exit(EXIT_FAILURE); } edge_t *e_ptr = (edge_t*)n_ptr + (nb*b_len)/sizeof(*e_ptr); prop_uint24_t *p_ptr0 = (prop_uint24_t*)e_ptr + ((eb*b_len)/sizeof(prop_uint24_t)); prop_f48_t *p_ptr1 = (prop_f48_t*)p_ptr0 + ((p24b*b_len)/sizeof(prop_f48_t)); prop_f48_t *p_ptr2 = p_ptr1 + ((p48b*b_len)/sizeof(prop_f48_t)); fprintf(f, title_str); fprintf(f, "\n\n-------------------------------------\n\n"); for (int i = 0; i < NUM_NODES; i++) { fprintf(f, "node %d: [ ", i); for (uint64_t j = getval48(&n_ptr[i]); j < getval48(&n_ptr[i+1]); j++) { fprintf(f, "%lld(%d|%f) ", getval48(&e_ptr[j]), getdeg16(&p_ptr2[j]), getval_f48(&p_ptr2[j]) ); } fprintf(f, "] page_rank=%f \n", getval_f48(&p_ptr1[i])); } fprintf(f,"\n\n-------------------------------------\n\n"); fclose(f); free(newstr); } int main(int argc, char **argv) { // assign NUM_NODES and NUM_EDGES NUM_EDGES = NUM_NODES*AVG_DEG; if (argc == 2) read_graph_size(argv[1]); int burst_len_bytes = 192; //384; //max_get_burst_size(max_file_t*..) 
int node_bytes = (NUM_NODES+1)*sizeof(node_t); int node_bursts = ceil((double)node_bytes/(double)burst_len_bytes); int edge_bytes = (NUM_EDGES)*sizeof(edge_t); int edge_bursts = ceil((double)edge_bytes/(double)burst_len_bytes); // Application-specific: Betweeness Centrality // Node properties: levels , page_rank // [ lvls ] // lvls: 24-bit uints others: 48-bit floats (highest 16 bits are zero) // 4 property arrays needed int num_prop24s = 1; int num_prop48s = 1; // page_rank property: 48-bit float int num_rep48s = 1; // replicated arrays (num_edges elements, instead of N) int prop24_bytes = (NUM_NODES)*sizeof(prop_uint24_t); int prop48_bytes = (NUM_NODES)*sizeof(prop_f48_t); int rep48_bytes = (NUM_EDGES)*sizeof(prop_f48_t); int prop24_bursts = ceil((double)prop24_bytes/(double)burst_len_bytes); int prop48_bursts = ceil((double)prop48_bytes/(double)burst_len_bytes); int rep48_bursts = ceil((double)rep48_bytes/(double)burst_len_bytes); int prop_bursts = num_prop24s*prop24_bursts + num_prop48s*prop48_bursts + num_rep48s *rep48_bursts; int size_bytes = (node_bursts + edge_bursts + prop_bursts) * burst_len_bytes; printf("N = %d, M = %d\n", NUM_NODES, NUM_EDGES); //printf("\nsize_bytes=%d , n_bursts=%d , e_bursts=%d , p_bursts=%d\n", // size_bytes, node_bursts, edge_bursts, prop_bursts); data_in = malloc(size_bytes); data_out = malloc(size_bytes); if(!data_in || !data_out) { fprintf(stderr, "Failed to allocate memory for data I/O.\n"); return 1; } memset(data_out, 0, size_bytes); node_t *node_ptr = (node_t*)data_in; edge_t *edge_ptr = (edge_t*)data_in + ((node_bursts*burst_len_bytes)/sizeof(edge_t)); //printf("node_ptr: %p\nedge_ptr: %p\n", node_ptr, edge_ptr); if (argc == 2) { // Read graph in from edge list read_edge_list(node_ptr, edge_ptr, NUM_NODES, NUM_EDGES, argv[1]); } else { // Generate graph create_uniform_random_graph(node_ptr, edge_ptr, NUM_NODES, NUM_EDGES); //create_RMAT_graph(node_ptr, edge_ptr, 0.4, 0.25, 0.25, NUM_NODES, NUM_EDGES); // 4M node, 32M 
edges: pagerank was very slow (~7s runtime per iteration) // and did not converge //create_RMAT_graph(node_ptr, edge_ptr, 0.6, 0.20, 0.15, // NUM_NODES, NUM_EDGES); } // Initialize Property arrays //prop24 prop_uint24_t *prop_ptr0 = (prop_uint24_t*)edge_ptr + ((edge_bursts*burst_len_bytes)/sizeof(prop_uint24_t)); //prop48 prop_f48_t *prop_ptr1 = (prop_f48_t*)prop_ptr0 + // pointer to property array ((prop24_bursts*burst_len_bytes)/sizeof(prop_f48_t)); //rep48 prop_f48_t *prop_ptr2 = prop_ptr1 + // pointer to the replicated array ((prop48_bursts*burst_len_bytes)/sizeof(prop_f48_t)); // page_rank initial value: 1/N float init_val = (float)1/(float)NUM_NODES; init_prop_f48(prop_ptr1, NUM_NODES, init_val); //page_rank property array prepend_deg(prop_ptr1, node_ptr); //prepend degree in high 16 bits init_rep_f48(prop_ptr2, NUM_EDGES, init_val); //page_rank replicated array prepend_deg_rep(prop_ptr2, node_ptr, edge_ptr); //<--problem here div_by_deg_rep(prop_ptr2, node_ptr); printf("Finished generating input graph...\n");fflush(stdout); //print_graph(node_ptr, node_bursts, edge_bursts, // prop24_bursts, prop48_bursts, burst_len_bytes, // "InputGraph"); // Initialize timing data structures struct timespec ta_start, ta_end, ta_d; struct timespec tb_start, tb_end, tb_d; uint64_t elapsed_nsec; printf("Initializing maxfile...");fflush(stdout); max_file_t *maxfile; if (NULL == (maxfile = Pagerank_init())) { fprintf(stderr, "Problem initializing maxfile.\n"); fflush(stderr); exit(EXIT_FAILURE); } printf("done\n");fflush(stdout); printf("Creating engine, loading maxfile...");fflush(stdout); max_engine_t *eng; if (NULL == (eng = max_load(maxfile, "*"))) { fprintf(stderr, "Problem creating engine.\n"); fflush(stderr); max_file_free(maxfile); exit(EXIT_FAILURE); } printf("done\n\n");fflush(stdout); printf("Preparing params for loading data into DFE memory...");fflush(stdout); int num_elems_32 = size_bytes / sizeof(uint32_t); Pagerank_writeLMem_actions_t wr_actions; 
wr_actions.param_size = num_elems_32; wr_actions.param_start = 0; wr_actions.instream_fromcpu = (uint32_t*)data_in; printf("done\n");fflush(stdout); //PageRank parameters // e: acceptable error // d: damping factor (kernel parameter) // max: maximum number of iterations allowed for convergence float d = 0.85; float e = 0.001; int max = 5; float term = (1.0 - d)/(float)NUM_NODES; //Debug uint64_t numBrsts0; uint64_t numBrsts1; uint64_t numBrsts2; uint64_t numBrsts3; uint64_t numBrsts4; printf("Preparing params for running engine on FPGA...");fflush(stdout); Pagerank_actions_t grph_actions; grph_actions.param_nodeAddr = 0; // address of normal page_rank array grph_actions.param_propAddr = node_bursts + edge_bursts + prop24_bursts; // lvl array // address of replicated page_rank array grph_actions.param_repAddr = node_bursts + edge_bursts + prop24_bursts + // lvl array prop48_bursts; // BCRep array grph_actions.param_NumNodes = NUM_NODES; grph_actions.param_StartCnt = 100; grph_actions.param_prTerm = term; // pagerank term grph_actions.param_d = d; // damping factor: d //cycle at which to send stop interrupt grph_actions.param_StopCnt = (512*1024*1024); //number of cycles for done signal stability grph_actions.param_uDVal = 20; //Scalar Outputs grph_actions.outscalar_MemUnit0_numBrsts = &numBrsts0; grph_actions.outscalar_MemUnit1_numBrsts = &numBrsts1; grph_actions.outscalar_MemUnit2_numBrsts = &numBrsts2; grph_actions.outscalar_MemUnit3_numBrsts = &numBrsts3; grph_actions.outscalar_MemUnit4_numBrsts = &numBrsts4; max_config_set_string(MAX_CONFIG_DEBUG_DIRECTORY, "./"); printf("done\n");fflush(stdout); printf("Preparing params for reading DFE memory...");fflush(stdout); Pagerank_readLMem_actions_t rd_actions; rd_actions.param_size = num_elems_32; rd_actions.param_start = 0; rd_actions.outstream_tocpu = (uint32_t*)data_out; printf("done\n");fflush(stdout); // global timers uint64_t wr_time=0, run_time=0, rd_time=0, diff_time=0, scatter_time=0; uint64_t opt_scat_time=0; 
int iter = 0; // iteration number float diff = 0.0; clock_gettime(CLOCK_REALTIME, &tb_start); do { diff = 0.0; printf("\nBeginning iteration %d..\n", iter); // write to fpga printf("Loading data into DFE memory...");fflush(stdout); clock_gettime(CLOCK_REALTIME, &ta_start); Pagerank_writeLMem_run(eng, &wr_actions); clock_gettime(CLOCK_REALTIME, &ta_end); ta_d = ts_diff(ta_start, ta_end); elapsed_nsec = ta_d.tv_sec*(1E9) + ta_d.tv_nsec; printf("done..%ld:%ld -> %ldns\n", ta_d.tv_sec, ta_d.tv_nsec, elapsed_nsec);fflush(stdout); wr_time += elapsed_nsec; // call computation printf("----\nRunning engine...");fflush(stdout); clock_gettime(CLOCK_REALTIME, &ta_start); Pagerank_run( eng , &grph_actions ); // Advanced Static Interface clock_gettime(CLOCK_REALTIME, &ta_end); printf("done\n");fflush(stdout); printf("End: %ld:%ld\n", ta_end.tv_sec, ta_end.tv_nsec); ta_d = ts_diff(ta_start, ta_end); elapsed_nsec = ta_d.tv_sec*(1E9) + ta_d.tv_nsec; printf("Process time: %ld:%ld -> %ldns\n----\n", ta_d.tv_sec, ta_d.tv_nsec, elapsed_nsec); run_time += elapsed_nsec; // read from fpga printf("Reading DFE memory...");fflush(stdout); clock_gettime(CLOCK_REALTIME, &ta_start); Pagerank_readLMem_run(eng, &rd_actions); clock_gettime(CLOCK_REALTIME, &ta_end); ta_d = ts_diff(ta_start, ta_end); elapsed_nsec = ta_d.tv_sec*(1E9) + ta_d.tv_nsec; printf("done..%ld:%ld -> %ldns\n", ta_d.tv_sec, ta_d.tv_nsec, elapsed_nsec);fflush(stdout); rd_time += elapsed_nsec; // copy output data to input data memcpy((void*)data_in, (void*)data_out, size_bytes); // accumulate the diff: old values are still in rep array printf("Accumulating diff...");fflush(stdout); clock_gettime(CLOCK_REALTIME, &ta_start); diff = acc_diff(node_ptr, edge_ptr, prop_ptr1, prop_ptr2); clock_gettime(CLOCK_REALTIME, &ta_end); ta_d = ts_diff(ta_start, ta_end); elapsed_nsec = ta_d.tv_sec*(1E9) + ta_d.tv_nsec; printf("done..%ld:%ld -> %ldns\n", ta_d.tv_sec, ta_d.tv_nsec, elapsed_nsec);fflush(stdout); diff_time += elapsed_nsec; // scatter: 
from prop array to rep array printf("Scattering from prop array to rep array and dividing...");fflush(stdout); clock_gettime(CLOCK_REALTIME, &ta_start); rep_scatter(node_ptr, edge_ptr, prop_ptr1, prop_ptr2); div_by_deg_rep(prop_ptr2, node_ptr); // rep arrays store pr/deg clock_gettime(CLOCK_REALTIME, &ta_end); ta_d = ts_diff(ta_start, ta_end); elapsed_nsec = ta_d.tv_sec*(1E9) + ta_d.tv_nsec; printf("done..%ld:%ld -> %ldns\n", ta_d.tv_sec, ta_d.tv_nsec, elapsed_nsec);fflush(stdout); scatter_time += elapsed_nsec; // Optimized scatter function printf("Preparing optimized scatter...");fflush(stdout); prepare_scat(node_ptr, edge_ptr, prop_ptr1, prop_ptr2); printf("done...\n");fflush(stdout); //test_scat(); printf("Optimized scatter for time...");fflush(stdout); clock_gettime(CLOCK_REALTIME, &ta_start); scatter_opt2(); //scatter_opt(); clock_gettime(CLOCK_REALTIME, &ta_end); //test_scat(); end_scat(); ta_d = ts_diff(ta_start, ta_end); elapsed_nsec = ta_d.tv_sec*(1E9) + ta_d.tv_nsec; printf("done..%ld:%ld -> %ldns\n", ta_d.tv_sec, ta_d.tv_nsec, elapsed_nsec);fflush(stdout); opt_scat_time += elapsed_nsec; printf("Ending iteration %d..diff=%f\n", iter, diff); iter++; } while ((diff > e) && (iter < max)); clock_gettime(CLOCK_REALTIME, &tb_end); tb_d = ts_diff(tb_start, tb_end); elapsed_nsec = tb_d.tv_sec*(1E9) + tb_d.tv_nsec; printf("\n----\nOverall time: %ld:%ld -> %ldns\n\n", tb_d.tv_sec, tb_d.tv_nsec, elapsed_nsec); printf("run_time = %f ms\n", (float)run_time/(float)1000000); printf("diff_time = %f ms\n", (float)diff_time/(float)1000000); printf("scatter_time = %f ms\n", (float)scatter_time/(float)1000000); printf("opt_scatter_time = %f ms\n", (float)opt_scat_time/(float)1000000); printf("\nwrite_to_fpga = %f ms\n", (float)wr_time/(float)1000000); printf("read_from_fpga = %f ms\n", (float)rd_time/(float)1000000); printf("MemUnits: num0Brsts:%d, numBrsts1:%d, numBrsts2:%d, numBrsts3:%d, numBrsts4:%d\n", numBrsts0, numBrsts1, numBrsts2, numBrsts3, numBrsts4); 
printf("\nUnloading engine...");fflush(stdout); max_unload(eng); printf("done\n");fflush(stdout); print_graph((node_t*)data_in, node_bursts, edge_bursts, prop24_bursts, prop48_bursts, burst_len_bytes, "OutputGraph"); cleanup(); return 0; }
generated-funcs.c
// Check that the CHECK lines are generated for clang-generated functions // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fopenmp %s -emit-llvm -o - | FileCheck --check-prefix=OMP %s // RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu %s -emit-llvm -o - | FileCheck --check-prefix=NOOMP %s const int size = 1024 * 1024 * 32; double A[size]; void foo(void); int main() { int i = 0; #pragma omp parallel for for (i = 0; i < size; ++i) { A[i] = 0.0; } foo(); return 0; } void foo(void) { int i = 0; #pragma omp parallel for for (i = 0; i < size; ++i) { A[i] = 1.0; } }